target-arm/translate.c (qemu.git on git.proxmox.com, blob at commit 2d8e0a549fd33b70ef162387aefa57a7392a3899)
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "tcg-op.h"
30 #include "qemu/log.h"
31 #include "qemu/bitops.h"
32
33 #include "helper.h"
34 #define GEN_HELPER 1
35 #include "helper.h"
36
37 #define ENABLE_ARCH_4T arm_feature(env, ARM_FEATURE_V4T)
38 #define ENABLE_ARCH_5 arm_feature(env, ARM_FEATURE_V5)
39 /* currently all emulated v5 cores are also v5TE, so don't bother */
40 #define ENABLE_ARCH_5TE arm_feature(env, ARM_FEATURE_V5)
41 #define ENABLE_ARCH_5J 0
42 #define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
43 #define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
44 #define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
45 #define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
46 #define ENABLE_ARCH_8 arm_feature(env, ARM_FEATURE_V8)
47
48 #define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
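/* Decoder code gates architecture-dependent instructions on these feature
 * macros, typically written as
 *     ARCH(6T2);   (branches to the local "illegal_op" label when the
 *                   emulated core lacks Thumb-2)
 * so unsupported encodings fall through to the UNDEF handling path. */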
49
50 #include "translate.h"
51 static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
52
53 #if defined(CONFIG_USER_ONLY)
54 #define IS_USER(s) 1
55 #else
56 #define IS_USER(s) (s->user)
57 #endif
58
59 /* These instructions trap after executing, so defer them until after the
60 conditional execution state has been updated. */
61 #define DISAS_WFI 4
62 #define DISAS_SWI 5
63
64 TCGv_ptr cpu_env;
65 /* We reuse the same 64-bit temporaries for efficiency. */
66 static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
67 static TCGv_i32 cpu_R[16];
68 static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
69 static TCGv_i32 cpu_exclusive_addr;
70 static TCGv_i32 cpu_exclusive_val;
71 static TCGv_i32 cpu_exclusive_high;
72 #ifdef CONFIG_USER_ONLY
73 static TCGv_i32 cpu_exclusive_test;
74 static TCGv_i32 cpu_exclusive_info;
75 #endif
76
77 /* FIXME: These should be removed. */
78 static TCGv_i32 cpu_F0s, cpu_F1s;
79 static TCGv_i64 cpu_F0d, cpu_F1d;
80
81 #include "exec/gen-icount.h"
82
83 static const char *regnames[] =
84 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
85 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
86
87 /* initialize TCG globals. */
88 void arm_translate_init(void)
89 {
90 int i;
91
92 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
93
94 for (i = 0; i < 16; i++) {
95 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
96 offsetof(CPUARMState, regs[i]),
97 regnames[i]);
98 }
99 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
100 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
101 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
102 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
103
104 cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
105 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
106 cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
107 offsetof(CPUARMState, exclusive_val), "exclusive_val");
108 cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
109 offsetof(CPUARMState, exclusive_high), "exclusive_high");
110 #ifdef CONFIG_USER_ONLY
111 cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
112 offsetof(CPUARMState, exclusive_test), "exclusive_test");
113 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUARMState, exclusive_info), "exclusive_info");
115 #endif
116
117 #define GEN_HELPER 2
118 #include "helper.h"
119 }
120
121 static inline TCGv_i32 load_cpu_offset(int offset)
122 {
123 TCGv_i32 tmp = tcg_temp_new_i32();
124 tcg_gen_ld_i32(tmp, cpu_env, offset);
125 return tmp;
126 }
127
128 #define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
129
130 static inline void store_cpu_offset(TCGv_i32 var, int offset)
131 {
132 tcg_gen_st_i32(var, cpu_env, offset);
133 tcg_temp_free_i32(var);
134 }
135
136 #define store_cpu_field(var, name) \
137 store_cpu_offset(var, offsetof(CPUARMState, name))
138
139 /* Set a variable to the value of a CPU register. */
140 static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
141 {
142 if (reg == 15) {
143 uint32_t addr;
144 /* PC reads as the current insn address + 8 (ARM) or + 4 (Thumb); since s->pc already points past this insn, only one more insn width is added */
145 if (s->thumb)
146 addr = (long)s->pc + 2;
147 else
148 addr = (long)s->pc + 4;
149 tcg_gen_movi_i32(var, addr);
150 } else {
151 tcg_gen_mov_i32(var, cpu_R[reg]);
152 }
153 }
154
155 /* Create a new temporary and set it to the value of a CPU register. */
156 static inline TCGv_i32 load_reg(DisasContext *s, int reg)
157 {
158 TCGv_i32 tmp = tcg_temp_new_i32();
159 load_reg_var(s, tmp, reg);
160 return tmp;
161 }
162
163 /* Set a CPU register. The source must be a temporary and will be
164 marked as dead. */
165 static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
166 {
167 if (reg == 15) {
168 tcg_gen_andi_i32(var, var, ~1);
169 s->is_jmp = DISAS_JUMP;
170 }
171 tcg_gen_mov_i32(cpu_R[reg], var);
172 tcg_temp_free_i32(var);
173 }
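/* Note that store_reg() performs a plain, non-interworking PC write: bit 0
 * is masked so the PC stays halfword aligned and the current instruction
 * set is unchanged.  The interworking variants further down (gen_bx,
 * store_reg_bx, store_reg_from_load) also copy bit 0 into the Thumb bit. */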
174
175 /* Value extensions. */
176 #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
177 #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
178 #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
179 #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
180
181 #define gen_sxtb16(var) gen_helper_sxtb16(var, var)
182 #define gen_uxtb16(var) gen_helper_uxtb16(var, var)
183
184
185 static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
186 {
187 TCGv_i32 tmp_mask = tcg_const_i32(mask);
188 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
189 tcg_temp_free_i32(tmp_mask);
190 }
191 /* Set NZCV flags from the high 4 bits of var. */
192 #define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
193
194 static void gen_exception(int excp)
195 {
196 TCGv_i32 tmp = tcg_temp_new_i32();
197 tcg_gen_movi_i32(tmp, excp);
198 gen_helper_exception(cpu_env, tmp);
199 tcg_temp_free_i32(tmp);
200 }
201
202 static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
203 {
204 TCGv_i32 tmp1 = tcg_temp_new_i32();
205 TCGv_i32 tmp2 = tcg_temp_new_i32();
206 tcg_gen_ext16s_i32(tmp1, a);
207 tcg_gen_ext16s_i32(tmp2, b);
208 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
209 tcg_temp_free_i32(tmp2);
210 tcg_gen_sari_i32(a, a, 16);
211 tcg_gen_sari_i32(b, b, 16);
212 tcg_gen_mul_i32(b, b, a);
213 tcg_gen_mov_i32(a, tmp1);
214 tcg_temp_free_i32(tmp1);
215 }
216
217 /* Byteswap each halfword. */
218 static void gen_rev16(TCGv_i32 var)
219 {
220 TCGv_i32 tmp = tcg_temp_new_i32();
221 tcg_gen_shri_i32(tmp, var, 8);
222 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
223 tcg_gen_shli_i32(var, var, 8);
224 tcg_gen_andi_i32(var, var, 0xff00ff00);
225 tcg_gen_or_i32(var, var, tmp);
226 tcg_temp_free_i32(tmp);
227 }
228
229 /* Byteswap low halfword and sign extend. */
230 static void gen_revsh(TCGv_i32 var)
231 {
232 tcg_gen_ext16u_i32(var, var);
233 tcg_gen_bswap16_i32(var, var);
234 tcg_gen_ext16s_i32(var, var);
235 }
236
237 /* Unsigned bitfield extract. */
238 static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
239 {
240 if (shift)
241 tcg_gen_shri_i32(var, var, shift);
242 tcg_gen_andi_i32(var, var, mask);
243 }
244
245 /* Signed bitfield extract. */
246 static void gen_sbfx(TCGv_i32 var, int shift, int width)
247 {
248 uint32_t signbit;
249
250 if (shift)
251 tcg_gen_sari_i32(var, var, shift);
252 if (shift + width < 32) {
253 signbit = 1u << (width - 1);
254 tcg_gen_andi_i32(var, var, (1u << width) - 1);
255 tcg_gen_xori_i32(var, var, signbit);
256 tcg_gen_subi_i32(var, var, signbit);
257 }
258 }
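/* The xor/sub pair above is the usual branch-free sign extension: with the
 * field's sign bit s = 1u << (width - 1), (x ^ s) - s flips the sign bit
 * and subtracts it back out, propagating it through the upper bits. */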
259
260 /* Return (b << 32) + a. Mark inputs as dead. */
261 static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
262 {
263 TCGv_i64 tmp64 = tcg_temp_new_i64();
264
265 tcg_gen_extu_i32_i64(tmp64, b);
266 tcg_temp_free_i32(b);
267 tcg_gen_shli_i64(tmp64, tmp64, 32);
268 tcg_gen_add_i64(a, tmp64, a);
269
270 tcg_temp_free_i64(tmp64);
271 return a;
272 }
273
274 /* Return (b << 32) - a. Mark inputs as dead. */
275 static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
276 {
277 TCGv_i64 tmp64 = tcg_temp_new_i64();
278
279 tcg_gen_extu_i32_i64(tmp64, b);
280 tcg_temp_free_i32(b);
281 tcg_gen_shli_i64(tmp64, tmp64, 32);
282 tcg_gen_sub_i64(a, tmp64, a);
283
284 tcg_temp_free_i64(tmp64);
285 return a;
286 }
287
288 /* 32x32->64 multiply. Marks inputs as dead. */
289 static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
290 {
291 TCGv_i32 lo = tcg_temp_new_i32();
292 TCGv_i32 hi = tcg_temp_new_i32();
293 TCGv_i64 ret;
294
295 tcg_gen_mulu2_i32(lo, hi, a, b);
296 tcg_temp_free_i32(a);
297 tcg_temp_free_i32(b);
298
299 ret = tcg_temp_new_i64();
300 tcg_gen_concat_i32_i64(ret, lo, hi);
301 tcg_temp_free_i32(lo);
302 tcg_temp_free_i32(hi);
303
304 return ret;
305 }
306
307 static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
308 {
309 TCGv_i32 lo = tcg_temp_new_i32();
310 TCGv_i32 hi = tcg_temp_new_i32();
311 TCGv_i64 ret;
312
313 tcg_gen_muls2_i32(lo, hi, a, b);
314 tcg_temp_free_i32(a);
315 tcg_temp_free_i32(b);
316
317 ret = tcg_temp_new_i64();
318 tcg_gen_concat_i32_i64(ret, lo, hi);
319 tcg_temp_free_i32(lo);
320 tcg_temp_free_i32(hi);
321
322 return ret;
323 }
324
325 /* Swap low and high halfwords. */
326 static void gen_swap_half(TCGv_i32 var)
327 {
328 TCGv_i32 tmp = tcg_temp_new_i32();
329 tcg_gen_shri_i32(tmp, var, 16);
330 tcg_gen_shli_i32(var, var, 16);
331 tcg_gen_or_i32(var, var, tmp);
332 tcg_temp_free_i32(tmp);
333 }
334
335 /* Dual 16-bit add. The result is placed in t0; t1 is marked as dead.
336 tmp = (t0 ^ t1) & 0x8000;
337 t0 &= ~0x8000;
338 t1 &= ~0x8000;
339 t0 = (t0 + t1) ^ tmp;
340 */
341
342 static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
343 {
344 TCGv_i32 tmp = tcg_temp_new_i32();
345 tcg_gen_xor_i32(tmp, t0, t1);
346 tcg_gen_andi_i32(tmp, tmp, 0x8000);
347 tcg_gen_andi_i32(t0, t0, ~0x8000);
348 tcg_gen_andi_i32(t1, t1, ~0x8000);
349 tcg_gen_add_i32(t0, t0, t1);
350 tcg_gen_xor_i32(t0, t0, tmp);
351 tcg_temp_free_i32(tmp);
352 tcg_temp_free_i32(t1);
353 }
354
355 /* Set CF to the top bit of var. */
356 static void gen_set_CF_bit31(TCGv_i32 var)
357 {
358 tcg_gen_shri_i32(cpu_CF, var, 31);
359 }
360
361 /* Set N and Z flags from var. */
362 static inline void gen_logic_CC(TCGv_i32 var)
363 {
364 tcg_gen_mov_i32(cpu_NF, var);
365 tcg_gen_mov_i32(cpu_ZF, var);
366 }
367
368 /* T0 += T1 + CF. */
369 static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
370 {
371 tcg_gen_add_i32(t0, t0, t1);
372 tcg_gen_add_i32(t0, t0, cpu_CF);
373 }
374
375 /* dest = T0 + T1 + CF. */
376 static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
377 {
378 tcg_gen_add_i32(dest, t0, t1);
379 tcg_gen_add_i32(dest, dest, cpu_CF);
380 }
381
382 /* dest = T0 - T1 + CF - 1. */
383 static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
384 {
385 tcg_gen_sub_i32(dest, t0, t1);
386 tcg_gen_add_i32(dest, dest, cpu_CF);
387 tcg_gen_subi_i32(dest, dest, 1);
388 }
389
390 /* dest = T0 + T1. Compute C, N, V and Z flags */
391 static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
392 {
393 TCGv_i32 tmp = tcg_temp_new_i32();
394 tcg_gen_movi_i32(tmp, 0);
395 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
396 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
397 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
398 tcg_gen_xor_i32(tmp, t0, t1);
399 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
400 tcg_temp_free_i32(tmp);
401 tcg_gen_mov_i32(dest, cpu_NF);
402 }
403
404 /* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
405 static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
406 {
407 TCGv_i32 tmp = tcg_temp_new_i32();
408 if (TCG_TARGET_HAS_add2_i32) {
409 tcg_gen_movi_i32(tmp, 0);
410 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
411 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
412 } else {
413 TCGv_i64 q0 = tcg_temp_new_i64();
414 TCGv_i64 q1 = tcg_temp_new_i64();
415 tcg_gen_extu_i32_i64(q0, t0);
416 tcg_gen_extu_i32_i64(q1, t1);
417 tcg_gen_add_i64(q0, q0, q1);
418 tcg_gen_extu_i32_i64(q1, cpu_CF);
419 tcg_gen_add_i64(q0, q0, q1);
420 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
421 tcg_temp_free_i64(q0);
422 tcg_temp_free_i64(q1);
423 }
424 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
425 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
426 tcg_gen_xor_i32(tmp, t0, t1);
427 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
428 tcg_temp_free_i32(tmp);
429 tcg_gen_mov_i32(dest, cpu_NF);
430 }
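/* Both paths above form the 33-bit sum t0 + t1 + CF: the low 32 bits land
 * in cpu_NF (copied to ZF and dest) and the carry-out in cpu_CF.  Overflow
 * is VF = (result ^ t0) & ~(t0 ^ t1): the operands had equal signs and the
 * result's sign differs. */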
431
432 /* dest = T0 - T1. Compute C, N, V and Z flags */
433 static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
434 {
435 TCGv_i32 tmp;
436 tcg_gen_sub_i32(cpu_NF, t0, t1);
437 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
438 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
439 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
440 tmp = tcg_temp_new_i32();
441 tcg_gen_xor_i32(tmp, t0, t1);
442 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
443 tcg_temp_free_i32(tmp);
444 tcg_gen_mov_i32(dest, cpu_NF);
445 }
446
447 /* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
448 static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
449 {
450 TCGv_i32 tmp = tcg_temp_new_i32();
451 tcg_gen_not_i32(tmp, t1);
452 gen_adc_CC(dest, t0, tmp);
453 tcg_temp_free_i32(tmp);
454 }
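/* Subtract-with-carry uses the ARM convention that carry is the inverse of
 * borrow: t0 - t1 - (1 - CF) is computed as t0 + ~t1 + CF, which lets it
 * reuse the flag logic of gen_adc_CC(). */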
455
456 #define GEN_SHIFT(name) \
457 static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
458 { \
459 TCGv_i32 tmp1, tmp2, tmp3; \
460 tmp1 = tcg_temp_new_i32(); \
461 tcg_gen_andi_i32(tmp1, t1, 0xff); \
462 tmp2 = tcg_const_i32(0); \
463 tmp3 = tcg_const_i32(0x1f); \
464 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
465 tcg_temp_free_i32(tmp3); \
466 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
467 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
468 tcg_temp_free_i32(tmp2); \
469 tcg_temp_free_i32(tmp1); \
470 }
471 GEN_SHIFT(shl)
472 GEN_SHIFT(shr)
473 #undef GEN_SHIFT
474
475 static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
476 {
477 TCGv_i32 tmp1, tmp2;
478 tmp1 = tcg_temp_new_i32();
479 tcg_gen_andi_i32(tmp1, t1, 0xff);
480 tmp2 = tcg_const_i32(0x1f);
481 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
482 tcg_temp_free_i32(tmp2);
483 tcg_gen_sar_i32(dest, t0, tmp1);
484 tcg_temp_free_i32(tmp1);
485 }
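/* Register-controlled shifts use only the low byte of the shift register.
 * For LSL/LSR (GEN_SHIFT above) an amount of 32..255 selects a zero source
 * via movcond, giving a zero result; for ASR (gen_sar) the amount is
 * clamped to 31, which matches any shift of 32 or more. */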
486
487 static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
488 {
489 TCGv_i32 c0 = tcg_const_i32(0);
490 TCGv_i32 tmp = tcg_temp_new_i32();
491 tcg_gen_neg_i32(tmp, src);
492 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
493 tcg_temp_free_i32(c0);
494 tcg_temp_free_i32(tmp);
495 }
496
497 static void shifter_out_im(TCGv_i32 var, int shift)
498 {
499 if (shift == 0) {
500 tcg_gen_andi_i32(cpu_CF, var, 1);
501 } else {
502 tcg_gen_shri_i32(cpu_CF, var, shift);
503 if (shift != 31) {
504 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
505 }
506 }
507 }
508
509 /* Shift by immediate. Includes special handling for shift == 0. */
510 static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
511 int shift, int flags)
512 {
513 switch (shiftop) {
514 case 0: /* LSL */
515 if (shift != 0) {
516 if (flags)
517 shifter_out_im(var, 32 - shift);
518 tcg_gen_shli_i32(var, var, shift);
519 }
520 break;
521 case 1: /* LSR */
522 if (shift == 0) {
523 if (flags) {
524 tcg_gen_shri_i32(cpu_CF, var, 31);
525 }
526 tcg_gen_movi_i32(var, 0);
527 } else {
528 if (flags)
529 shifter_out_im(var, shift - 1);
530 tcg_gen_shri_i32(var, var, shift);
531 }
532 break;
533 case 2: /* ASR */
534 if (shift == 0)
535 shift = 32;
536 if (flags)
537 shifter_out_im(var, shift - 1);
538 if (shift == 32)
539 shift = 31;
540 tcg_gen_sari_i32(var, var, shift);
541 break;
542 case 3: /* ROR/RRX */
543 if (shift != 0) {
544 if (flags)
545 shifter_out_im(var, shift - 1);
546 tcg_gen_rotri_i32(var, var, shift); break;
547 } else {
548 TCGv_i32 tmp = tcg_temp_new_i32();
549 tcg_gen_shli_i32(tmp, cpu_CF, 31);
550 if (flags)
551 shifter_out_im(var, 0);
552 tcg_gen_shri_i32(var, var, 1);
553 tcg_gen_or_i32(var, var, tmp);
554 tcg_temp_free_i32(tmp);
555 }
556 }
557 }
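/* The zero-immediate cases above implement the ARM special encodings:
 * LSR #0 and ASR #0 in the instruction mean a shift by 32, and ROR #0
 * means RRX (rotate right by one through the carry flag). */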
558
559 static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
560 TCGv_i32 shift, int flags)
561 {
562 if (flags) {
563 switch (shiftop) {
564 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
565 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
566 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
567 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
568 }
569 } else {
570 switch (shiftop) {
571 case 0:
572 gen_shl(var, var, shift);
573 break;
574 case 1:
575 gen_shr(var, var, shift);
576 break;
577 case 2:
578 gen_sar(var, var, shift);
579 break;
580 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
581 tcg_gen_rotr_i32(var, var, shift); break;
582 }
583 }
584 tcg_temp_free_i32(shift);
585 }
586
587 #define PAS_OP(pfx) \
588 switch (op2) { \
589 case 0: gen_pas_helper(glue(pfx,add16)); break; \
590 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
591 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
592 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
593 case 4: gen_pas_helper(glue(pfx,add8)); break; \
594 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
595 }
596 static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
597 {
598 TCGv_ptr tmp;
599
600 switch (op1) {
601 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
602 case 1:
603 tmp = tcg_temp_new_ptr();
604 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
605 PAS_OP(s)
606 tcg_temp_free_ptr(tmp);
607 break;
608 case 5:
609 tmp = tcg_temp_new_ptr();
610 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
611 PAS_OP(u)
612 tcg_temp_free_ptr(tmp);
613 break;
614 #undef gen_pas_helper
615 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
616 case 2:
617 PAS_OP(q);
618 break;
619 case 3:
620 PAS_OP(sh);
621 break;
622 case 6:
623 PAS_OP(uq);
624 break;
625 case 7:
626 PAS_OP(uh);
627 break;
628 #undef gen_pas_helper
629 }
630 }
631 #undef PAS_OP
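/* In the ARM encoding handled above, op1 selects the variant (1 = signed
 * GE-flag-setting, 2 = signed saturating, 3 = signed halving, 5 = unsigned
 * GE-flag-setting, 6 = unsigned saturating, 7 = unsigned halving) and op2
 * selects the operation (add16/addsubx/subaddx/sub16/add8/sub8); the
 * GE-setting variants are passed a pointer to the CPU's GE flags. */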
632
633 /* For unknown reasons ARM and Thumb-2 use arbitrarily different encodings. */
634 #define PAS_OP(pfx) \
635 switch (op1) { \
636 case 0: gen_pas_helper(glue(pfx,add8)); break; \
637 case 1: gen_pas_helper(glue(pfx,add16)); break; \
638 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
639 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
640 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
641 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
642 }
643 static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
644 {
645 TCGv_ptr tmp;
646
647 switch (op2) {
648 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
649 case 0:
650 tmp = tcg_temp_new_ptr();
651 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
652 PAS_OP(s)
653 tcg_temp_free_ptr(tmp);
654 break;
655 case 4:
656 tmp = tcg_temp_new_ptr();
657 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
658 PAS_OP(u)
659 tcg_temp_free_ptr(tmp);
660 break;
661 #undef gen_pas_helper
662 #define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
663 case 1:
664 PAS_OP(q);
665 break;
666 case 2:
667 PAS_OP(sh);
668 break;
669 case 5:
670 PAS_OP(uq);
671 break;
672 case 6:
673 PAS_OP(uh);
674 break;
675 #undef gen_pas_helper
676 }
677 }
678 #undef PAS_OP
679
680 static void gen_test_cc(int cc, int label)
681 {
682 TCGv_i32 tmp;
683 int inv;
684
685 switch (cc) {
686 case 0: /* eq: Z */
687 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
688 break;
689 case 1: /* ne: !Z */
690 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
691 break;
692 case 2: /* cs: C */
693 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
694 break;
695 case 3: /* cc: !C */
696 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
697 break;
698 case 4: /* mi: N */
699 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
700 break;
701 case 5: /* pl: !N */
702 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
703 break;
704 case 6: /* vs: V */
705 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
706 break;
707 case 7: /* vc: !V */
708 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
709 break;
710 case 8: /* hi: C && !Z */
711 inv = gen_new_label();
712 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
713 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
714 gen_set_label(inv);
715 break;
716 case 9: /* ls: !C || Z */
717 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
718 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
719 break;
720 case 10: /* ge: N == V -> N ^ V == 0 */
721 tmp = tcg_temp_new_i32();
722 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
723 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
724 tcg_temp_free_i32(tmp);
725 break;
726 case 11: /* lt: N != V -> N ^ V != 0 */
727 tmp = tcg_temp_new_i32();
728 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
729 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
730 tcg_temp_free_i32(tmp);
731 break;
732 case 12: /* gt: !Z && N == V */
733 inv = gen_new_label();
734 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
735 tmp = tcg_temp_new_i32();
736 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
737 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
738 tcg_temp_free_i32(tmp);
739 gen_set_label(inv);
740 break;
741 case 13: /* le: Z || N != V */
742 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
743 tmp = tcg_temp_new_i32();
744 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
745 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
746 tcg_temp_free_i32(tmp);
747 break;
748 default:
749 fprintf(stderr, "Bad condition code 0x%x\n", cc);
750 abort();
751 }
752 }
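/* The signed conditions above are evaluated straight from the flag
 * temporaries: GE/LT test the sign of NF ^ VF (N == V when that sign bit
 * is clear), and GT/LE additionally skip or take the branch when Z is set. */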
753
754 static const uint8_t table_logic_cc[16] = {
755 1, /* and */
756 1, /* xor */
757 0, /* sub */
758 0, /* rsb */
759 0, /* add */
760 0, /* adc */
761 0, /* sbc */
762 0, /* rsc */
763 1, /* tst (and, no result written) */
764 1, /* teq (xor, no result written) */
765 0, /* cmp */
766 0, /* cmn */
767 1, /* orr */
768 1, /* mov */
769 1, /* bic */
770 1, /* mvn */
771 };
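/* Indexed by the data-processing opcode field: a 1 marks the logical ops,
 * whose S-bit forms set only N and Z here (C comes from the shifter and V
 * is left unchanged), while the arithmetic ops compute C and V explicitly. */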
772
773 /* Set PC and Thumb state from an immediate address. */
774 static inline void gen_bx_im(DisasContext *s, uint32_t addr)
775 {
776 TCGv_i32 tmp;
777
778 s->is_jmp = DISAS_UPDATE;
779 if (s->thumb != (addr & 1)) {
780 tmp = tcg_temp_new_i32();
781 tcg_gen_movi_i32(tmp, addr & 1);
782 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
783 tcg_temp_free_i32(tmp);
784 }
785 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
786 }
787
788 /* Set PC and Thumb state from var. var is marked as dead. */
789 static inline void gen_bx(DisasContext *s, TCGv_i32 var)
790 {
791 s->is_jmp = DISAS_UPDATE;
792 tcg_gen_andi_i32(cpu_R[15], var, ~1);
793 tcg_gen_andi_i32(var, var, 1);
794 store_cpu_field(var, thumb);
795 }
796
797 /* Variant of store_reg which uses branch&exchange logic when storing
798 to r15 in ARM architecture v7 and above. The source must be a temporary
799 and will be marked as dead. */
800 static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
801 int reg, TCGv_i32 var)
802 {
803 if (reg == 15 && ENABLE_ARCH_7) {
804 gen_bx(s, var);
805 } else {
806 store_reg(s, reg, var);
807 }
808 }
809
810 /* Variant of store_reg which uses branch&exchange logic when storing
811 * to r15 in ARM architecture v5T and above. This is used for storing
812 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
813 * in the ARM ARM (Architecture Reference Manual) which use the LoadWritePC() pseudocode function. */
814 static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
815 int reg, TCGv_i32 var)
816 {
817 if (reg == 15 && ENABLE_ARCH_5) {
818 gen_bx(s, var);
819 } else {
820 store_reg(s, reg, var);
821 }
822 }
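/* Summary of the PC-store helpers: store_reg() never interworks (bit 0 is
 * simply masked), store_reg_bx() interworks for data-processing results on
 * v7 and later, and store_reg_from_load() interworks for load results
 * (LDR/LDM/POP) on v5T and later, as described in the comments above. */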
823
824 /* Abstractions of "generate code to do a guest load/store for
825 * AArch32", where a vaddr is always 32 bits (and is zero
826 * extended if we're a 64 bit core) and data is also
827 * 32 bits unless specifically doing a 64 bit access.
828 * These functions work like tcg_gen_qemu_{ld,st}* except
829 * that their arguments are TCGv_i32 rather than TCGv.
830 */
831 #if TARGET_LONG_BITS == 32
832
833 #define DO_GEN_LD(OP) \
834 static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
835 { \
836 tcg_gen_qemu_##OP(val, addr, index); \
837 }
838
839 #define DO_GEN_ST(OP) \
840 static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
841 { \
842 tcg_gen_qemu_##OP(val, addr, index); \
843 }
844
845 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
846 {
847 tcg_gen_qemu_ld64(val, addr, index);
848 }
849
850 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
851 {
852 tcg_gen_qemu_st64(val, addr, index);
853 }
854
855 #else
856
857 #define DO_GEN_LD(OP) \
858 static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
859 { \
860 TCGv addr64 = tcg_temp_new(); \
861 TCGv val64 = tcg_temp_new(); \
862 tcg_gen_extu_i32_i64(addr64, addr); \
863 tcg_gen_qemu_##OP(val64, addr64, index); \
864 tcg_temp_free(addr64); \
865 tcg_gen_trunc_i64_i32(val, val64); \
866 tcg_temp_free(val64); \
867 }
868
869 #define DO_GEN_ST(OP) \
870 static inline void gen_aa32_##OP(TCGv_i32 val, TCGv_i32 addr, int index) \
871 { \
872 TCGv addr64 = tcg_temp_new(); \
873 TCGv val64 = tcg_temp_new(); \
874 tcg_gen_extu_i32_i64(addr64, addr); \
875 tcg_gen_extu_i32_i64(val64, val); \
876 tcg_gen_qemu_##OP(val64, addr64, index); \
877 tcg_temp_free(addr64); \
878 tcg_temp_free(val64); \
879 }
880
881 static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
882 {
883 TCGv addr64 = tcg_temp_new();
884 tcg_gen_extu_i32_i64(addr64, addr);
885 tcg_gen_qemu_ld64(val, addr64, index);
886 tcg_temp_free(addr64);
887 }
888
889 static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
890 {
891 TCGv addr64 = tcg_temp_new();
892 tcg_gen_extu_i32_i64(addr64, addr);
893 tcg_gen_qemu_st64(val, addr64, index);
894 tcg_temp_free(addr64);
895 }
896
897 #endif
898
899 DO_GEN_LD(ld8s)
900 DO_GEN_LD(ld8u)
901 DO_GEN_LD(ld16s)
902 DO_GEN_LD(ld16u)
903 DO_GEN_LD(ld32u)
904 DO_GEN_ST(st8)
905 DO_GEN_ST(st16)
906 DO_GEN_ST(st32)
907
908 static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
909 {
910 tcg_gen_movi_i32(cpu_R[15], val);
911 }
912
913 /* Force a TB lookup after an instruction that changes the CPU state. */
914 static inline void gen_lookup_tb(DisasContext *s)
915 {
916 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
917 s->is_jmp = DISAS_UPDATE;
918 }
919
920 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
921 TCGv_i32 var)
922 {
923 int val, rm, shift, shiftop;
924 TCGv_i32 offset;
925
926 if (!(insn & (1 << 25))) {
927 /* immediate */
928 val = insn & 0xfff;
929 if (!(insn & (1 << 23)))
930 val = -val;
931 if (val != 0)
932 tcg_gen_addi_i32(var, var, val);
933 } else {
934 /* shift/register */
935 rm = (insn) & 0xf;
936 shift = (insn >> 7) & 0x1f;
937 shiftop = (insn >> 5) & 3;
938 offset = load_reg(s, rm);
939 gen_arm_shift_im(offset, shiftop, shift, 0);
940 if (!(insn & (1 << 23)))
941 tcg_gen_sub_i32(var, var, offset);
942 else
943 tcg_gen_add_i32(var, var, offset);
944 tcg_temp_free_i32(offset);
945 }
946 }
947
948 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
949 int extra, TCGv_i32 var)
950 {
951 int val, rm;
952 TCGv_i32 offset;
953
954 if (insn & (1 << 22)) {
955 /* immediate */
956 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
957 if (!(insn & (1 << 23)))
958 val = -val;
959 val += extra;
960 if (val != 0)
961 tcg_gen_addi_i32(var, var, val);
962 } else {
963 /* register */
964 if (extra)
965 tcg_gen_addi_i32(var, var, extra);
966 rm = (insn) & 0xf;
967 offset = load_reg(s, rm);
968 if (!(insn & (1 << 23)))
969 tcg_gen_sub_i32(var, var, offset);
970 else
971 tcg_gen_add_i32(var, var, offset);
972 tcg_temp_free_i32(offset);
973 }
974 }
975
976 static TCGv_ptr get_fpstatus_ptr(int neon)
977 {
978 TCGv_ptr statusptr = tcg_temp_new_ptr();
979 int offset;
980 if (neon) {
981 offset = offsetof(CPUARMState, vfp.standard_fp_status);
982 } else {
983 offset = offsetof(CPUARMState, vfp.fp_status);
984 }
985 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
986 return statusptr;
987 }
988
989 #define VFP_OP2(name) \
990 static inline void gen_vfp_##name(int dp) \
991 { \
992 TCGv_ptr fpst = get_fpstatus_ptr(0); \
993 if (dp) { \
994 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
995 } else { \
996 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
997 } \
998 tcg_temp_free_ptr(fpst); \
999 }
1000
1001 VFP_OP2(add)
1002 VFP_OP2(sub)
1003 VFP_OP2(mul)
1004 VFP_OP2(div)
1005
1006 #undef VFP_OP2
1007
1008 static inline void gen_vfp_F1_mul(int dp)
1009 {
1010 /* Like gen_vfp_mul() but put result in F1 */
1011 TCGv_ptr fpst = get_fpstatus_ptr(0);
1012 if (dp) {
1013 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
1014 } else {
1015 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
1016 }
1017 tcg_temp_free_ptr(fpst);
1018 }
1019
1020 static inline void gen_vfp_F1_neg(int dp)
1021 {
1022 /* Like gen_vfp_neg() but put result in F1 */
1023 if (dp) {
1024 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1025 } else {
1026 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1027 }
1028 }
1029
1030 static inline void gen_vfp_abs(int dp)
1031 {
1032 if (dp)
1033 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1034 else
1035 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1036 }
1037
1038 static inline void gen_vfp_neg(int dp)
1039 {
1040 if (dp)
1041 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1042 else
1043 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1044 }
1045
1046 static inline void gen_vfp_sqrt(int dp)
1047 {
1048 if (dp)
1049 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1050 else
1051 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1052 }
1053
1054 static inline void gen_vfp_cmp(int dp)
1055 {
1056 if (dp)
1057 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1058 else
1059 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1060 }
1061
1062 static inline void gen_vfp_cmpe(int dp)
1063 {
1064 if (dp)
1065 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1066 else
1067 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1068 }
1069
1070 static inline void gen_vfp_F1_ld0(int dp)
1071 {
1072 if (dp)
1073 tcg_gen_movi_i64(cpu_F1d, 0);
1074 else
1075 tcg_gen_movi_i32(cpu_F1s, 0);
1076 }
1077
1078 #define VFP_GEN_ITOF(name) \
1079 static inline void gen_vfp_##name(int dp, int neon) \
1080 { \
1081 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1082 if (dp) { \
1083 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1084 } else { \
1085 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1086 } \
1087 tcg_temp_free_ptr(statusptr); \
1088 }
1089
1090 VFP_GEN_ITOF(uito)
1091 VFP_GEN_ITOF(sito)
1092 #undef VFP_GEN_ITOF
1093
1094 #define VFP_GEN_FTOI(name) \
1095 static inline void gen_vfp_##name(int dp, int neon) \
1096 { \
1097 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1098 if (dp) { \
1099 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1100 } else { \
1101 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1102 } \
1103 tcg_temp_free_ptr(statusptr); \
1104 }
1105
1106 VFP_GEN_FTOI(toui)
1107 VFP_GEN_FTOI(touiz)
1108 VFP_GEN_FTOI(tosi)
1109 VFP_GEN_FTOI(tosiz)
1110 #undef VFP_GEN_FTOI
1111
1112 #define VFP_GEN_FIX(name) \
1113 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1114 { \
1115 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
1116 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
1117 if (dp) { \
1118 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
1119 } else { \
1120 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
1121 } \
1122 tcg_temp_free_i32(tmp_shift); \
1123 tcg_temp_free_ptr(statusptr); \
1124 }
1125 VFP_GEN_FIX(tosh)
1126 VFP_GEN_FIX(tosl)
1127 VFP_GEN_FIX(touh)
1128 VFP_GEN_FIX(toul)
1129 VFP_GEN_FIX(shto)
1130 VFP_GEN_FIX(slto)
1131 VFP_GEN_FIX(uhto)
1132 VFP_GEN_FIX(ulto)
1133 #undef VFP_GEN_FIX
1134
1135 static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
1136 {
1137 if (dp) {
1138 gen_aa32_ld64(cpu_F0d, addr, IS_USER(s));
1139 } else {
1140 gen_aa32_ld32u(cpu_F0s, addr, IS_USER(s));
1141 }
1142 }
1143
1144 static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
1145 {
1146 if (dp) {
1147 gen_aa32_st64(cpu_F0d, addr, IS_USER(s));
1148 } else {
1149 gen_aa32_st32(cpu_F0s, addr, IS_USER(s));
1150 }
1151 }
1152
1153 static inline long
1154 vfp_reg_offset (int dp, int reg)
1155 {
1156 if (dp)
1157 return offsetof(CPUARMState, vfp.regs[reg]);
1158 else if (reg & 1) {
1159 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1160 + offsetof(CPU_DoubleU, l.upper);
1161 } else {
1162 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1163 + offsetof(CPU_DoubleU, l.lower);
1164 }
1165 }
1166
1167 /* Return the offset of a 32-bit piece of a NEON register.
1168 zero is the least significant end of the register. */
1169 static inline long
1170 neon_reg_offset (int reg, int n)
1171 {
1172 int sreg;
1173 sreg = reg * 2 + n;
1174 return vfp_reg_offset(0, sreg);
1175 }
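/* NEON D registers alias the same vfp.regs[] storage as the VFP registers:
 * pass 0 selects the low 32 bits of Dn and pass 1 the high 32 bits, so a
 * 128-bit Qn register is just the two consecutive D registers 2n and 2n+1. */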
1176
1177 static TCGv_i32 neon_load_reg(int reg, int pass)
1178 {
1179 TCGv_i32 tmp = tcg_temp_new_i32();
1180 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1181 return tmp;
1182 }
1183
1184 static void neon_store_reg(int reg, int pass, TCGv_i32 var)
1185 {
1186 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1187 tcg_temp_free_i32(var);
1188 }
1189
1190 static inline void neon_load_reg64(TCGv_i64 var, int reg)
1191 {
1192 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1193 }
1194
1195 static inline void neon_store_reg64(TCGv_i64 var, int reg)
1196 {
1197 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1198 }
1199
1200 #define tcg_gen_ld_f32 tcg_gen_ld_i32
1201 #define tcg_gen_ld_f64 tcg_gen_ld_i64
1202 #define tcg_gen_st_f32 tcg_gen_st_i32
1203 #define tcg_gen_st_f64 tcg_gen_st_i64
1204
1205 static inline void gen_mov_F0_vreg(int dp, int reg)
1206 {
1207 if (dp)
1208 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1209 else
1210 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1211 }
1212
1213 static inline void gen_mov_F1_vreg(int dp, int reg)
1214 {
1215 if (dp)
1216 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
1217 else
1218 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
1219 }
1220
1221 static inline void gen_mov_vreg_F0(int dp, int reg)
1222 {
1223 if (dp)
1224 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
1225 else
1226 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
1227 }
1228
1229 #define ARM_CP_RW_BIT (1 << 20)
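/* Bit 20 is the L bit of coprocessor instructions: set for transfers from
 * the coprocessor into ARM registers (e.g. TMRC/TMRRC below) and clear for
 * transfers into the coprocessor (TMCR/TMCRR). */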
1230
1231 static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
1232 {
1233 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1234 }
1235
1236 static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
1237 {
1238 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
1239 }
1240
1241 static inline TCGv_i32 iwmmxt_load_creg(int reg)
1242 {
1243 TCGv_i32 var = tcg_temp_new_i32();
1244 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1245 return var;
1246 }
1247
1248 static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
1249 {
1250 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
1251 tcg_temp_free_i32(var);
1252 }
1253
1254 static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1255 {
1256 iwmmxt_store_reg(cpu_M0, rn);
1257 }
1258
1259 static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1260 {
1261 iwmmxt_load_reg(cpu_M0, rn);
1262 }
1263
1264 static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1265 {
1266 iwmmxt_load_reg(cpu_V1, rn);
1267 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1268 }
1269
1270 static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1271 {
1272 iwmmxt_load_reg(cpu_V1, rn);
1273 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1274 }
1275
1276 static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1277 {
1278 iwmmxt_load_reg(cpu_V1, rn);
1279 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1280 }
1281
1282 #define IWMMXT_OP(name) \
1283 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1284 { \
1285 iwmmxt_load_reg(cpu_V1, rn); \
1286 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1287 }
1288
1289 #define IWMMXT_OP_ENV(name) \
1290 static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1291 { \
1292 iwmmxt_load_reg(cpu_V1, rn); \
1293 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1294 }
1295
1296 #define IWMMXT_OP_ENV_SIZE(name) \
1297 IWMMXT_OP_ENV(name##b) \
1298 IWMMXT_OP_ENV(name##w) \
1299 IWMMXT_OP_ENV(name##l)
1300
1301 #define IWMMXT_OP_ENV1(name) \
1302 static inline void gen_op_iwmmxt_##name##_M0(void) \
1303 { \
1304 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1305 }
1306
1307 IWMMXT_OP(maddsq)
1308 IWMMXT_OP(madduq)
1309 IWMMXT_OP(sadb)
1310 IWMMXT_OP(sadw)
1311 IWMMXT_OP(mulslw)
1312 IWMMXT_OP(mulshw)
1313 IWMMXT_OP(mululw)
1314 IWMMXT_OP(muluhw)
1315 IWMMXT_OP(macsw)
1316 IWMMXT_OP(macuw)
1317
1318 IWMMXT_OP_ENV_SIZE(unpackl)
1319 IWMMXT_OP_ENV_SIZE(unpackh)
1320
1321 IWMMXT_OP_ENV1(unpacklub)
1322 IWMMXT_OP_ENV1(unpackluw)
1323 IWMMXT_OP_ENV1(unpacklul)
1324 IWMMXT_OP_ENV1(unpackhub)
1325 IWMMXT_OP_ENV1(unpackhuw)
1326 IWMMXT_OP_ENV1(unpackhul)
1327 IWMMXT_OP_ENV1(unpacklsb)
1328 IWMMXT_OP_ENV1(unpacklsw)
1329 IWMMXT_OP_ENV1(unpacklsl)
1330 IWMMXT_OP_ENV1(unpackhsb)
1331 IWMMXT_OP_ENV1(unpackhsw)
1332 IWMMXT_OP_ENV1(unpackhsl)
1333
1334 IWMMXT_OP_ENV_SIZE(cmpeq)
1335 IWMMXT_OP_ENV_SIZE(cmpgtu)
1336 IWMMXT_OP_ENV_SIZE(cmpgts)
1337
1338 IWMMXT_OP_ENV_SIZE(mins)
1339 IWMMXT_OP_ENV_SIZE(minu)
1340 IWMMXT_OP_ENV_SIZE(maxs)
1341 IWMMXT_OP_ENV_SIZE(maxu)
1342
1343 IWMMXT_OP_ENV_SIZE(subn)
1344 IWMMXT_OP_ENV_SIZE(addn)
1345 IWMMXT_OP_ENV_SIZE(subu)
1346 IWMMXT_OP_ENV_SIZE(addu)
1347 IWMMXT_OP_ENV_SIZE(subs)
1348 IWMMXT_OP_ENV_SIZE(adds)
1349
1350 IWMMXT_OP_ENV(avgb0)
1351 IWMMXT_OP_ENV(avgb1)
1352 IWMMXT_OP_ENV(avgw0)
1353 IWMMXT_OP_ENV(avgw1)
1354
1355 IWMMXT_OP(msadb)
1356
1357 IWMMXT_OP_ENV(packuw)
1358 IWMMXT_OP_ENV(packul)
1359 IWMMXT_OP_ENV(packuq)
1360 IWMMXT_OP_ENV(packsw)
1361 IWMMXT_OP_ENV(packsl)
1362 IWMMXT_OP_ENV(packsq)
1363
1364 static void gen_op_iwmmxt_set_mup(void)
1365 {
1366 TCGv_i32 tmp;
1367 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1368 tcg_gen_ori_i32(tmp, tmp, 2);
1369 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1370 }
1371
1372 static void gen_op_iwmmxt_set_cup(void)
1373 {
1374 TCGv_i32 tmp;
1375 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1376 tcg_gen_ori_i32(tmp, tmp, 1);
1377 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1378 }
1379
1380 static void gen_op_iwmmxt_setpsr_nz(void)
1381 {
1382 TCGv_i32 tmp = tcg_temp_new_i32();
1383 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1384 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1385 }
1386
1387 static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1388 {
1389 iwmmxt_load_reg(cpu_V1, rn);
1390 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
1391 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1392 }
1393
1394 static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1395 TCGv_i32 dest)
1396 {
1397 int rd;
1398 uint32_t offset;
1399 TCGv_i32 tmp;
1400
1401 rd = (insn >> 16) & 0xf;
1402 tmp = load_reg(s, rd);
1403
1404 offset = (insn & 0xff) << ((insn >> 7) & 2);
1405 if (insn & (1 << 24)) {
1406 /* Pre indexed */
1407 if (insn & (1 << 23))
1408 tcg_gen_addi_i32(tmp, tmp, offset);
1409 else
1410 tcg_gen_addi_i32(tmp, tmp, -offset);
1411 tcg_gen_mov_i32(dest, tmp);
1412 if (insn & (1 << 21))
1413 store_reg(s, rd, tmp);
1414 else
1415 tcg_temp_free_i32(tmp);
1416 } else if (insn & (1 << 21)) {
1417 /* Post indexed */
1418 tcg_gen_mov_i32(dest, tmp);
1419 if (insn & (1 << 23))
1420 tcg_gen_addi_i32(tmp, tmp, offset);
1421 else
1422 tcg_gen_addi_i32(tmp, tmp, -offset);
1423 store_reg(s, rd, tmp);
1424 } else if (!(insn & (1 << 23)))
1425 return 1;
1426 return 0;
1427 }
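/* gen_iwmmxt_address() computes the transfer address for WLDR/WSTR: the
 * 8-bit immediate offset is scaled by 4 for the word/doubleword forms,
 * added or subtracted according to the U bit (bit 23), and applied either
 * pre-indexed with optional base writeback or post-indexed (bits 24/21). */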
1428
1429 static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
1430 {
1431 int rd = (insn >> 0) & 0xf;
1432 TCGv_i32 tmp;
1433
1434 if (insn & (1 << 8)) {
1435 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
1436 return 1;
1437 } else {
1438 tmp = iwmmxt_load_creg(rd);
1439 }
1440 } else {
1441 tmp = tcg_temp_new_i32();
1442 iwmmxt_load_reg(cpu_V0, rd);
1443 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1444 }
1445 tcg_gen_andi_i32(tmp, tmp, mask);
1446 tcg_gen_mov_i32(dest, tmp);
1447 tcg_temp_free_i32(tmp);
1448 return 0;
1449 }
1450
1451 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
1452 (i.e. an undefined instruction). */
1453 static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
1454 {
1455 int rd, wrd;
1456 int rdhi, rdlo, rd0, rd1, i;
1457 TCGv_i32 addr;
1458 TCGv_i32 tmp, tmp2, tmp3;
1459
1460 if ((insn & 0x0e000e00) == 0x0c000000) {
1461 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1462 wrd = insn & 0xf;
1463 rdlo = (insn >> 12) & 0xf;
1464 rdhi = (insn >> 16) & 0xf;
1465 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1466 iwmmxt_load_reg(cpu_V0, wrd);
1467 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1468 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1469 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
1470 } else { /* TMCRR */
1471 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1472 iwmmxt_store_reg(cpu_V0, wrd);
1473 gen_op_iwmmxt_set_mup();
1474 }
1475 return 0;
1476 }
1477
1478 wrd = (insn >> 12) & 0xf;
1479 addr = tcg_temp_new_i32();
1480 if (gen_iwmmxt_address(s, insn, addr)) {
1481 tcg_temp_free_i32(addr);
1482 return 1;
1483 }
1484 if (insn & ARM_CP_RW_BIT) {
1485 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1486 tmp = tcg_temp_new_i32();
1487 gen_aa32_ld32u(tmp, addr, IS_USER(s));
1488 iwmmxt_store_creg(wrd, tmp);
1489 } else {
1490 i = 1;
1491 if (insn & (1 << 8)) {
1492 if (insn & (1 << 22)) { /* WLDRD */
1493 gen_aa32_ld64(cpu_M0, addr, IS_USER(s));
1494 i = 0;
1495 } else { /* WLDRW wRd */
1496 tmp = tcg_temp_new_i32();
1497 gen_aa32_ld32u(tmp, addr, IS_USER(s));
1498 }
1499 } else {
1500 tmp = tcg_temp_new_i32();
1501 if (insn & (1 << 22)) { /* WLDRH */
1502 gen_aa32_ld16u(tmp, addr, IS_USER(s));
1503 } else { /* WLDRB */
1504 gen_aa32_ld8u(tmp, addr, IS_USER(s));
1505 }
1506 }
1507 if (i) {
1508 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1509 tcg_temp_free_i32(tmp);
1510 }
1511 gen_op_iwmmxt_movq_wRn_M0(wrd);
1512 }
1513 } else {
1514 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1515 tmp = iwmmxt_load_creg(wrd);
1516 gen_aa32_st32(tmp, addr, IS_USER(s));
1517 } else {
1518 gen_op_iwmmxt_movq_M0_wRn(wrd);
1519 tmp = tcg_temp_new_i32();
1520 if (insn & (1 << 8)) {
1521 if (insn & (1 << 22)) { /* WSTRD */
1522 gen_aa32_st64(cpu_M0, addr, IS_USER(s));
1523 } else { /* WSTRW wRd */
1524 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1525 gen_aa32_st32(tmp, addr, IS_USER(s));
1526 }
1527 } else {
1528 if (insn & (1 << 22)) { /* WSTRH */
1529 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1530 gen_aa32_st16(tmp, addr, IS_USER(s));
1531 } else { /* WSTRB */
1532 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1533 gen_aa32_st8(tmp, addr, IS_USER(s));
1534 }
1535 }
1536 }
1537 tcg_temp_free_i32(tmp);
1538 }
1539 tcg_temp_free_i32(addr);
1540 return 0;
1541 }
1542
1543 if ((insn & 0x0f000000) != 0x0e000000)
1544 return 1;
1545
1546 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1547 case 0x000: /* WOR */
1548 wrd = (insn >> 12) & 0xf;
1549 rd0 = (insn >> 0) & 0xf;
1550 rd1 = (insn >> 16) & 0xf;
1551 gen_op_iwmmxt_movq_M0_wRn(rd0);
1552 gen_op_iwmmxt_orq_M0_wRn(rd1);
1553 gen_op_iwmmxt_setpsr_nz();
1554 gen_op_iwmmxt_movq_wRn_M0(wrd);
1555 gen_op_iwmmxt_set_mup();
1556 gen_op_iwmmxt_set_cup();
1557 break;
1558 case 0x011: /* TMCR */
1559 if (insn & 0xf)
1560 return 1;
1561 rd = (insn >> 12) & 0xf;
1562 wrd = (insn >> 16) & 0xf;
1563 switch (wrd) {
1564 case ARM_IWMMXT_wCID:
1565 case ARM_IWMMXT_wCASF:
1566 break;
1567 case ARM_IWMMXT_wCon:
1568 gen_op_iwmmxt_set_cup();
1569 /* Fall through. */
1570 case ARM_IWMMXT_wCSSF:
1571 tmp = iwmmxt_load_creg(wrd);
1572 tmp2 = load_reg(s, rd);
1573 tcg_gen_andc_i32(tmp, tmp, tmp2);
1574 tcg_temp_free_i32(tmp2);
1575 iwmmxt_store_creg(wrd, tmp);
1576 break;
1577 case ARM_IWMMXT_wCGR0:
1578 case ARM_IWMMXT_wCGR1:
1579 case ARM_IWMMXT_wCGR2:
1580 case ARM_IWMMXT_wCGR3:
1581 gen_op_iwmmxt_set_cup();
1582 tmp = load_reg(s, rd);
1583 iwmmxt_store_creg(wrd, tmp);
1584 break;
1585 default:
1586 return 1;
1587 }
1588 break;
1589 case 0x100: /* WXOR */
1590 wrd = (insn >> 12) & 0xf;
1591 rd0 = (insn >> 0) & 0xf;
1592 rd1 = (insn >> 16) & 0xf;
1593 gen_op_iwmmxt_movq_M0_wRn(rd0);
1594 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1595 gen_op_iwmmxt_setpsr_nz();
1596 gen_op_iwmmxt_movq_wRn_M0(wrd);
1597 gen_op_iwmmxt_set_mup();
1598 gen_op_iwmmxt_set_cup();
1599 break;
1600 case 0x111: /* TMRC */
1601 if (insn & 0xf)
1602 return 1;
1603 rd = (insn >> 12) & 0xf;
1604 wrd = (insn >> 16) & 0xf;
1605 tmp = iwmmxt_load_creg(wrd);
1606 store_reg(s, rd, tmp);
1607 break;
1608 case 0x300: /* WANDN */
1609 wrd = (insn >> 12) & 0xf;
1610 rd0 = (insn >> 0) & 0xf;
1611 rd1 = (insn >> 16) & 0xf;
1612 gen_op_iwmmxt_movq_M0_wRn(rd0);
1613 tcg_gen_neg_i64(cpu_M0, cpu_M0);
1614 gen_op_iwmmxt_andq_M0_wRn(rd1);
1615 gen_op_iwmmxt_setpsr_nz();
1616 gen_op_iwmmxt_movq_wRn_M0(wrd);
1617 gen_op_iwmmxt_set_mup();
1618 gen_op_iwmmxt_set_cup();
1619 break;
1620 case 0x200: /* WAND */
1621 wrd = (insn >> 12) & 0xf;
1622 rd0 = (insn >> 0) & 0xf;
1623 rd1 = (insn >> 16) & 0xf;
1624 gen_op_iwmmxt_movq_M0_wRn(rd0);
1625 gen_op_iwmmxt_andq_M0_wRn(rd1);
1626 gen_op_iwmmxt_setpsr_nz();
1627 gen_op_iwmmxt_movq_wRn_M0(wrd);
1628 gen_op_iwmmxt_set_mup();
1629 gen_op_iwmmxt_set_cup();
1630 break;
1631 case 0x810: case 0xa10: /* WMADD */
1632 wrd = (insn >> 12) & 0xf;
1633 rd0 = (insn >> 0) & 0xf;
1634 rd1 = (insn >> 16) & 0xf;
1635 gen_op_iwmmxt_movq_M0_wRn(rd0);
1636 if (insn & (1 << 21))
1637 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1638 else
1639 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1640 gen_op_iwmmxt_movq_wRn_M0(wrd);
1641 gen_op_iwmmxt_set_mup();
1642 break;
1643 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1644 wrd = (insn >> 12) & 0xf;
1645 rd0 = (insn >> 16) & 0xf;
1646 rd1 = (insn >> 0) & 0xf;
1647 gen_op_iwmmxt_movq_M0_wRn(rd0);
1648 switch ((insn >> 22) & 3) {
1649 case 0:
1650 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1651 break;
1652 case 1:
1653 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1654 break;
1655 case 2:
1656 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1657 break;
1658 case 3:
1659 return 1;
1660 }
1661 gen_op_iwmmxt_movq_wRn_M0(wrd);
1662 gen_op_iwmmxt_set_mup();
1663 gen_op_iwmmxt_set_cup();
1664 break;
1665 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1666 wrd = (insn >> 12) & 0xf;
1667 rd0 = (insn >> 16) & 0xf;
1668 rd1 = (insn >> 0) & 0xf;
1669 gen_op_iwmmxt_movq_M0_wRn(rd0);
1670 switch ((insn >> 22) & 3) {
1671 case 0:
1672 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1673 break;
1674 case 1:
1675 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1676 break;
1677 case 2:
1678 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1679 break;
1680 case 3:
1681 return 1;
1682 }
1683 gen_op_iwmmxt_movq_wRn_M0(wrd);
1684 gen_op_iwmmxt_set_mup();
1685 gen_op_iwmmxt_set_cup();
1686 break;
1687 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1688 wrd = (insn >> 12) & 0xf;
1689 rd0 = (insn >> 16) & 0xf;
1690 rd1 = (insn >> 0) & 0xf;
1691 gen_op_iwmmxt_movq_M0_wRn(rd0);
1692 if (insn & (1 << 22))
1693 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1694 else
1695 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1696 if (!(insn & (1 << 20)))
1697 gen_op_iwmmxt_addl_M0_wRn(wrd);
1698 gen_op_iwmmxt_movq_wRn_M0(wrd);
1699 gen_op_iwmmxt_set_mup();
1700 break;
1701 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1702 wrd = (insn >> 12) & 0xf;
1703 rd0 = (insn >> 16) & 0xf;
1704 rd1 = (insn >> 0) & 0xf;
1705 gen_op_iwmmxt_movq_M0_wRn(rd0);
1706 if (insn & (1 << 21)) {
1707 if (insn & (1 << 20))
1708 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1709 else
1710 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1711 } else {
1712 if (insn & (1 << 20))
1713 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1714 else
1715 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1716 }
1717 gen_op_iwmmxt_movq_wRn_M0(wrd);
1718 gen_op_iwmmxt_set_mup();
1719 break;
1720 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1721 wrd = (insn >> 12) & 0xf;
1722 rd0 = (insn >> 16) & 0xf;
1723 rd1 = (insn >> 0) & 0xf;
1724 gen_op_iwmmxt_movq_M0_wRn(rd0);
1725 if (insn & (1 << 21))
1726 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1727 else
1728 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1729 if (!(insn & (1 << 20))) {
1730 iwmmxt_load_reg(cpu_V1, wrd);
1731 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1732 }
1733 gen_op_iwmmxt_movq_wRn_M0(wrd);
1734 gen_op_iwmmxt_set_mup();
1735 break;
1736 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1737 wrd = (insn >> 12) & 0xf;
1738 rd0 = (insn >> 16) & 0xf;
1739 rd1 = (insn >> 0) & 0xf;
1740 gen_op_iwmmxt_movq_M0_wRn(rd0);
1741 switch ((insn >> 22) & 3) {
1742 case 0:
1743 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1744 break;
1745 case 1:
1746 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1747 break;
1748 case 2:
1749 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1750 break;
1751 case 3:
1752 return 1;
1753 }
1754 gen_op_iwmmxt_movq_wRn_M0(wrd);
1755 gen_op_iwmmxt_set_mup();
1756 gen_op_iwmmxt_set_cup();
1757 break;
1758 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1759 wrd = (insn >> 12) & 0xf;
1760 rd0 = (insn >> 16) & 0xf;
1761 rd1 = (insn >> 0) & 0xf;
1762 gen_op_iwmmxt_movq_M0_wRn(rd0);
1763 if (insn & (1 << 22)) {
1764 if (insn & (1 << 20))
1765 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1766 else
1767 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1768 } else {
1769 if (insn & (1 << 20))
1770 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1771 else
1772 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1773 }
1774 gen_op_iwmmxt_movq_wRn_M0(wrd);
1775 gen_op_iwmmxt_set_mup();
1776 gen_op_iwmmxt_set_cup();
1777 break;
1778 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1779 wrd = (insn >> 12) & 0xf;
1780 rd0 = (insn >> 16) & 0xf;
1781 rd1 = (insn >> 0) & 0xf;
1782 gen_op_iwmmxt_movq_M0_wRn(rd0);
1783 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1784 tcg_gen_andi_i32(tmp, tmp, 7);
1785 iwmmxt_load_reg(cpu_V1, rd1);
1786 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
1787 tcg_temp_free_i32(tmp);
1788 gen_op_iwmmxt_movq_wRn_M0(wrd);
1789 gen_op_iwmmxt_set_mup();
1790 break;
1791 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1792 if (((insn >> 6) & 3) == 3)
1793 return 1;
1794 rd = (insn >> 12) & 0xf;
1795 wrd = (insn >> 16) & 0xf;
1796 tmp = load_reg(s, rd);
1797 gen_op_iwmmxt_movq_M0_wRn(wrd);
1798 switch ((insn >> 6) & 3) {
1799 case 0:
1800 tmp2 = tcg_const_i32(0xff);
1801 tmp3 = tcg_const_i32((insn & 7) << 3);
1802 break;
1803 case 1:
1804 tmp2 = tcg_const_i32(0xffff);
1805 tmp3 = tcg_const_i32((insn & 3) << 4);
1806 break;
1807 case 2:
1808 tmp2 = tcg_const_i32(0xffffffff);
1809 tmp3 = tcg_const_i32((insn & 1) << 5);
1810 break;
1811 default:
1812 TCGV_UNUSED_I32(tmp2);
1813 TCGV_UNUSED_I32(tmp3);
1814 }
1815 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
1816 tcg_temp_free_i32(tmp3);
1817 tcg_temp_free_i32(tmp2);
1818 tcg_temp_free_i32(tmp);
1819 gen_op_iwmmxt_movq_wRn_M0(wrd);
1820 gen_op_iwmmxt_set_mup();
1821 break;
1822 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1823 rd = (insn >> 12) & 0xf;
1824 wrd = (insn >> 16) & 0xf;
1825 if (rd == 15 || ((insn >> 22) & 3) == 3)
1826 return 1;
1827 gen_op_iwmmxt_movq_M0_wRn(wrd);
1828 tmp = tcg_temp_new_i32();
1829 switch ((insn >> 22) & 3) {
1830 case 0:
1831 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1832 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1833 if (insn & 8) {
1834 tcg_gen_ext8s_i32(tmp, tmp);
1835 } else {
1836 tcg_gen_andi_i32(tmp, tmp, 0xff);
1837 }
1838 break;
1839 case 1:
1840 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1841 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1842 if (insn & 8) {
1843 tcg_gen_ext16s_i32(tmp, tmp);
1844 } else {
1845 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1846 }
1847 break;
1848 case 2:
1849 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1850 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1851 break;
1852 }
1853 store_reg(s, rd, tmp);
1854 break;
1855 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1856 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1857 return 1;
1858 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1859 switch ((insn >> 22) & 3) {
1860 case 0:
1861 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
1862 break;
1863 case 1:
1864 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
1865 break;
1866 case 2:
1867 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
1868 break;
1869 }
1870 tcg_gen_shli_i32(tmp, tmp, 28);
1871 gen_set_nzcv(tmp);
1872 tcg_temp_free_i32(tmp);
1873 break;
1874 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1875 if (((insn >> 6) & 3) == 3)
1876 return 1;
1877 rd = (insn >> 12) & 0xf;
1878 wrd = (insn >> 16) & 0xf;
1879 tmp = load_reg(s, rd);
1880 switch ((insn >> 6) & 3) {
1881 case 0:
1882 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
1883 break;
1884 case 1:
1885 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
1886 break;
1887 case 2:
1888 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
1889 break;
1890 }
1891 tcg_temp_free_i32(tmp);
1892 gen_op_iwmmxt_movq_wRn_M0(wrd);
1893 gen_op_iwmmxt_set_mup();
1894 break;
1895 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1896 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1897 return 1;
1898 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1899 tmp2 = tcg_temp_new_i32();
1900 tcg_gen_mov_i32(tmp2, tmp);
1901 switch ((insn >> 22) & 3) {
1902 case 0:
1903 for (i = 0; i < 7; i ++) {
1904 tcg_gen_shli_i32(tmp2, tmp2, 4);
1905 tcg_gen_and_i32(tmp, tmp, tmp2);
1906 }
1907 break;
1908 case 1:
1909 for (i = 0; i < 3; i ++) {
1910 tcg_gen_shli_i32(tmp2, tmp2, 8);
1911 tcg_gen_and_i32(tmp, tmp, tmp2);
1912 }
1913 break;
1914 case 2:
1915 tcg_gen_shli_i32(tmp2, tmp2, 16);
1916 tcg_gen_and_i32(tmp, tmp, tmp2);
1917 break;
1918 }
1919 gen_set_nzcv(tmp);
1920 tcg_temp_free_i32(tmp2);
1921 tcg_temp_free_i32(tmp);
1922 break;
1923 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1924 wrd = (insn >> 12) & 0xf;
1925 rd0 = (insn >> 16) & 0xf;
1926 gen_op_iwmmxt_movq_M0_wRn(rd0);
1927 switch ((insn >> 22) & 3) {
1928 case 0:
1929 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
1930 break;
1931 case 1:
1932 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
1933 break;
1934 case 2:
1935 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
1936 break;
1937 case 3:
1938 return 1;
1939 }
1940 gen_op_iwmmxt_movq_wRn_M0(wrd);
1941 gen_op_iwmmxt_set_mup();
1942 break;
1943 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1944 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
1945 return 1;
1946 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
1947 tmp2 = tcg_temp_new_i32();
1948 tcg_gen_mov_i32(tmp2, tmp);
1949 switch ((insn >> 22) & 3) {
1950 case 0:
1951 for (i = 0; i < 7; i ++) {
1952 tcg_gen_shli_i32(tmp2, tmp2, 4);
1953 tcg_gen_or_i32(tmp, tmp, tmp2);
1954 }
1955 break;
1956 case 1:
1957 for (i = 0; i < 3; i ++) {
1958 tcg_gen_shli_i32(tmp2, tmp2, 8);
1959 tcg_gen_or_i32(tmp, tmp, tmp2);
1960 }
1961 break;
1962 case 2:
1963 tcg_gen_shli_i32(tmp2, tmp2, 16);
1964 tcg_gen_or_i32(tmp, tmp, tmp2);
1965 break;
1966 }
1967 gen_set_nzcv(tmp);
1968 tcg_temp_free_i32(tmp2);
1969 tcg_temp_free_i32(tmp);
1970 break;
1971 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1972 rd = (insn >> 12) & 0xf;
1973 rd0 = (insn >> 16) & 0xf;
1974 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
1975 return 1;
1976 gen_op_iwmmxt_movq_M0_wRn(rd0);
1977 tmp = tcg_temp_new_i32();
1978 switch ((insn >> 22) & 3) {
1979 case 0:
1980 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
1981 break;
1982 case 1:
1983 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
1984 break;
1985 case 2:
1986 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
1987 break;
1988 }
1989 store_reg(s, rd, tmp);
1990 break;
1991 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1992 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1993 wrd = (insn >> 12) & 0xf;
1994 rd0 = (insn >> 16) & 0xf;
1995 rd1 = (insn >> 0) & 0xf;
1996 gen_op_iwmmxt_movq_M0_wRn(rd0);
1997 switch ((insn >> 22) & 3) {
1998 case 0:
1999 if (insn & (1 << 21))
2000 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2001 else
2002 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2003 break;
2004 case 1:
2005 if (insn & (1 << 21))
2006 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2007 else
2008 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2009 break;
2010 case 2:
2011 if (insn & (1 << 21))
2012 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2013 else
2014 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2015 break;
2016 case 3:
2017 return 1;
2018 }
2019 gen_op_iwmmxt_movq_wRn_M0(wrd);
2020 gen_op_iwmmxt_set_mup();
2021 gen_op_iwmmxt_set_cup();
2022 break;
2023 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2024 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2025 wrd = (insn >> 12) & 0xf;
2026 rd0 = (insn >> 16) & 0xf;
2027 gen_op_iwmmxt_movq_M0_wRn(rd0);
2028 switch ((insn >> 22) & 3) {
2029 case 0:
2030 if (insn & (1 << 21))
2031 gen_op_iwmmxt_unpacklsb_M0();
2032 else
2033 gen_op_iwmmxt_unpacklub_M0();
2034 break;
2035 case 1:
2036 if (insn & (1 << 21))
2037 gen_op_iwmmxt_unpacklsw_M0();
2038 else
2039 gen_op_iwmmxt_unpackluw_M0();
2040 break;
2041 case 2:
2042 if (insn & (1 << 21))
2043 gen_op_iwmmxt_unpacklsl_M0();
2044 else
2045 gen_op_iwmmxt_unpacklul_M0();
2046 break;
2047 case 3:
2048 return 1;
2049 }
2050 gen_op_iwmmxt_movq_wRn_M0(wrd);
2051 gen_op_iwmmxt_set_mup();
2052 gen_op_iwmmxt_set_cup();
2053 break;
2054 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2055 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2056 wrd = (insn >> 12) & 0xf;
2057 rd0 = (insn >> 16) & 0xf;
2058 gen_op_iwmmxt_movq_M0_wRn(rd0);
2059 switch ((insn >> 22) & 3) {
2060 case 0:
2061 if (insn & (1 << 21))
2062 gen_op_iwmmxt_unpackhsb_M0();
2063 else
2064 gen_op_iwmmxt_unpackhub_M0();
2065 break;
2066 case 1:
2067 if (insn & (1 << 21))
2068 gen_op_iwmmxt_unpackhsw_M0();
2069 else
2070 gen_op_iwmmxt_unpackhuw_M0();
2071 break;
2072 case 2:
2073 if (insn & (1 << 21))
2074 gen_op_iwmmxt_unpackhsl_M0();
2075 else
2076 gen_op_iwmmxt_unpackhul_M0();
2077 break;
2078 case 3:
2079 return 1;
2080 }
2081 gen_op_iwmmxt_movq_wRn_M0(wrd);
2082 gen_op_iwmmxt_set_mup();
2083 gen_op_iwmmxt_set_cup();
2084 break;
2085 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2086 case 0x214: case 0x614: case 0xa14: case 0xe14:
2087 if (((insn >> 22) & 3) == 0)
2088 return 1;
2089 wrd = (insn >> 12) & 0xf;
2090 rd0 = (insn >> 16) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 tmp = tcg_temp_new_i32();
2093 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2094 tcg_temp_free_i32(tmp);
2095 return 1;
2096 }
2097 switch ((insn >> 22) & 3) {
2098 case 1:
2099 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
2100 break;
2101 case 2:
2102 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
2103 break;
2104 case 3:
2105 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
2106 break;
2107 }
2108 tcg_temp_free_i32(tmp);
2109 gen_op_iwmmxt_movq_wRn_M0(wrd);
2110 gen_op_iwmmxt_set_mup();
2111 gen_op_iwmmxt_set_cup();
2112 break;
2113 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2114 case 0x014: case 0x414: case 0x814: case 0xc14:
2115 if (((insn >> 22) & 3) == 0)
2116 return 1;
2117 wrd = (insn >> 12) & 0xf;
2118 rd0 = (insn >> 16) & 0xf;
2119 gen_op_iwmmxt_movq_M0_wRn(rd0);
2120 tmp = tcg_temp_new_i32();
2121 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2122 tcg_temp_free_i32(tmp);
2123 return 1;
2124 }
2125 switch ((insn >> 22) & 3) {
2126 case 1:
2127 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2128 break;
2129 case 2:
2130 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2131 break;
2132 case 3:
2133 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2134 break;
2135 }
2136 tcg_temp_free_i32(tmp);
2137 gen_op_iwmmxt_movq_wRn_M0(wrd);
2138 gen_op_iwmmxt_set_mup();
2139 gen_op_iwmmxt_set_cup();
2140 break;
2141 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2142 case 0x114: case 0x514: case 0x914: case 0xd14:
2143 if (((insn >> 22) & 3) == 0)
2144 return 1;
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 tmp = tcg_temp_new_i32();
2149 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2150 tcg_temp_free_i32(tmp);
2151 return 1;
2152 }
2153 switch ((insn >> 22) & 3) {
2154 case 1:
2155 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2156 break;
2157 case 2:
2158 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2159 break;
2160 case 3:
2161 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2162 break;
2163 }
2164 tcg_temp_free_i32(tmp);
2165 gen_op_iwmmxt_movq_wRn_M0(wrd);
2166 gen_op_iwmmxt_set_mup();
2167 gen_op_iwmmxt_set_cup();
2168 break;
2169 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2170 case 0x314: case 0x714: case 0xb14: case 0xf14:
2171 if (((insn >> 22) & 3) == 0)
2172 return 1;
2173 wrd = (insn >> 12) & 0xf;
2174 rd0 = (insn >> 16) & 0xf;
2175 gen_op_iwmmxt_movq_M0_wRn(rd0);
2176 tmp = tcg_temp_new_i32();
2177 switch ((insn >> 22) & 3) {
2178 case 1:
2179 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2180 tcg_temp_free_i32(tmp);
2181 return 1;
2182 }
2183 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2184 break;
2185 case 2:
2186 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2187 tcg_temp_free_i32(tmp);
2188 return 1;
2189 }
2190 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2191 break;
2192 case 3:
2193 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2194 tcg_temp_free_i32(tmp);
2195 return 1;
2196 }
2197 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2198 break;
2199 }
2200 tcg_temp_free_i32(tmp);
2201 gen_op_iwmmxt_movq_wRn_M0(wrd);
2202 gen_op_iwmmxt_set_mup();
2203 gen_op_iwmmxt_set_cup();
2204 break;
2205 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2206 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2207 wrd = (insn >> 12) & 0xf;
2208 rd0 = (insn >> 16) & 0xf;
2209 rd1 = (insn >> 0) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
2211 switch ((insn >> 22) & 3) {
2212 case 0:
2213 if (insn & (1 << 21))
2214 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2215 else
2216 gen_op_iwmmxt_minub_M0_wRn(rd1);
2217 break;
2218 case 1:
2219 if (insn & (1 << 21))
2220 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2221 else
2222 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2223 break;
2224 case 2:
2225 if (insn & (1 << 21))
2226 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2227 else
2228 gen_op_iwmmxt_minul_M0_wRn(rd1);
2229 break;
2230 case 3:
2231 return 1;
2232 }
2233 gen_op_iwmmxt_movq_wRn_M0(wrd);
2234 gen_op_iwmmxt_set_mup();
2235 break;
2236 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2237 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2238 wrd = (insn >> 12) & 0xf;
2239 rd0 = (insn >> 16) & 0xf;
2240 rd1 = (insn >> 0) & 0xf;
2241 gen_op_iwmmxt_movq_M0_wRn(rd0);
2242 switch ((insn >> 22) & 3) {
2243 case 0:
2244 if (insn & (1 << 21))
2245 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2246 else
2247 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2248 break;
2249 case 1:
2250 if (insn & (1 << 21))
2251 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2252 else
2253 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2254 break;
2255 case 2:
2256 if (insn & (1 << 21))
2257 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2258 else
2259 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2260 break;
2261 case 3:
2262 return 1;
2263 }
2264 gen_op_iwmmxt_movq_wRn_M0(wrd);
2265 gen_op_iwmmxt_set_mup();
2266 break;
2267 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2268 case 0x402: case 0x502: case 0x602: case 0x702:
2269 wrd = (insn >> 12) & 0xf;
2270 rd0 = (insn >> 16) & 0xf;
2271 rd1 = (insn >> 0) & 0xf;
2272 gen_op_iwmmxt_movq_M0_wRn(rd0);
2273 tmp = tcg_const_i32((insn >> 20) & 3);
2274 iwmmxt_load_reg(cpu_V1, rd1);
2275 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2276 tcg_temp_free_i32(tmp);
2277 gen_op_iwmmxt_movq_wRn_M0(wrd);
2278 gen_op_iwmmxt_set_mup();
2279 break;
2280 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2281 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2282 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2283 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2284 wrd = (insn >> 12) & 0xf;
2285 rd0 = (insn >> 16) & 0xf;
2286 rd1 = (insn >> 0) & 0xf;
2287 gen_op_iwmmxt_movq_M0_wRn(rd0);
2288 switch ((insn >> 20) & 0xf) {
2289 case 0x0:
2290 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2291 break;
2292 case 0x1:
2293 gen_op_iwmmxt_subub_M0_wRn(rd1);
2294 break;
2295 case 0x3:
2296 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2297 break;
2298 case 0x4:
2299 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2300 break;
2301 case 0x5:
2302 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2303 break;
2304 case 0x7:
2305 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2306 break;
2307 case 0x8:
2308 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2309 break;
2310 case 0x9:
2311 gen_op_iwmmxt_subul_M0_wRn(rd1);
2312 break;
2313 case 0xb:
2314 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2315 break;
2316 default:
2317 return 1;
2318 }
2319 gen_op_iwmmxt_movq_wRn_M0(wrd);
2320 gen_op_iwmmxt_set_mup();
2321 gen_op_iwmmxt_set_cup();
2322 break;
2323 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2324 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2325 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2326 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2327 wrd = (insn >> 12) & 0xf;
2328 rd0 = (insn >> 16) & 0xf;
2329 gen_op_iwmmxt_movq_M0_wRn(rd0);
2330 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2331 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2332 tcg_temp_free_i32(tmp);
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 gen_op_iwmmxt_set_cup();
2336 break;
2337 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2338 case 0x418: case 0x518: case 0x618: case 0x718:
2339 case 0x818: case 0x918: case 0xa18: case 0xb18:
2340 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2341 wrd = (insn >> 12) & 0xf;
2342 rd0 = (insn >> 16) & 0xf;
2343 rd1 = (insn >> 0) & 0xf;
2344 gen_op_iwmmxt_movq_M0_wRn(rd0);
2345 switch ((insn >> 20) & 0xf) {
2346 case 0x0:
2347 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2348 break;
2349 case 0x1:
2350 gen_op_iwmmxt_addub_M0_wRn(rd1);
2351 break;
2352 case 0x3:
2353 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2354 break;
2355 case 0x4:
2356 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2357 break;
2358 case 0x5:
2359 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2360 break;
2361 case 0x7:
2362 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2363 break;
2364 case 0x8:
2365 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2366 break;
2367 case 0x9:
2368 gen_op_iwmmxt_addul_M0_wRn(rd1);
2369 break;
2370 case 0xb:
2371 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2372 break;
2373 default:
2374 return 1;
2375 }
2376 gen_op_iwmmxt_movq_wRn_M0(wrd);
2377 gen_op_iwmmxt_set_mup();
2378 gen_op_iwmmxt_set_cup();
2379 break;
2380 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2381 case 0x408: case 0x508: case 0x608: case 0x708:
2382 case 0x808: case 0x908: case 0xa08: case 0xb08:
2383 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2384 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2385 return 1;
2386 wrd = (insn >> 12) & 0xf;
2387 rd0 = (insn >> 16) & 0xf;
2388 rd1 = (insn >> 0) & 0xf;
2389 gen_op_iwmmxt_movq_M0_wRn(rd0);
2390 switch ((insn >> 22) & 3) {
2391 case 1:
2392 if (insn & (1 << 21))
2393 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2394 else
2395 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2396 break;
2397 case 2:
2398 if (insn & (1 << 21))
2399 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2400 else
2401 gen_op_iwmmxt_packul_M0_wRn(rd1);
2402 break;
2403 case 3:
2404 if (insn & (1 << 21))
2405 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2406 else
2407 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2408 break;
2409 }
2410 gen_op_iwmmxt_movq_wRn_M0(wrd);
2411 gen_op_iwmmxt_set_mup();
2412 gen_op_iwmmxt_set_cup();
2413 break;
2414 case 0x201: case 0x203: case 0x205: case 0x207:
2415 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2416 case 0x211: case 0x213: case 0x215: case 0x217:
2417 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2418 wrd = (insn >> 5) & 0xf;
2419 rd0 = (insn >> 12) & 0xf;
2420 rd1 = (insn >> 0) & 0xf;
2421 if (rd0 == 0xf || rd1 == 0xf)
2422 return 1;
2423 gen_op_iwmmxt_movq_M0_wRn(wrd);
2424 tmp = load_reg(s, rd0);
2425 tmp2 = load_reg(s, rd1);
2426 switch ((insn >> 16) & 0xf) {
2427 case 0x0: /* TMIA */
2428 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2429 break;
2430 case 0x8: /* TMIAPH */
2431 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2432 break;
2433 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2434 if (insn & (1 << 16))
2435 tcg_gen_shri_i32(tmp, tmp, 16);
2436 if (insn & (1 << 17))
2437 tcg_gen_shri_i32(tmp2, tmp2, 16);
2438 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2439 break;
2440 default:
2441 tcg_temp_free_i32(tmp2);
2442 tcg_temp_free_i32(tmp);
2443 return 1;
2444 }
2445 tcg_temp_free_i32(tmp2);
2446 tcg_temp_free_i32(tmp);
2447 gen_op_iwmmxt_movq_wRn_M0(wrd);
2448 gen_op_iwmmxt_set_mup();
2449 break;
2450 default:
2451 return 1;
2452 }
2453
2454 return 0;
2455 }
2456
2457 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2458 (i.e. an undefined instruction). */
2459 static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
2460 {
2461 int acc, rd0, rd1, rdhi, rdlo;
2462 TCGv_i32 tmp, tmp2;
2463
2464 if ((insn & 0x0ff00f10) == 0x0e200010) {
2465 /* Multiply with Internal Accumulate Format */
2466 rd0 = (insn >> 12) & 0xf;
2467 rd1 = insn & 0xf;
2468 acc = (insn >> 5) & 7;
2469
2470 if (acc != 0)
2471 return 1;
2472
2473 tmp = load_reg(s, rd0);
2474 tmp2 = load_reg(s, rd1);
2475 switch ((insn >> 16) & 0xf) {
2476 case 0x0: /* MIA */
2477 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2478 break;
2479 case 0x8: /* MIAPH */
2480 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2481 break;
2482 case 0xc: /* MIABB */
2483 case 0xd: /* MIABT */
2484 case 0xe: /* MIATB */
2485 case 0xf: /* MIATT */
2486 if (insn & (1 << 16))
2487 tcg_gen_shri_i32(tmp, tmp, 16);
2488 if (insn & (1 << 17))
2489 tcg_gen_shri_i32(tmp2, tmp2, 16);
2490 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2491 break;
2492 default:
2493 return 1;
2494 }
2495 tcg_temp_free_i32(tmp2);
2496 tcg_temp_free_i32(tmp);
2497
2498 gen_op_iwmmxt_movq_wRn_M0(acc);
2499 return 0;
2500 }
2501
2502 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2503 /* Internal Accumulator Access Format */
2504 rdhi = (insn >> 16) & 0xf;
2505 rdlo = (insn >> 12) & 0xf;
2506 acc = insn & 7;
2507
2508 if (acc != 0)
2509 return 1;
2510
2511 if (insn & ARM_CP_RW_BIT) { /* MRA */
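/* Informally: the internal accumulator appears to be 40 bits wide here, so
 * MRA returns the low 32 bits in RdLo and only bits [39:32] in RdHi, hence
 * the mask applied below. */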
2512 iwmmxt_load_reg(cpu_V0, acc);
2513 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2514 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2515 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2516 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2517 } else { /* MAR */
2518 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2519 iwmmxt_store_reg(cpu_V0, acc);
2520 }
2521 return 0;
2522 }
2523
2524 return 1;
2525 }
2526
2527 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2528 #define VFP_SREG(insn, bigbit, smallbit) \
2529 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2530 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2531 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2532 reg = (((insn) >> (bigbit)) & 0x0f) \
2533 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2534 } else { \
2535 if (insn & (1 << (smallbit))) \
2536 return 1; \
2537 reg = ((insn) >> (bigbit)) & 0x0f; \
2538 }} while (0)
2539
2540 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2541 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2542 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2543 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2544 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2545 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
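/* For illustration, a sketch of what the macros above decode: a single
 * precision destination is Sd = (insn[15:12] << 1) | insn[22], so bits
 * 15:12 == 0x3 with bit 22 set give s7; with VFP3 the double precision
 * destination is Dd = insn[15:12] | (insn[22] << 4), i.e. up to d31. */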
2546
2547 /* Move between integer and VFP cores. */
2548 static TCGv_i32 gen_vfp_mrs(void)
2549 {
2550 TCGv_i32 tmp = tcg_temp_new_i32();
2551 tcg_gen_mov_i32(tmp, cpu_F0s);
2552 return tmp;
2553 }
2554
2555 static void gen_vfp_msr(TCGv_i32 tmp)
2556 {
2557 tcg_gen_mov_i32(cpu_F0s, tmp);
2558 tcg_temp_free_i32(tmp);
2559 }
2560
2561 static void gen_neon_dup_u8(TCGv_i32 var, int shift)
2562 {
2563 TCGv_i32 tmp = tcg_temp_new_i32();
2564 if (shift)
2565 tcg_gen_shri_i32(var, var, shift);
2566 tcg_gen_ext8u_i32(var, var);
2567 tcg_gen_shli_i32(tmp, var, 8);
2568 tcg_gen_or_i32(var, var, tmp);
2569 tcg_gen_shli_i32(tmp, var, 16);
2570 tcg_gen_or_i32(var, var, tmp);
2571 tcg_temp_free_i32(tmp);
2572 }
2573
2574 static void gen_neon_dup_low16(TCGv_i32 var)
2575 {
2576 TCGv_i32 tmp = tcg_temp_new_i32();
2577 tcg_gen_ext16u_i32(var, var);
2578 tcg_gen_shli_i32(tmp, var, 16);
2579 tcg_gen_or_i32(var, var, tmp);
2580 tcg_temp_free_i32(tmp);
2581 }
2582
2583 static void gen_neon_dup_high16(TCGv_i32 var)
2584 {
2585 TCGv_i32 tmp = tcg_temp_new_i32();
2586 tcg_gen_andi_i32(var, var, 0xffff0000);
2587 tcg_gen_shri_i32(tmp, var, 16);
2588 tcg_gen_or_i32(var, var, tmp);
2589 tcg_temp_free_i32(tmp);
2590 }
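/* Rough examples of the dup helpers above: gen_neon_dup_u8 with shift 0
 * turns 0x000000ab into 0xabababab, and gen_neon_dup_low16 turns
 * 0x0000abcd into 0xabcdabcd. */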
2591
2592 static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
2593 {
2594 /* Load a single Neon element and replicate into a 32-bit TCG reg */
2595 TCGv_i32 tmp = tcg_temp_new_i32();
2596 switch (size) {
2597 case 0:
2598 gen_aa32_ld8u(tmp, addr, IS_USER(s));
2599 gen_neon_dup_u8(tmp, 0);
2600 break;
2601 case 1:
2602 gen_aa32_ld16u(tmp, addr, IS_USER(s));
2603 gen_neon_dup_low16(tmp);
2604 break;
2605 case 2:
2606 gen_aa32_ld32u(tmp, addr, IS_USER(s));
2607 break;
2608 default: /* Avoid compiler warnings. */
2609 abort();
2610 }
2611 return tmp;
2612 }
2613
2614 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2615 (i.e. an undefined instruction). */
2616 static int disas_vfp_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
2617 {
2618 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2619 int dp, veclen;
2620 TCGv_i32 addr;
2621 TCGv_i32 tmp;
2622 TCGv_i32 tmp2;
2623
2624 if (!arm_feature(env, ARM_FEATURE_VFP))
2625 return 1;
2626
2627 if (!s->vfp_enabled) {
2628 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2629 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2630 return 1;
2631 rn = (insn >> 16) & 0xf;
2632 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2633 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2634 return 1;
2635 }
2636 dp = ((insn & 0xf00) == 0xb00);
2637 switch ((insn >> 24) & 0xf) {
2638 case 0xe:
2639 if (insn & (1 << 4)) {
2640 /* single register transfer */
2641 rd = (insn >> 12) & 0xf;
2642 if (dp) {
2643 int size;
2644 int pass;
2645
2646 VFP_DREG_N(rn, insn);
2647 if (insn & 0xf)
2648 return 1;
2649 if (insn & 0x00c00060
2650 && !arm_feature(env, ARM_FEATURE_NEON))
2651 return 1;
2652
2653 pass = (insn >> 21) & 1;
2654 if (insn & (1 << 22)) {
2655 size = 0;
2656 offset = ((insn >> 5) & 3) * 8;
2657 } else if (insn & (1 << 5)) {
2658 size = 1;
2659 offset = (insn & (1 << 6)) ? 16 : 0;
2660 } else {
2661 size = 2;
2662 offset = 0;
2663 }
2664 if (insn & ARM_CP_RW_BIT) {
2665 /* vfp->arm */
2666 tmp = neon_load_reg(rn, pass);
2667 switch (size) {
2668 case 0:
2669 if (offset)
2670 tcg_gen_shri_i32(tmp, tmp, offset);
2671 if (insn & (1 << 23))
2672 gen_uxtb(tmp);
2673 else
2674 gen_sxtb(tmp);
2675 break;
2676 case 1:
2677 if (insn & (1 << 23)) {
2678 if (offset) {
2679 tcg_gen_shri_i32(tmp, tmp, 16);
2680 } else {
2681 gen_uxth(tmp);
2682 }
2683 } else {
2684 if (offset) {
2685 tcg_gen_sari_i32(tmp, tmp, 16);
2686 } else {
2687 gen_sxth(tmp);
2688 }
2689 }
2690 break;
2691 case 2:
2692 break;
2693 }
2694 store_reg(s, rd, tmp);
2695 } else {
2696 /* arm->vfp */
2697 tmp = load_reg(s, rd);
2698 if (insn & (1 << 23)) {
2699 /* VDUP */
2700 if (size == 0) {
2701 gen_neon_dup_u8(tmp, 0);
2702 } else if (size == 1) {
2703 gen_neon_dup_low16(tmp);
2704 }
2705 for (n = 0; n <= pass * 2; n++) {
2706 tmp2 = tcg_temp_new_i32();
2707 tcg_gen_mov_i32(tmp2, tmp);
2708 neon_store_reg(rn, n, tmp2);
2709 }
2710 neon_store_reg(rn, n, tmp);
2711 } else {
2712 /* VMOV */
2713 switch (size) {
2714 case 0:
2715 tmp2 = neon_load_reg(rn, pass);
2716 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
2717 tcg_temp_free_i32(tmp2);
2718 break;
2719 case 1:
2720 tmp2 = neon_load_reg(rn, pass);
2721 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
2722 tcg_temp_free_i32(tmp2);
2723 break;
2724 case 2:
2725 break;
2726 }
2727 neon_store_reg(rn, pass, tmp);
2728 }
2729 }
2730 } else { /* !dp */
2731 if ((insn & 0x6f) != 0x00)
2732 return 1;
2733 rn = VFP_SREG_N(insn);
2734 if (insn & ARM_CP_RW_BIT) {
2735 /* vfp->arm */
2736 if (insn & (1 << 21)) {
2737 /* system register */
2738 rn >>= 1;
2739
2740 switch (rn) {
2741 case ARM_VFP_FPSID:
2742 /* VFP2 allows access to FPSID from userspace.
2743 VFP3 restricts all ID registers to privileged
2744 accesses. */
2745 if (IS_USER(s)
2746 && arm_feature(env, ARM_FEATURE_VFP3))
2747 return 1;
2748 tmp = load_cpu_field(vfp.xregs[rn]);
2749 break;
2750 case ARM_VFP_FPEXC:
2751 if (IS_USER(s))
2752 return 1;
2753 tmp = load_cpu_field(vfp.xregs[rn]);
2754 break;
2755 case ARM_VFP_FPINST:
2756 case ARM_VFP_FPINST2:
2757 /* Not present in VFP3. */
2758 if (IS_USER(s)
2759 || arm_feature(env, ARM_FEATURE_VFP3))
2760 return 1;
2761 tmp = load_cpu_field(vfp.xregs[rn]);
2762 break;
2763 case ARM_VFP_FPSCR:
2764 if (rd == 15) {
2765 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2766 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2767 } else {
2768 tmp = tcg_temp_new_i32();
2769 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2770 }
2771 break;
2772 case ARM_VFP_MVFR0:
2773 case ARM_VFP_MVFR1:
2774 if (IS_USER(s)
2775 || !arm_feature(env, ARM_FEATURE_MVFR))
2776 return 1;
2777 tmp = load_cpu_field(vfp.xregs[rn]);
2778 break;
2779 default:
2780 return 1;
2781 }
2782 } else {
2783 gen_mov_F0_vreg(0, rn);
2784 tmp = gen_vfp_mrs();
2785 }
2786 if (rd == 15) {
2787 /* Set the 4 flag bits in the CPSR. */
2788 gen_set_nzcv(tmp);
2789 tcg_temp_free_i32(tmp);
2790 } else {
2791 store_reg(s, rd, tmp);
2792 }
2793 } else {
2794 /* arm->vfp */
2795 if (insn & (1 << 21)) {
2796 rn >>= 1;
2797 /* system register */
2798 switch (rn) {
2799 case ARM_VFP_FPSID:
2800 case ARM_VFP_MVFR0:
2801 case ARM_VFP_MVFR1:
2802 /* Writes are ignored. */
2803 break;
2804 case ARM_VFP_FPSCR:
2805 tmp = load_reg(s, rd);
2806 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2807 tcg_temp_free_i32(tmp);
2808 gen_lookup_tb(s);
2809 break;
2810 case ARM_VFP_FPEXC:
2811 if (IS_USER(s))
2812 return 1;
2813 /* TODO: VFP subarchitecture support.
2814 * For now, keep only the EN bit */
2815 tmp = load_reg(s, rd);
2816 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2817 store_cpu_field(tmp, vfp.xregs[rn]);
2818 gen_lookup_tb(s);
2819 break;
2820 case ARM_VFP_FPINST:
2821 case ARM_VFP_FPINST2:
2822 tmp = load_reg(s, rd);
2823 store_cpu_field(tmp, vfp.xregs[rn]);
2824 break;
2825 default:
2826 return 1;
2827 }
2828 } else {
2829 tmp = load_reg(s, rd);
2830 gen_vfp_msr(tmp);
2831 gen_mov_vreg_F0(0, rn);
2832 }
2833 }
2834 }
2835 } else {
2836 /* data processing */
2837 /* The opcode is in bits 23, 21, 20 and 6. */
2838 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2839 if (dp) {
2840 if (op == 15) {
2841 /* rn is opcode */
2842 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2843 } else {
2844 /* rn is register number */
2845 VFP_DREG_N(rn, insn);
2846 }
2847
2848 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2849 /* Integer or single precision destination. */
2850 rd = VFP_SREG_D(insn);
2851 } else {
2852 VFP_DREG_D(rd, insn);
2853 }
2854 if (op == 15 &&
2855 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2856 /* VCVT from int is always from S reg regardless of dp bit.
2857 * VCVT with immediate frac_bits has the same format as SREG_M
2858 */
2859 rm = VFP_SREG_M(insn);
2860 } else {
2861 VFP_DREG_M(rm, insn);
2862 }
2863 } else {
2864 rn = VFP_SREG_N(insn);
2865 if (op == 15 && rn == 15) {
2866 /* Double precision destination. */
2867 VFP_DREG_D(rd, insn);
2868 } else {
2869 rd = VFP_SREG_D(insn);
2870 }
2871 /* NB that we implicitly rely on the encoding for the frac_bits
2872 * in VCVT of fixed to float being the same as that of an SREG_M
2873 */
2874 rm = VFP_SREG_M(insn);
2875 }
2876
2877 veclen = s->vec_len;
2878 if (op == 15 && rn > 3)
2879 veclen = 0;
2880
2881 /* Shut up compiler warnings. */
2882 delta_m = 0;
2883 delta_d = 0;
2884 bank_mask = 0;
2885
2886 if (veclen > 0) {
2887 if (dp)
2888 bank_mask = 0xc;
2889 else
2890 bank_mask = 0x18;
2891
2892 /* Figure out what type of vector operation this is. */
2893 if ((rd & bank_mask) == 0) {
2894 /* scalar */
2895 veclen = 0;
2896 } else {
2897 if (dp)
2898 delta_d = (s->vec_stride >> 1) + 1;
2899 else
2900 delta_d = s->vec_stride + 1;
2901
2902 if ((rm & bank_mask) == 0) {
2903 /* mixed scalar/vector */
2904 delta_m = 0;
2905 } else {
2906 /* vector */
2907 delta_m = delta_d;
2908 }
2909 }
2910 }
2911
2912 /* Load the initial operands. */
2913 if (op == 15) {
2914 switch (rn) {
2915 case 16:
2916 case 17:
2917 /* Integer source */
2918 gen_mov_F0_vreg(0, rm);
2919 break;
2920 case 8:
2921 case 9:
2922 /* Compare */
2923 gen_mov_F0_vreg(dp, rd);
2924 gen_mov_F1_vreg(dp, rm);
2925 break;
2926 case 10:
2927 case 11:
2928 /* Compare with zero */
2929 gen_mov_F0_vreg(dp, rd);
2930 gen_vfp_F1_ld0(dp);
2931 break;
2932 case 20:
2933 case 21:
2934 case 22:
2935 case 23:
2936 case 28:
2937 case 29:
2938 case 30:
2939 case 31:
2940 /* Source and destination the same. */
2941 gen_mov_F0_vreg(dp, rd);
2942 break;
2943 case 4:
2944 case 5:
2945 case 6:
2946 case 7:
2947 /* VCVTB, VCVTT: only present with the halfprec extension,
2948 * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
2949 */
2950 if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
2951 return 1;
2952 }
2953 /* Otherwise fall through */
2954 default:
2955 /* One source operand. */
2956 gen_mov_F0_vreg(dp, rm);
2957 break;
2958 }
2959 } else {
2960 /* Two source operands. */
2961 gen_mov_F0_vreg(dp, rn);
2962 gen_mov_F1_vreg(dp, rm);
2963 }
2964
2965 for (;;) {
2966 /* Perform the calculation. */
2967 switch (op) {
2968 case 0: /* VMLA: fd + (fn * fm) */
2969 /* Note that the order of inputs to the add matters for NaNs */
2970 gen_vfp_F1_mul(dp);
2971 gen_mov_F0_vreg(dp, rd);
2972 gen_vfp_add(dp);
2973 break;
2974 case 1: /* VMLS: fd + -(fn * fm) */
2975 gen_vfp_mul(dp);
2976 gen_vfp_F1_neg(dp);
2977 gen_mov_F0_vreg(dp, rd);
2978 gen_vfp_add(dp);
2979 break;
2980 case 2: /* VNMLS: -fd + (fn * fm) */
2981 /* Note that it isn't valid to replace (-A + B) with (B - A)
2982 * or similar plausible looking simplifications
2983 * because this will give wrong results for NaNs.
2984 */
2985 gen_vfp_F1_mul(dp);
2986 gen_mov_F0_vreg(dp, rd);
2987 gen_vfp_neg(dp);
2988 gen_vfp_add(dp);
2989 break;
2990 case 3: /* VNMLA: -fd + -(fn * fm) */
2991 gen_vfp_mul(dp);
2992 gen_vfp_F1_neg(dp);
2993 gen_mov_F0_vreg(dp, rd);
2994 gen_vfp_neg(dp);
2995 gen_vfp_add(dp);
2996 break;
2997 case 4: /* mul: fn * fm */
2998 gen_vfp_mul(dp);
2999 break;
3000 case 5: /* nmul: -(fn * fm) */
3001 gen_vfp_mul(dp);
3002 gen_vfp_neg(dp);
3003 break;
3004 case 6: /* add: fn + fm */
3005 gen_vfp_add(dp);
3006 break;
3007 case 7: /* sub: fn - fm */
3008 gen_vfp_sub(dp);
3009 break;
3010 case 8: /* div: fn / fm */
3011 gen_vfp_div(dp);
3012 break;
3013 case 10: /* VFNMA : fd = muladd(-fd, fn, fm) */
3014 case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
3015 case 12: /* VFMA : fd = muladd( fd, fn, fm) */
3016 case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
3017 /* These are fused multiply-add, and must be done as one
3018 * floating point operation with no rounding between the
3019 * multiplication and addition steps.
3020 * NB that doing the negations here as separate steps is
3021 * correct: an input NaN should come out with its sign bit
3022 * flipped if it is a negated input.
3023 */
3024 if (!arm_feature(env, ARM_FEATURE_VFP4)) {
3025 return 1;
3026 }
3027 if (dp) {
3028 TCGv_ptr fpst;
3029 TCGv_i64 frd;
3030 if (op & 1) {
3031 /* VFNMS, VFMS */
3032 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
3033 }
3034 frd = tcg_temp_new_i64();
3035 tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
3036 if (op & 2) {
3037 /* VFNMA, VFNMS */
3038 gen_helper_vfp_negd(frd, frd);
3039 }
3040 fpst = get_fpstatus_ptr(0);
3041 gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
3042 cpu_F1d, frd, fpst);
3043 tcg_temp_free_ptr(fpst);
3044 tcg_temp_free_i64(frd);
3045 } else {
3046 TCGv_ptr fpst;
3047 TCGv_i32 frd;
3048 if (op & 1) {
3049 /* VFNMS, VFMS */
3050 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
3051 }
3052 frd = tcg_temp_new_i32();
3053 tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
3054 if (op & 2) {
3055 gen_helper_vfp_negs(frd, frd);
3056 }
3057 fpst = get_fpstatus_ptr(0);
3058 gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
3059 cpu_F1s, frd, fpst);
3060 tcg_temp_free_ptr(fpst);
3061 tcg_temp_free_i32(frd);
3062 }
3063 break;
3064 case 14: /* fconst */
3065 if (!arm_feature(env, ARM_FEATURE_VFP3))
3066 return 1;
3067
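/* For illustration (assuming the usual VFP expand-immediate rule): imm8 is
 * taken from insn[19:16]:insn[3:0]; for single precision, imm8 == 0x70
 * expands to 0x3f800000, i.e. 1.0f. */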
3068 n = (insn << 12) & 0x80000000;
3069 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3070 if (dp) {
3071 if (i & 0x40)
3072 i |= 0x3f80;
3073 else
3074 i |= 0x4000;
3075 n |= i << 16;
3076 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3077 } else {
3078 if (i & 0x40)
3079 i |= 0x780;
3080 else
3081 i |= 0x800;
3082 n |= i << 19;
3083 tcg_gen_movi_i32(cpu_F0s, n);
3084 }
3085 break;
3086 case 15: /* extension space */
3087 switch (rn) {
3088 case 0: /* cpy */
3089 /* no-op */
3090 break;
3091 case 1: /* abs */
3092 gen_vfp_abs(dp);
3093 break;
3094 case 2: /* neg */
3095 gen_vfp_neg(dp);
3096 break;
3097 case 3: /* sqrt */
3098 gen_vfp_sqrt(dp);
3099 break;
3100 case 4: /* vcvtb.f32.f16 */
3101 tmp = gen_vfp_mrs();
3102 tcg_gen_ext16u_i32(tmp, tmp);
3103 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3104 tcg_temp_free_i32(tmp);
3105 break;
3106 case 5: /* vcvtt.f32.f16 */
3107 tmp = gen_vfp_mrs();
3108 tcg_gen_shri_i32(tmp, tmp, 16);
3109 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3110 tcg_temp_free_i32(tmp);
3111 break;
3112 case 6: /* vcvtb.f16.f32 */
3113 tmp = tcg_temp_new_i32();
3114 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3115 gen_mov_F0_vreg(0, rd);
3116 tmp2 = gen_vfp_mrs();
3117 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3118 tcg_gen_or_i32(tmp, tmp, tmp2);
3119 tcg_temp_free_i32(tmp2);
3120 gen_vfp_msr(tmp);
3121 break;
3122 case 7: /* vcvtt.f16.f32 */
3123 tmp = tcg_temp_new_i32();
3124 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3125 tcg_gen_shli_i32(tmp, tmp, 16);
3126 gen_mov_F0_vreg(0, rd);
3127 tmp2 = gen_vfp_mrs();
3128 tcg_gen_ext16u_i32(tmp2, tmp2);
3129 tcg_gen_or_i32(tmp, tmp, tmp2);
3130 tcg_temp_free_i32(tmp2);
3131 gen_vfp_msr(tmp);
3132 break;
3133 case 8: /* cmp */
3134 gen_vfp_cmp(dp);
3135 break;
3136 case 9: /* cmpe */
3137 gen_vfp_cmpe(dp);
3138 break;
3139 case 10: /* cmpz */
3140 gen_vfp_cmp(dp);
3141 break;
3142 case 11: /* cmpez */
3143 gen_vfp_F1_ld0(dp);
3144 gen_vfp_cmpe(dp);
3145 break;
3146 case 15: /* single<->double conversion */
3147 if (dp)
3148 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3149 else
3150 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3151 break;
3152 case 16: /* fuito */
3153 gen_vfp_uito(dp, 0);
3154 break;
3155 case 17: /* fsito */
3156 gen_vfp_sito(dp, 0);
3157 break;
3158 case 20: /* fshto */
3159 if (!arm_feature(env, ARM_FEATURE_VFP3))
3160 return 1;
3161 gen_vfp_shto(dp, 16 - rm, 0);
3162 break;
3163 case 21: /* fslto */
3164 if (!arm_feature(env, ARM_FEATURE_VFP3))
3165 return 1;
3166 gen_vfp_slto(dp, 32 - rm, 0);
3167 break;
3168 case 22: /* fuhto */
3169 if (!arm_feature(env, ARM_FEATURE_VFP3))
3170 return 1;
3171 gen_vfp_uhto(dp, 16 - rm, 0);
3172 break;
3173 case 23: /* fulto */
3174 if (!arm_feature(env, ARM_FEATURE_VFP3))
3175 return 1;
3176 gen_vfp_ulto(dp, 32 - rm, 0);
3177 break;
3178 case 24: /* ftoui */
3179 gen_vfp_toui(dp, 0);
3180 break;
3181 case 25: /* ftouiz */
3182 gen_vfp_touiz(dp, 0);
3183 break;
3184 case 26: /* ftosi */
3185 gen_vfp_tosi(dp, 0);
3186 break;
3187 case 27: /* ftosiz */
3188 gen_vfp_tosiz(dp, 0);
3189 break;
3190 case 28: /* ftosh */
3191 if (!arm_feature(env, ARM_FEATURE_VFP3))
3192 return 1;
3193 gen_vfp_tosh(dp, 16 - rm, 0);
3194 break;
3195 case 29: /* ftosl */
3196 if (!arm_feature(env, ARM_FEATURE_VFP3))
3197 return 1;
3198 gen_vfp_tosl(dp, 32 - rm, 0);
3199 break;
3200 case 30: /* ftouh */
3201 if (!arm_feature(env, ARM_FEATURE_VFP3))
3202 return 1;
3203 gen_vfp_touh(dp, 16 - rm, 0);
3204 break;
3205 case 31: /* ftoul */
3206 if (!arm_feature(env, ARM_FEATURE_VFP3))
3207 return 1;
3208 gen_vfp_toul(dp, 32 - rm, 0);
3209 break;
3210 default: /* undefined */
3211 return 1;
3212 }
3213 break;
3214 default: /* undefined */
3215 return 1;
3216 }
3217
3218 /* Write back the result. */
3219 if (op == 15 && (rn >= 8 && rn <= 11))
3220 ; /* Comparison, do nothing. */
3221 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3222 /* VCVT double to int: always integer result. */
3223 gen_mov_vreg_F0(0, rd);
3224 else if (op == 15 && rn == 15)
3225 /* conversion */
3226 gen_mov_vreg_F0(!dp, rd);
3227 else
3228 gen_mov_vreg_F0(dp, rd);
3229
3230 /* break out of the loop if we have finished */
3231 if (veclen == 0)
3232 break;
3233
3234 if (op == 15 && delta_m == 0) {
3235 /* single source one-many */
3236 while (veclen--) {
3237 rd = ((rd + delta_d) & (bank_mask - 1))
3238 | (rd & bank_mask);
3239 gen_mov_vreg_F0(dp, rd);
3240 }
3241 break;
3242 }
3243 /* Set up the next operands. */
3244 veclen--;
3245 rd = ((rd + delta_d) & (bank_mask - 1))
3246 | (rd & bank_mask);
3247
3248 if (op == 15) {
3249 /* One source operand. */
3250 rm = ((rm + delta_m) & (bank_mask - 1))
3251 | (rm & bank_mask);
3252 gen_mov_F0_vreg(dp, rm);
3253 } else {
3254 /* Two source operands. */
3255 rn = ((rn + delta_d) & (bank_mask - 1))
3256 | (rn & bank_mask);
3257 gen_mov_F0_vreg(dp, rn);
3258 if (delta_m) {
3259 rm = ((rm + delta_m) & (bank_mask - 1))
3260 | (rm & bank_mask);
3261 gen_mov_F1_vreg(dp, rm);
3262 }
3263 }
3264 }
3265 }
3266 break;
3267 case 0xc:
3268 case 0xd:
3269 if ((insn & 0x03e00000) == 0x00400000) {
3270 /* two-register transfer */
3271 rn = (insn >> 16) & 0xf;
3272 rd = (insn >> 12) & 0xf;
3273 if (dp) {
3274 VFP_DREG_M(rm, insn);
3275 } else {
3276 rm = VFP_SREG_M(insn);
3277 }
3278
3279 if (insn & ARM_CP_RW_BIT) {
3280 /* vfp->arm */
3281 if (dp) {
3282 gen_mov_F0_vreg(0, rm * 2);
3283 tmp = gen_vfp_mrs();
3284 store_reg(s, rd, tmp);
3285 gen_mov_F0_vreg(0, rm * 2 + 1);
3286 tmp = gen_vfp_mrs();
3287 store_reg(s, rn, tmp);
3288 } else {
3289 gen_mov_F0_vreg(0, rm);
3290 tmp = gen_vfp_mrs();
3291 store_reg(s, rd, tmp);
3292 gen_mov_F0_vreg(0, rm + 1);
3293 tmp = gen_vfp_mrs();
3294 store_reg(s, rn, tmp);
3295 }
3296 } else {
3297 /* arm->vfp */
3298 if (dp) {
3299 tmp = load_reg(s, rd);
3300 gen_vfp_msr(tmp);
3301 gen_mov_vreg_F0(0, rm * 2);
3302 tmp = load_reg(s, rn);
3303 gen_vfp_msr(tmp);
3304 gen_mov_vreg_F0(0, rm * 2 + 1);
3305 } else {
3306 tmp = load_reg(s, rd);
3307 gen_vfp_msr(tmp);
3308 gen_mov_vreg_F0(0, rm);
3309 tmp = load_reg(s, rn);
3310 gen_vfp_msr(tmp);
3311 gen_mov_vreg_F0(0, rm + 1);
3312 }
3313 }
3314 } else {
3315 /* Load/store */
3316 rn = (insn >> 16) & 0xf;
3317 if (dp)
3318 VFP_DREG_D(rd, insn);
3319 else
3320 rd = VFP_SREG_D(insn);
3321 if ((insn & 0x01200000) == 0x01000000) {
3322 /* Single load/store */
3323 offset = (insn & 0xff) << 2;
3324 if ((insn & (1 << 23)) == 0)
3325 offset = -offset;
3326 if (s->thumb && rn == 15) {
3327 /* This is actually UNPREDICTABLE */
3328 addr = tcg_temp_new_i32();
3329 tcg_gen_movi_i32(addr, s->pc & ~2);
3330 } else {
3331 addr = load_reg(s, rn);
3332 }
3333 tcg_gen_addi_i32(addr, addr, offset);
3334 if (insn & (1 << 20)) {
3335 gen_vfp_ld(s, dp, addr);
3336 gen_mov_vreg_F0(dp, rd);
3337 } else {
3338 gen_mov_F0_vreg(dp, rd);
3339 gen_vfp_st(s, dp, addr);
3340 }
3341 tcg_temp_free_i32(addr);
3342 } else {
3343 /* load/store multiple */
3344 int w = insn & (1 << 21);
3345 if (dp)
3346 n = (insn >> 1) & 0x7f;
3347 else
3348 n = insn & 0xff;
3349
3350 if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
3351 /* P == U, W == 1 => UNDEF */
3352 return 1;
3353 }
3354 if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
3355 /* UNPREDICTABLE cases for bad immediates: we choose to
3356 * UNDEF to avoid generating huge numbers of TCG ops
3357 */
3358 return 1;
3359 }
3360 if (rn == 15 && w) {
3361 /* writeback to PC is UNPREDICTABLE; we choose to UNDEF */
3362 return 1;
3363 }
3364
3365 if (s->thumb && rn == 15) {
3366 /* This is actually UNPREDICTABLE */
3367 addr = tcg_temp_new_i32();
3368 tcg_gen_movi_i32(addr, s->pc & ~2);
3369 } else {
3370 addr = load_reg(s, rn);
3371 }
3372 if (insn & (1 << 24)) /* pre-decrement */
3373 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3374
3375 if (dp)
3376 offset = 8;
3377 else
3378 offset = 4;
3379 for (i = 0; i < n; i++) {
3380 if (insn & ARM_CP_RW_BIT) {
3381 /* load */
3382 gen_vfp_ld(s, dp, addr);
3383 gen_mov_vreg_F0(dp, rd + i);
3384 } else {
3385 /* store */
3386 gen_mov_F0_vreg(dp, rd + i);
3387 gen_vfp_st(s, dp, addr);
3388 }
3389 tcg_gen_addi_i32(addr, addr, offset);
3390 }
3391 if (w) {
3392 /* writeback */
3393 if (insn & (1 << 24))
3394 offset = -offset * n;
3395 else if (dp && (insn & 1))
3396 offset = 4;
3397 else
3398 offset = 0;
3399
3400 if (offset != 0)
3401 tcg_gen_addi_i32(addr, addr, offset);
3402 store_reg(s, rn, addr);
3403 } else {
3404 tcg_temp_free_i32(addr);
3405 }
3406 }
3407 }
3408 break;
3409 default:
3410 /* Should never happen. */
3411 return 1;
3412 }
3413 return 0;
3414 }
3415
3416 static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
3417 {
3418 TranslationBlock *tb;
3419
3420 tb = s->tb;
3421 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3422 tcg_gen_goto_tb(n);
3423 gen_set_pc_im(s, dest);
3424 tcg_gen_exit_tb((uintptr_t)tb + n);
3425 } else {
3426 gen_set_pc_im(s, dest);
3427 tcg_gen_exit_tb(0);
3428 }
3429 }
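/* Informal note: goto_tb chaining is only attempted when the destination
 * lies in the same guest page as the start of the current TB; otherwise we
 * just set the PC and exit to the main loop. */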
3430
3431 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3432 {
3433 if (unlikely(s->singlestep_enabled)) {
3434 /* An indirect jump so that we still trigger the debug exception. */
3435 if (s->thumb)
3436 dest |= 1;
3437 gen_bx_im(s, dest);
3438 } else {
3439 gen_goto_tb(s, 0, dest);
3440 s->is_jmp = DISAS_TB_JUMP;
3441 }
3442 }
3443
3444 static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
3445 {
3446 if (x)
3447 tcg_gen_sari_i32(t0, t0, 16);
3448 else
3449 gen_sxth(t0);
3450 if (y)
3451 tcg_gen_sari_i32(t1, t1, 16);
3452 else
3453 gen_sxth(t1);
3454 tcg_gen_mul_i32(t0, t0, t1);
3455 }
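/* Informally: gen_mulxy is the 16x16->32 signed multiply used by the
 * SMLA<x><y>-style ops; x and y select the top (1) or bottom (0) halfword
 * of each operand before the multiply. */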
3456
3457 /* Return the mask of PSR bits set by an MSR instruction. */
3458 static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
3459 uint32_t mask;
3460
3461 mask = 0;
3462 if (flags & (1 << 0))
3463 mask |= 0xff;
3464 if (flags & (1 << 1))
3465 mask |= 0xff00;
3466 if (flags & (1 << 2))
3467 mask |= 0xff0000;
3468 if (flags & (1 << 3))
3469 mask |= 0xff000000;
3470
3471 /* Mask out undefined bits. */
3472 mask &= ~CPSR_RESERVED;
3473 if (!arm_feature(env, ARM_FEATURE_V4T))
3474 mask &= ~CPSR_T;
3475 if (!arm_feature(env, ARM_FEATURE_V5))
3476 mask &= ~CPSR_Q; /* V5TE in reality */
3477 if (!arm_feature(env, ARM_FEATURE_V6))
3478 mask &= ~(CPSR_E | CPSR_GE);
3479 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3480 mask &= ~CPSR_IT;
3481 /* Mask out execution state bits. */
3482 if (!spsr)
3483 mask &= ~CPSR_EXEC;
3484 /* Mask out privileged bits. */
3485 if (IS_USER(s))
3486 mask &= CPSR_USER;
3487 return mask;
3488 }
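/* For illustration: flags mirrors the MSR field mask, one bit per PSR byte
 * (bit 0 = c, bit 1 = x, bit 2 = s, bit 3 = f), so e.g. an MSR ..._fc form
 * starts from a raw mask of 0xff0000ff before the filtering above. */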
3489
3490 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3491 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
3492 {
3493 TCGv_i32 tmp;
3494 if (spsr) {
3495 /* ??? This is also undefined in system mode. */
3496 if (IS_USER(s))
3497 return 1;
3498
3499 tmp = load_cpu_field(spsr);
3500 tcg_gen_andi_i32(tmp, tmp, ~mask);
3501 tcg_gen_andi_i32(t0, t0, mask);
3502 tcg_gen_or_i32(tmp, tmp, t0);
3503 store_cpu_field(tmp, spsr);
3504 } else {
3505 gen_set_cpsr(t0, mask);
3506 }
3507 tcg_temp_free_i32(t0);
3508 gen_lookup_tb(s);
3509 return 0;
3510 }
3511
3512 /* Returns nonzero if access to the PSR is not permitted. */
3513 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3514 {
3515 TCGv_i32 tmp;
3516 tmp = tcg_temp_new_i32();
3517 tcg_gen_movi_i32(tmp, val);
3518 return gen_set_psr(s, mask, spsr, tmp);
3519 }
3520
3521 /* Generate an old-style exception return. Marks pc as dead. */
3522 static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
3523 {
3524 TCGv_i32 tmp;
3525 store_reg(s, 15, pc);
3526 tmp = load_cpu_field(spsr);
3527 gen_set_cpsr(tmp, 0xffffffff);
3528 tcg_temp_free_i32(tmp);
3529 s->is_jmp = DISAS_UPDATE;
3530 }
3531
3532 /* Generate a v6 exception return. Marks both values as dead. */
3533 static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
3534 {
3535 gen_set_cpsr(cpsr, 0xffffffff);
3536 tcg_temp_free_i32(cpsr);
3537 store_reg(s, 15, pc);
3538 s->is_jmp = DISAS_UPDATE;
3539 }
3540
3541 static inline void
3542 gen_set_condexec (DisasContext *s)
3543 {
3544 if (s->condexec_mask) {
3545 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3546 TCGv_i32 tmp = tcg_temp_new_i32();
3547 tcg_gen_movi_i32(tmp, val);
3548 store_cpu_field(tmp, condexec_bits);
3549 }
3550 }
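/* Informal note: this writes the current IT-block state back to the CPU
 * struct (condition in bits [7:4], the remaining mask below it) so that an
 * exception taken at this point observes consistent condexec bits. */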
3551
3552 static void gen_exception_insn(DisasContext *s, int offset, int excp)
3553 {
3554 gen_set_condexec(s);
3555 gen_set_pc_im(s, s->pc - offset);
3556 gen_exception(excp);
3557 s->is_jmp = DISAS_JUMP;
3558 }
3559
3560 static void gen_nop_hint(DisasContext *s, int val)
3561 {
3562 switch (val) {
3563 case 3: /* wfi */
3564 gen_set_pc_im(s, s->pc);
3565 s->is_jmp = DISAS_WFI;
3566 break;
3567 case 2: /* wfe */
3568 case 4: /* sev */
3569 case 5: /* sevl */
3570 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
3571 default: /* nop */
3572 break;
3573 }
3574 }
3575
3576 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3577
3578 static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
3579 {
3580 switch (size) {
3581 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3582 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3583 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3584 default: abort();
3585 }
3586 }
3587
3588 static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
3589 {
3590 switch (size) {
3591 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3592 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3593 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3594 default: return;
3595 }
3596 }
3597
3598 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3599 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3600 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3601 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3602 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3603
3604 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3605 switch ((size << 1) | u) { \
3606 case 0: \
3607 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3608 break; \
3609 case 1: \
3610 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3611 break; \
3612 case 2: \
3613 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3614 break; \
3615 case 3: \
3616 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3617 break; \
3618 case 4: \
3619 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3620 break; \
3621 case 5: \
3622 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3623 break; \
3624 default: return 1; \
3625 }} while (0)
3626
3627 #define GEN_NEON_INTEGER_OP(name) do { \
3628 switch ((size << 1) | u) { \
3629 case 0: \
3630 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3631 break; \
3632 case 1: \
3633 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3634 break; \
3635 case 2: \
3636 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3637 break; \
3638 case 3: \
3639 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3640 break; \
3641 case 4: \
3642 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3643 break; \
3644 case 5: \
3645 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3646 break; \
3647 default: return 1; \
3648 }} while (0)
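/* A quick sketch of the dispatch in the two macros above: the case index is
 * (size << 1) | u, so size == 1 with u == 0 picks the _s16 helper and
 * size == 2 with u == 1 picks the _u32 helper. */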
3649
3650 static TCGv_i32 neon_load_scratch(int scratch)
3651 {
3652 TCGv_i32 tmp = tcg_temp_new_i32();
3653 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3654 return tmp;
3655 }
3656
3657 static void neon_store_scratch(int scratch, TCGv_i32 var)
3658 {
3659 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3660 tcg_temp_free_i32(var);
3661 }
3662
3663 static inline TCGv_i32 neon_get_scalar(int size, int reg)
3664 {
3665 TCGv_i32 tmp;
3666 if (size == 1) {
3667 tmp = neon_load_reg(reg & 7, reg >> 4);
3668 if (reg & 8) {
3669 gen_neon_dup_high16(tmp);
3670 } else {
3671 gen_neon_dup_low16(tmp);
3672 }
3673 } else {
3674 tmp = neon_load_reg(reg & 15, reg >> 4);
3675 }
3676 return tmp;
3677 }
3678
3679 static int gen_neon_unzip(int rd, int rm, int size, int q)
3680 {
3681 TCGv_i32 tmp, tmp2;
3682 if (!q && size == 2) {
3683 return 1;
3684 }
3685 tmp = tcg_const_i32(rd);
3686 tmp2 = tcg_const_i32(rm);
3687 if (q) {
3688 switch (size) {
3689 case 0:
3690 gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
3691 break;
3692 case 1:
3693 gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
3694 break;
3695 case 2:
3696 gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
3697 break;
3698 default:
3699 abort();
3700 }
3701 } else {
3702 switch (size) {
3703 case 0:
3704 gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
3705 break;
3706 case 1:
3707 gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
3708 break;
3709 default:
3710 abort();
3711 }
3712 }
3713 tcg_temp_free_i32(tmp);
3714 tcg_temp_free_i32(tmp2);
3715 return 0;
3716 }
3717
3718 static int gen_neon_zip(int rd, int rm, int size, int q)
3719 {
3720 TCGv_i32 tmp, tmp2;
3721 if (!q && size == 2) {
3722 return 1;
3723 }
3724 tmp = tcg_const_i32(rd);
3725 tmp2 = tcg_const_i32(rm);
3726 if (q) {
3727 switch (size) {
3728 case 0:
3729 gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
3730 break;
3731 case 1:
3732 gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
3733 break;
3734 case 2:
3735 gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
3736 break;
3737 default:
3738 abort();
3739 }
3740 } else {
3741 switch (size) {
3742 case 0:
3743 gen_helper_neon_zip8(cpu_env, tmp, tmp2);
3744 break;
3745 case 1:
3746 gen_helper_neon_zip16(cpu_env, tmp, tmp2);
3747 break;
3748 default:
3749 abort();
3750 }
3751 }
3752 tcg_temp_free_i32(tmp);
3753 tcg_temp_free_i32(tmp2);
3754 return 0;
3755 }
3756
3757 static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
3758 {
3759 TCGv_i32 rd, tmp;
3760
3761 rd = tcg_temp_new_i32();
3762 tmp = tcg_temp_new_i32();
3763
3764 tcg_gen_shli_i32(rd, t0, 8);
3765 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3766 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3767 tcg_gen_or_i32(rd, rd, tmp);
3768
3769 tcg_gen_shri_i32(t1, t1, 8);
3770 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3771 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3772 tcg_gen_or_i32(t1, t1, tmp);
3773 tcg_gen_mov_i32(t0, rd);
3774
3775 tcg_temp_free_i32(tmp);
3776 tcg_temp_free_i32(rd);
3777 }
3778
3779 static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
3780 {
3781 TCGv_i32 rd, tmp;
3782
3783 rd = tcg_temp_new_i32();
3784 tmp = tcg_temp_new_i32();
3785
3786 tcg_gen_shli_i32(rd, t0, 16);
3787 tcg_gen_andi_i32(tmp, t1, 0xffff);
3788 tcg_gen_or_i32(rd, rd, tmp);
3789 tcg_gen_shri_i32(t1, t1, 16);
3790 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3791 tcg_gen_or_i32(t1, t1, tmp);
3792 tcg_gen_mov_i32(t0, rd);
3793
3794 tcg_temp_free_i32(tmp);
3795 tcg_temp_free_i32(rd);
3796 }
3797
3798
3799 static struct {
3800 int nregs;
3801 int interleave;
3802 int spacing;
3803 } neon_ls_element_type[11] = {
3804 {4, 4, 1},
3805 {4, 4, 2},
3806 {4, 1, 1},
3807 {4, 2, 1},
3808 {3, 3, 1},
3809 {3, 3, 2},
3810 {3, 1, 1},
3811 {1, 1, 1},
3812 {2, 2, 1},
3813 {2, 2, 2},
3814 {2, 1, 1}
3815 };
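/* Informal note: the table is indexed by the type field (insn bits [11:8])
 * of the "load/store multiple structures" forms; each entry gives the number
 * of D registers touched, the element interleave factor and the register
 * spacing. Entry 7, {1, 1, 1}, for instance is the plain single-register
 * VLD1/VST1 case. */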
3816
3817 /* Translate a NEON load/store element instruction. Return nonzero if the
3818 instruction is invalid. */
3819 static int disas_neon_ls_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
3820 {
3821 int rd, rn, rm;
3822 int op;
3823 int nregs;
3824 int interleave;
3825 int spacing;
3826 int stride;
3827 int size;
3828 int reg;
3829 int pass;
3830 int load;
3831 int shift;
3832 int n;
3833 TCGv_i32 addr;
3834 TCGv_i32 tmp;
3835 TCGv_i32 tmp2;
3836 TCGv_i64 tmp64;
3837
3838 if (!s->vfp_enabled)
3839 return 1;
3840 VFP_DREG_D(rd, insn);
3841 rn = (insn >> 16) & 0xf;
3842 rm = insn & 0xf;
3843 load = (insn & (1 << 21)) != 0;
3844 if ((insn & (1 << 23)) == 0) {
3845 /* Load store all elements. */
3846 op = (insn >> 8) & 0xf;
3847 size = (insn >> 6) & 3;
3848 if (op > 10)
3849 return 1;
3850 /* Catch UNDEF cases for bad values of align field */
3851 switch (op & 0xc) {
3852 case 4:
3853 if (((insn >> 5) & 1) == 1) {
3854 return 1;
3855 }
3856 break;
3857 case 8:
3858 if (((insn >> 4) & 3) == 3) {
3859 return 1;
3860 }
3861 break;
3862 default:
3863 break;
3864 }
3865 nregs = neon_ls_element_type[op].nregs;
3866 interleave = neon_ls_element_type[op].interleave;
3867 spacing = neon_ls_element_type[op].spacing;
3868 if (size == 3 && (interleave | spacing) != 1)
3869 return 1;
3870 addr = tcg_temp_new_i32();
3871 load_reg_var(s, addr, rn);
3872 stride = (1 << size) * interleave;
3873 for (reg = 0; reg < nregs; reg++) {
3874 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3875 load_reg_var(s, addr, rn);
3876 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3877 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3878 load_reg_var(s, addr, rn);
3879 tcg_gen_addi_i32(addr, addr, 1 << size);
3880 }
3881 if (size == 3) {
3882 tmp64 = tcg_temp_new_i64();
3883 if (load) {
3884 gen_aa32_ld64(tmp64, addr, IS_USER(s));
3885 neon_store_reg64(tmp64, rd);
3886 } else {
3887 neon_load_reg64(tmp64, rd);
3888 gen_aa32_st64(tmp64, addr, IS_USER(s));
3889 }
3890 tcg_temp_free_i64(tmp64);
3891 tcg_gen_addi_i32(addr, addr, stride);
3892 } else {
3893 for (pass = 0; pass < 2; pass++) {
3894 if (size == 2) {
3895 if (load) {
3896 tmp = tcg_temp_new_i32();
3897 gen_aa32_ld32u(tmp, addr, IS_USER(s));
3898 neon_store_reg(rd, pass, tmp);
3899 } else {
3900 tmp = neon_load_reg(rd, pass);
3901 gen_aa32_st32(tmp, addr, IS_USER(s));
3902 tcg_temp_free_i32(tmp);
3903 }
3904 tcg_gen_addi_i32(addr, addr, stride);
3905 } else if (size == 1) {
3906 if (load) {
3907 tmp = tcg_temp_new_i32();
3908 gen_aa32_ld16u(tmp, addr, IS_USER(s));
3909 tcg_gen_addi_i32(addr, addr, stride);
3910 tmp2 = tcg_temp_new_i32();
3911 gen_aa32_ld16u(tmp2, addr, IS_USER(s));
3912 tcg_gen_addi_i32(addr, addr, stride);
3913 tcg_gen_shli_i32(tmp2, tmp2, 16);
3914 tcg_gen_or_i32(tmp, tmp, tmp2);
3915 tcg_temp_free_i32(tmp2);
3916 neon_store_reg(rd, pass, tmp);
3917 } else {
3918 tmp = neon_load_reg(rd, pass);
3919 tmp2 = tcg_temp_new_i32();
3920 tcg_gen_shri_i32(tmp2, tmp, 16);
3921 gen_aa32_st16(tmp, addr, IS_USER(s));
3922 tcg_temp_free_i32(tmp);
3923 tcg_gen_addi_i32(addr, addr, stride);
3924 gen_aa32_st16(tmp2, addr, IS_USER(s));
3925 tcg_temp_free_i32(tmp2);
3926 tcg_gen_addi_i32(addr, addr, stride);
3927 }
3928 } else /* size == 0 */ {
3929 if (load) {
3930 TCGV_UNUSED_I32(tmp2);
3931 for (n = 0; n < 4; n++) {
3932 tmp = tcg_temp_new_i32();
3933 gen_aa32_ld8u(tmp, addr, IS_USER(s));
3934 tcg_gen_addi_i32(addr, addr, stride);
3935 if (n == 0) {
3936 tmp2 = tmp;
3937 } else {
3938 tcg_gen_shli_i32(tmp, tmp, n * 8);
3939 tcg_gen_or_i32(tmp2, tmp2, tmp);
3940 tcg_temp_free_i32(tmp);
3941 }
3942 }
3943 neon_store_reg(rd, pass, tmp2);
3944 } else {
3945 tmp2 = neon_load_reg(rd, pass);
3946 for (n = 0; n < 4; n++) {
3947 tmp = tcg_temp_new_i32();
3948 if (n == 0) {
3949 tcg_gen_mov_i32(tmp, tmp2);
3950 } else {
3951 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3952 }
3953 gen_aa32_st8(tmp, addr, IS_USER(s));
3954 tcg_temp_free_i32(tmp);
3955 tcg_gen_addi_i32(addr, addr, stride);
3956 }
3957 tcg_temp_free_i32(tmp2);
3958 }
3959 }
3960 }
3961 }
3962 rd += spacing;
3963 }
3964 tcg_temp_free_i32(addr);
3965 stride = nregs * 8;
3966 } else {
3967 size = (insn >> 10) & 3;
3968 if (size == 3) {
3969 /* Load single element to all lanes. */
3970 int a = (insn >> 4) & 1;
3971 if (!load) {
3972 return 1;
3973 }
3974 size = (insn >> 6) & 3;
3975 nregs = ((insn >> 8) & 3) + 1;
3976
3977 if (size == 3) {
3978 if (nregs != 4 || a == 0) {
3979 return 1;
3980 }
3981 /* For VLD4, size == 3 with a == 1 means 32 bits at 16-byte alignment */
3982 size = 2;
3983 }
3984 if (nregs == 1 && a == 1 && size == 0) {
3985 return 1;
3986 }
3987 if (nregs == 3 && a == 1) {
3988 return 1;
3989 }
3990 addr = tcg_temp_new_i32();
3991 load_reg_var(s, addr, rn);
3992 if (nregs == 1) {
3993 /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
3994 tmp = gen_load_and_replicate(s, addr, size);
3995 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
3996 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
3997 if (insn & (1 << 5)) {
3998 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
3999 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
4000 }
4001 tcg_temp_free_i32(tmp);
4002 } else {
4003 /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
4004 stride = (insn & (1 << 5)) ? 2 : 1;
4005 for (reg = 0; reg < nregs; reg++) {
4006 tmp = gen_load_and_replicate(s, addr, size);
4007 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
4008 tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
4009 tcg_temp_free_i32(tmp);
4010 tcg_gen_addi_i32(addr, addr, 1 << size);
4011 rd += stride;
4012 }
4013 }
4014 tcg_temp_free_i32(addr);
4015 stride = (1 << size) * nregs;
4016 } else {
4017 /* Single element. */
4018 int idx = (insn >> 4) & 0xf;
4019 pass = (insn >> 7) & 1;
4020 switch (size) {
4021 case 0:
4022 shift = ((insn >> 5) & 3) * 8;
4023 stride = 1;
4024 break;
4025 case 1:
4026 shift = ((insn >> 6) & 1) * 16;
4027 stride = (insn & (1 << 5)) ? 2 : 1;
4028 break;
4029 case 2:
4030 shift = 0;
4031 stride = (insn & (1 << 6)) ? 2 : 1;
4032 break;
4033 default:
4034 abort();
4035 }
4036 nregs = ((insn >> 8) & 3) + 1;
4037 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
4038 switch (nregs) {
4039 case 1:
4040 if (((idx & (1 << size)) != 0) ||
4041 (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
4042 return 1;
4043 }
4044 break;
4045 case 3:
4046 if ((idx & 1) != 0) {
4047 return 1;
4048 }
4049 /* fall through */
4050 case 2:
4051 if (size == 2 && (idx & 2) != 0) {
4052 return 1;
4053 }
4054 break;
4055 case 4:
4056 if ((size == 2) && ((idx & 3) == 3)) {
4057 return 1;
4058 }
4059 break;
4060 default:
4061 abort();
4062 }
4063 if ((rd + stride * (nregs - 1)) > 31) {
4064 /* Attempts to write off the end of the register file
4065 * are UNPREDICTABLE; we choose to UNDEF because otherwise
4066 * the register accesses would run off the end of the array.
4067 */
4068 return 1;
4069 }
4070 addr = tcg_temp_new_i32();
4071 load_reg_var(s, addr, rn);
4072 for (reg = 0; reg < nregs; reg++) {
4073 if (load) {
4074 tmp = tcg_temp_new_i32();
4075 switch (size) {
4076 case 0:
4077 gen_aa32_ld8u(tmp, addr, IS_USER(s));
4078 break;
4079 case 1:
4080 gen_aa32_ld16u(tmp, addr, IS_USER(s));
4081 break;
4082 case 2:
4083 gen_aa32_ld32u(tmp, addr, IS_USER(s));
4084 break;
4085 default: /* Avoid compiler warnings. */
4086 abort();
4087 }
4088 if (size != 2) {
4089 tmp2 = neon_load_reg(rd, pass);
4090 tcg_gen_deposit_i32(tmp, tmp2, tmp,
4091 shift, size ? 16 : 8);
4092 tcg_temp_free_i32(tmp2);
4093 }
4094 neon_store_reg(rd, pass, tmp);
4095 } else { /* Store */
4096 tmp = neon_load_reg(rd, pass);
4097 if (shift)
4098 tcg_gen_shri_i32(tmp, tmp, shift);
4099 switch (size) {
4100 case 0:
4101 gen_aa32_st8(tmp, addr, IS_USER(s));
4102 break;
4103 case 1:
4104 gen_aa32_st16(tmp, addr, IS_USER(s));
4105 break;
4106 case 2:
4107 gen_aa32_st32(tmp, addr, IS_USER(s));
4108 break;
4109 }
4110 tcg_temp_free_i32(tmp);
4111 }
4112 rd += stride;
4113 tcg_gen_addi_i32(addr, addr, 1 << size);
4114 }
4115 tcg_temp_free_i32(addr);
4116 stride = nregs * (1 << size);
4117 }
4118 }
4119 if (rm != 15) {
4120 TCGv_i32 base;
4121
4122 base = load_reg(s, rn);
4123 if (rm == 13) {
4124 tcg_gen_addi_i32(base, base, stride);
4125 } else {
4126 TCGv_i32 index;
4127 index = load_reg(s, rm);
4128 tcg_gen_add_i32(base, base, index);
4129 tcg_temp_free_i32(index);
4130 }
4131 store_reg(s, rn, base);
4132 }
4133 return 0;
4134 }
4135
4136 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4137 static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
4138 {
4139 tcg_gen_and_i32(t, t, c);
4140 tcg_gen_andc_i32(f, f, c);
4141 tcg_gen_or_i32(dest, t, f);
4142 }
4143
4144 static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
4145 {
4146 switch (size) {
4147 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4148 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4149 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4150 default: abort();
4151 }
4152 }
4153
4154 static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4155 {
4156 switch (size) {
4157 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4158 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4159 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4160 default: abort();
4161 }
4162 }
4163
4164 static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
4165 {
4166 switch (size) {
4167 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4168 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4169 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4170 default: abort();
4171 }
4172 }
4173
4174 static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
4175 {
4176 switch (size) {
4177 case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
4178 case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
4179 case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
4180 default: abort();
4181 }
4182 }
4183
4184 static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
4185 int q, int u)
4186 {
4187 if (q) {
4188 if (u) {
4189 switch (size) {
4190 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4191 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4192 default: abort();
4193 }
4194 } else {
4195 switch (size) {
4196 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4197 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4198 default: abort();
4199 }
4200 }
4201 } else {
4202 if (u) {
4203 switch (size) {
4204 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4205 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4206 default: abort();
4207 }
4208 } else {
4209 switch (size) {
4210 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4211 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4212 default: abort();
4213 }
4214 }
4215 }
4216 }
4217
4218 static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
4219 {
4220 if (u) {
4221 switch (size) {
4222 case 0: gen_helper_neon_widen_u8(dest, src); break;
4223 case 1: gen_helper_neon_widen_u16(dest, src); break;
4224 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4225 default: abort();
4226 }
4227 } else {
4228 switch (size) {
4229 case 0: gen_helper_neon_widen_s8(dest, src); break;
4230 case 1: gen_helper_neon_widen_s16(dest, src); break;
4231 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4232 default: abort();
4233 }
4234 }
4235 tcg_temp_free_i32(src);
4236 }
4237
4238 static inline void gen_neon_addl(int size)
4239 {
4240 switch (size) {
4241 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4242 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4243 case 2: tcg_gen_add_i64(CPU_V001); break;
4244 default: abort();
4245 }
4246 }
4247
4248 static inline void gen_neon_subl(int size)
4249 {
4250 switch (size) {
4251 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4252 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4253 case 2: tcg_gen_sub_i64(CPU_V001); break;
4254 default: abort();
4255 }
4256 }
4257
4258 static inline void gen_neon_negl(TCGv_i64 var, int size)
4259 {
4260 switch (size) {
4261 case 0: gen_helper_neon_negl_u16(var, var); break;
4262 case 1: gen_helper_neon_negl_u32(var, var); break;
4263 case 2:
4264 tcg_gen_neg_i64(var, var);
4265 break;
4266 default: abort();
4267 }
4268 }
4269
4270 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4271 {
4272 switch (size) {
4273 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4274 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4275 default: abort();
4276 }
4277 }
4278
4279 static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
4280 int size, int u)
4281 {
4282 TCGv_i64 tmp;
4283
4284 switch ((size << 1) | u) {
4285 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4286 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4287 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4288 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4289 case 4:
4290 tmp = gen_muls_i64_i32(a, b);
4291 tcg_gen_mov_i64(dest, tmp);
4292 tcg_temp_free_i64(tmp);
4293 break;
4294 case 5:
4295 tmp = gen_mulu_i64_i32(a, b);
4296 tcg_gen_mov_i64(dest, tmp);
4297 tcg_temp_free_i64(tmp);
4298 break;
4299 default: abort();
4300 }
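/* Illustrative note (informal): the (size << 1) | u key above means that,
 * e.g., size == 1 with u == 1 lands on case 3 (gen_helper_neon_mull_u16),
 * while the size == 2 cases skip the helpers and use a plain 32x32->64
 * TCG multiply instead. */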
4301
4302 /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
4303 Don't forget to clean them now. */
4304 if (size < 2) {
4305 tcg_temp_free_i32(a);
4306 tcg_temp_free_i32(b);
4307 }
4308 }
4309
4310 static void gen_neon_narrow_op(int op, int u, int size,
4311 TCGv_i32 dest, TCGv_i64 src)
4312 {
4313 if (op) {
4314 if (u) {
4315 gen_neon_unarrow_sats(size, dest, src);
4316 } else {
4317 gen_neon_narrow(size, dest, src);
4318 }
4319 } else {
4320 if (u) {
4321 gen_neon_narrow_satu(size, dest, src);
4322 } else {
4323 gen_neon_narrow_sats(size, dest, src);
4324 }
4325 }
4326 }
4327
4328 /* Symbolic constants for op fields for Neon 3-register same-length.
4329 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
4330 * table A7-9.
4331 */
4332 #define NEON_3R_VHADD 0
4333 #define NEON_3R_VQADD 1
4334 #define NEON_3R_VRHADD 2
4335 #define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
4336 #define NEON_3R_VHSUB 4
4337 #define NEON_3R_VQSUB 5
4338 #define NEON_3R_VCGT 6
4339 #define NEON_3R_VCGE 7
4340 #define NEON_3R_VSHL 8
4341 #define NEON_3R_VQSHL 9
4342 #define NEON_3R_VRSHL 10
4343 #define NEON_3R_VQRSHL 11
4344 #define NEON_3R_VMAX 12
4345 #define NEON_3R_VMIN 13
4346 #define NEON_3R_VABD 14
4347 #define NEON_3R_VABA 15
4348 #define NEON_3R_VADD_VSUB 16
4349 #define NEON_3R_VTST_VCEQ 17
4350 #define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
4351 #define NEON_3R_VMUL 19
4352 #define NEON_3R_VPMAX 20
4353 #define NEON_3R_VPMIN 21
4354 #define NEON_3R_VQDMULH_VQRDMULH 22
4355 #define NEON_3R_VPADD 23
4356 #define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
4357 #define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
4358 #define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
4359 #define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
4360 #define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
4361 #define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
4362 #define NEON_3R_VRECPS_VRSQRTS 31 /* float VRECPS, VRSQRTS */
4363
4364 static const uint8_t neon_3r_sizes[] = {
4365 [NEON_3R_VHADD] = 0x7,
4366 [NEON_3R_VQADD] = 0xf,
4367 [NEON_3R_VRHADD] = 0x7,
4368 [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
4369 [NEON_3R_VHSUB] = 0x7,
4370 [NEON_3R_VQSUB] = 0xf,
4371 [NEON_3R_VCGT] = 0x7,
4372 [NEON_3R_VCGE] = 0x7,
4373 [NEON_3R_VSHL] = 0xf,
4374 [NEON_3R_VQSHL] = 0xf,
4375 [NEON_3R_VRSHL] = 0xf,
4376 [NEON_3R_VQRSHL] = 0xf,
4377 [NEON_3R_VMAX] = 0x7,
4378 [NEON_3R_VMIN] = 0x7,
4379 [NEON_3R_VABD] = 0x7,
4380 [NEON_3R_VABA] = 0x7,
4381 [NEON_3R_VADD_VSUB] = 0xf,
4382 [NEON_3R_VTST_VCEQ] = 0x7,
4383 [NEON_3R_VML] = 0x7,
4384 [NEON_3R_VMUL] = 0x7,
4385 [NEON_3R_VPMAX] = 0x7,
4386 [NEON_3R_VPMIN] = 0x7,
4387 [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
4388 [NEON_3R_VPADD] = 0x7,
4389 [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
4390 [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
4391 [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
4392 [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
4393 [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
4394 [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
4395 [NEON_3R_VRECPS_VRSQRTS] = 0x5, /* size bit 1 encodes op */
4396 };
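/* Worked example (informal), tracing the decode used later in
 * disas_neon_data_insn(): insn bits [11:8] = 0b1101 with bit [4] = 0 give
 * op = 26 = NEON_3R_FLOAT_ARITH, and neon_3r_sizes[26] = 0x5 permits only
 * size values 0 and 2, since for the float ops the size field selects the
 * operation (VADD/VPADD vs VSUB/VABD) rather than an element width. */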
4397
4398 /* Symbolic constants for op fields for Neon 2-register miscellaneous.
4399 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
4400 * table A7-13.
4401 */
4402 #define NEON_2RM_VREV64 0
4403 #define NEON_2RM_VREV32 1
4404 #define NEON_2RM_VREV16 2
4405 #define NEON_2RM_VPADDL 4
4406 #define NEON_2RM_VPADDL_U 5
4407 #define NEON_2RM_VCLS 8
4408 #define NEON_2RM_VCLZ 9
4409 #define NEON_2RM_VCNT 10
4410 #define NEON_2RM_VMVN 11
4411 #define NEON_2RM_VPADAL 12
4412 #define NEON_2RM_VPADAL_U 13
4413 #define NEON_2RM_VQABS 14
4414 #define NEON_2RM_VQNEG 15
4415 #define NEON_2RM_VCGT0 16
4416 #define NEON_2RM_VCGE0 17
4417 #define NEON_2RM_VCEQ0 18
4418 #define NEON_2RM_VCLE0 19
4419 #define NEON_2RM_VCLT0 20
4420 #define NEON_2RM_VABS 22
4421 #define NEON_2RM_VNEG 23
4422 #define NEON_2RM_VCGT0_F 24
4423 #define NEON_2RM_VCGE0_F 25
4424 #define NEON_2RM_VCEQ0_F 26
4425 #define NEON_2RM_VCLE0_F 27
4426 #define NEON_2RM_VCLT0_F 28
4427 #define NEON_2RM_VABS_F 30
4428 #define NEON_2RM_VNEG_F 31
4429 #define NEON_2RM_VSWP 32
4430 #define NEON_2RM_VTRN 33
4431 #define NEON_2RM_VUZP 34
4432 #define NEON_2RM_VZIP 35
4433 #define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
4434 #define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
4435 #define NEON_2RM_VSHLL 38
4436 #define NEON_2RM_VCVT_F16_F32 44
4437 #define NEON_2RM_VCVT_F32_F16 46
4438 #define NEON_2RM_VRECPE 56
4439 #define NEON_2RM_VRSQRTE 57
4440 #define NEON_2RM_VRECPE_F 58
4441 #define NEON_2RM_VRSQRTE_F 59
4442 #define NEON_2RM_VCVT_FS 60
4443 #define NEON_2RM_VCVT_FU 61
4444 #define NEON_2RM_VCVT_SF 62
4445 #define NEON_2RM_VCVT_UF 63
4446
4447 static int neon_2rm_is_float_op(int op)
4448 {
4449 /* Return true if this neon 2reg-misc op is float-to-float */
4450 return (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F ||
4451 op >= NEON_2RM_VRECPE_F);
4452 }
4453
4454 /* Each entry in this array has bit n set if the insn allows
4455 * size value n (otherwise it will UNDEF). Since unallocated
4456 * op values will have no bits set they always UNDEF.
4457 */
4458 static const uint8_t neon_2rm_sizes[] = {
4459 [NEON_2RM_VREV64] = 0x7,
4460 [NEON_2RM_VREV32] = 0x3,
4461 [NEON_2RM_VREV16] = 0x1,
4462 [NEON_2RM_VPADDL] = 0x7,
4463 [NEON_2RM_VPADDL_U] = 0x7,
4464 [NEON_2RM_VCLS] = 0x7,
4465 [NEON_2RM_VCLZ] = 0x7,
4466 [NEON_2RM_VCNT] = 0x1,
4467 [NEON_2RM_VMVN] = 0x1,
4468 [NEON_2RM_VPADAL] = 0x7,
4469 [NEON_2RM_VPADAL_U] = 0x7,
4470 [NEON_2RM_VQABS] = 0x7,
4471 [NEON_2RM_VQNEG] = 0x7,
4472 [NEON_2RM_VCGT0] = 0x7,
4473 [NEON_2RM_VCGE0] = 0x7,
4474 [NEON_2RM_VCEQ0] = 0x7,
4475 [NEON_2RM_VCLE0] = 0x7,
4476 [NEON_2RM_VCLT0] = 0x7,
4477 [NEON_2RM_VABS] = 0x7,
4478 [NEON_2RM_VNEG] = 0x7,
4479 [NEON_2RM_VCGT0_F] = 0x4,
4480 [NEON_2RM_VCGE0_F] = 0x4,
4481 [NEON_2RM_VCEQ0_F] = 0x4,
4482 [NEON_2RM_VCLE0_F] = 0x4,
4483 [NEON_2RM_VCLT0_F] = 0x4,
4484 [NEON_2RM_VABS_F] = 0x4,
4485 [NEON_2RM_VNEG_F] = 0x4,
4486 [NEON_2RM_VSWP] = 0x1,
4487 [NEON_2RM_VTRN] = 0x7,
4488 [NEON_2RM_VUZP] = 0x7,
4489 [NEON_2RM_VZIP] = 0x7,
4490 [NEON_2RM_VMOVN] = 0x7,
4491 [NEON_2RM_VQMOVN] = 0x7,
4492 [NEON_2RM_VSHLL] = 0x7,
4493 [NEON_2RM_VCVT_F16_F32] = 0x2,
4494 [NEON_2RM_VCVT_F32_F16] = 0x2,
4495 [NEON_2RM_VRECPE] = 0x4,
4496 [NEON_2RM_VRSQRTE] = 0x4,
4497 [NEON_2RM_VRECPE_F] = 0x4,
4498 [NEON_2RM_VRSQRTE_F] = 0x4,
4499 [NEON_2RM_VCVT_FS] = 0x4,
4500 [NEON_2RM_VCVT_FU] = 0x4,
4501 [NEON_2RM_VCVT_SF] = 0x4,
4502 [NEON_2RM_VCVT_UF] = 0x4,
4503 };
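/* Worked example (informal): NEON_2RM_VCNT has mask 0x1, so VCNT only
 * exists for byte elements, while NEON_2RM_VCVT_F16_F32 has mask 0x2 and
 * is only legal with size == 1; any op value with no entry here gets a
 * zero mask and therefore always UNDEFs. */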
4504
4505 /* Translate a NEON data processing instruction. Return nonzero if the
4506 instruction is invalid.
4507 We process data in a mixture of 32-bit and 64-bit chunks.
4508 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4509
4510 static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
4511 {
4512 int op;
4513 int q;
4514 int rd, rn, rm;
4515 int size;
4516 int shift;
4517 int pass;
4518 int count;
4519 int pairwise;
4520 int u;
4521 uint32_t imm, mask;
4522 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
4523 TCGv_i64 tmp64;
4524
4525 if (!s->vfp_enabled)
4526 return 1;
4527 q = (insn & (1 << 6)) != 0;
4528 u = (insn >> 24) & 1;
4529 VFP_DREG_D(rd, insn);
4530 VFP_DREG_N(rn, insn);
4531 VFP_DREG_M(rm, insn);
4532 size = (insn >> 20) & 3;
4533 if ((insn & (1 << 23)) == 0) {
4534 /* Three register same length. */
4535 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4536 /* Catch invalid op and bad size combinations: UNDEF */
4537 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
4538 return 1;
4539 }
4540 /* All insns of this form UNDEF for either this condition or the
4541 * superset of cases "Q==1"; we catch the latter later.
4542 */
4543 if (q && ((rd | rn | rm) & 1)) {
4544 return 1;
4545 }
4546 if (size == 3 && op != NEON_3R_LOGIC) {
4547 /* 64-bit element instructions. */
4548 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4549 neon_load_reg64(cpu_V0, rn + pass);
4550 neon_load_reg64(cpu_V1, rm + pass);
4551 switch (op) {
4552 case NEON_3R_VQADD:
4553 if (u) {
4554 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
4555 cpu_V0, cpu_V1);
4556 } else {
4557 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
4558 cpu_V0, cpu_V1);
4559 }
4560 break;
4561 case NEON_3R_VQSUB:
4562 if (u) {
4563 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
4564 cpu_V0, cpu_V1);
4565 } else {
4566 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
4567 cpu_V0, cpu_V1);
4568 }
4569 break;
4570 case NEON_3R_VSHL:
4571 if (u) {
4572 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4573 } else {
4574 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4575 }
4576 break;
4577 case NEON_3R_VQSHL:
4578 if (u) {
4579 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4580 cpu_V1, cpu_V0);
4581 } else {
4582 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4583 cpu_V1, cpu_V0);
4584 }
4585 break;
4586 case NEON_3R_VRSHL:
4587 if (u) {
4588 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4589 } else {
4590 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4591 }
4592 break;
4593 case NEON_3R_VQRSHL:
4594 if (u) {
4595 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4596 cpu_V1, cpu_V0);
4597 } else {
4598 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4599 cpu_V1, cpu_V0);
4600 }
4601 break;
4602 case NEON_3R_VADD_VSUB:
4603 if (u) {
4604 tcg_gen_sub_i64(CPU_V001);
4605 } else {
4606 tcg_gen_add_i64(CPU_V001);
4607 }
4608 break;
4609 default:
4610 abort();
4611 }
4612 neon_store_reg64(cpu_V0, rd + pass);
4613 }
4614 return 0;
4615 }
4616 pairwise = 0;
4617 switch (op) {
4618 case NEON_3R_VSHL:
4619 case NEON_3R_VQSHL:
4620 case NEON_3R_VRSHL:
4621 case NEON_3R_VQRSHL:
4622 {
4623 int rtmp;
4624 /* Shift instruction operands are reversed. */
4625 rtmp = rn;
4626 rn = rm;
4627 rm = rtmp;
4628 }
4629 break;
4630 case NEON_3R_VPADD:
4631 if (u) {
4632 return 1;
4633 }
4634 /* Fall through */
4635 case NEON_3R_VPMAX:
4636 case NEON_3R_VPMIN:
4637 pairwise = 1;
4638 break;
4639 case NEON_3R_FLOAT_ARITH:
4640 pairwise = (u && size < 2); /* if VPADD (float) */
4641 break;
4642 case NEON_3R_FLOAT_MINMAX:
4643 pairwise = u; /* if VPMIN/VPMAX (float) */
4644 break;
4645 case NEON_3R_FLOAT_CMP:
4646 if (!u && size) {
4647 /* no encoding for U=0 C=1x */
4648 return 1;
4649 }
4650 break;
4651 case NEON_3R_FLOAT_ACMP:
4652 if (!u) {
4653 return 1;
4654 }
4655 break;
4656 case NEON_3R_VRECPS_VRSQRTS:
4657 if (u) {
4658 return 1;
4659 }
4660 break;
4661 case NEON_3R_VMUL:
4662 if (u && (size != 0)) {
4663 /* UNDEF on invalid size for polynomial subcase */
4664 return 1;
4665 }
4666 break;
4667 case NEON_3R_VFM:
4668 if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
4669 return 1;
4670 }
4671 break;
4672 default:
4673 break;
4674 }
4675
4676 if (pairwise && q) {
4677 /* All the pairwise insns UNDEF if Q is set */
4678 return 1;
4679 }
4680
4681 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4682
4683 if (pairwise) {
4684 /* Pairwise. */
4685 if (pass < 1) {
4686 tmp = neon_load_reg(rn, 0);
4687 tmp2 = neon_load_reg(rn, 1);
4688 } else {
4689 tmp = neon_load_reg(rm, 0);
4690 tmp2 = neon_load_reg(rm, 1);
4691 }
4692 } else {
4693 /* Elementwise. */
4694 tmp = neon_load_reg(rn, pass);
4695 tmp2 = neon_load_reg(rm, pass);
4696 }
4697 switch (op) {
4698 case NEON_3R_VHADD:
4699 GEN_NEON_INTEGER_OP(hadd);
4700 break;
4701 case NEON_3R_VQADD:
4702 GEN_NEON_INTEGER_OP_ENV(qadd);
4703 break;
4704 case NEON_3R_VRHADD:
4705 GEN_NEON_INTEGER_OP(rhadd);
4706 break;
4707 case NEON_3R_LOGIC: /* Logic ops. */
4708 switch ((u << 2) | size) {
4709 case 0: /* VAND */
4710 tcg_gen_and_i32(tmp, tmp, tmp2);
4711 break;
4712 case 1: /* BIC */
4713 tcg_gen_andc_i32(tmp, tmp, tmp2);
4714 break;
4715 case 2: /* VORR */
4716 tcg_gen_or_i32(tmp, tmp, tmp2);
4717 break;
4718 case 3: /* VORN */
4719 tcg_gen_orc_i32(tmp, tmp, tmp2);
4720 break;
4721 case 4: /* VEOR */
4722 tcg_gen_xor_i32(tmp, tmp, tmp2);
4723 break;
4724 case 5: /* VBSL */
4725 tmp3 = neon_load_reg(rd, pass);
4726 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4727 tcg_temp_free_i32(tmp3);
4728 break;
4729 case 6: /* VBIT */
4730 tmp3 = neon_load_reg(rd, pass);
4731 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4732 tcg_temp_free_i32(tmp3);
4733 break;
4734 case 7: /* VBIF */
4735 tmp3 = neon_load_reg(rd, pass);
4736 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4737 tcg_temp_free_i32(tmp3);
4738 break;
4739 }
4740 break;
4741 case NEON_3R_VHSUB:
4742 GEN_NEON_INTEGER_OP(hsub);
4743 break;
4744 case NEON_3R_VQSUB:
4745 GEN_NEON_INTEGER_OP_ENV(qsub);
4746 break;
4747 case NEON_3R_VCGT:
4748 GEN_NEON_INTEGER_OP(cgt);
4749 break;
4750 case NEON_3R_VCGE:
4751 GEN_NEON_INTEGER_OP(cge);
4752 break;
4753 case NEON_3R_VSHL:
4754 GEN_NEON_INTEGER_OP(shl);
4755 break;
4756 case NEON_3R_VQSHL:
4757 GEN_NEON_INTEGER_OP_ENV(qshl);
4758 break;
4759 case NEON_3R_VRSHL:
4760 GEN_NEON_INTEGER_OP(rshl);
4761 break;
4762 case NEON_3R_VQRSHL:
4763 GEN_NEON_INTEGER_OP_ENV(qrshl);
4764 break;
4765 case NEON_3R_VMAX:
4766 GEN_NEON_INTEGER_OP(max);
4767 break;
4768 case NEON_3R_VMIN:
4769 GEN_NEON_INTEGER_OP(min);
4770 break;
4771 case NEON_3R_VABD:
4772 GEN_NEON_INTEGER_OP(abd);
4773 break;
4774 case NEON_3R_VABA:
4775 GEN_NEON_INTEGER_OP(abd);
4776 tcg_temp_free_i32(tmp2);
4777 tmp2 = neon_load_reg(rd, pass);
4778 gen_neon_add(size, tmp, tmp2);
4779 break;
4780 case NEON_3R_VADD_VSUB:
4781 if (!u) { /* VADD */
4782 gen_neon_add(size, tmp, tmp2);
4783 } else { /* VSUB */
4784 switch (size) {
4785 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4786 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4787 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4788 default: abort();
4789 }
4790 }
4791 break;
4792 case NEON_3R_VTST_VCEQ:
4793 if (!u) { /* VTST */
4794 switch (size) {
4795 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4796 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4797 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4798 default: abort();
4799 }
4800 } else { /* VCEQ */
4801 switch (size) {
4802 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4803 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4804 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4805 default: abort();
4806 }
4807 }
4808 break;
4809 case NEON_3R_VML: /* VMLA, VMLAL, VMLS, VMLSL */
4810 switch (size) {
4811 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4812 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4813 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4814 default: abort();
4815 }
4816 tcg_temp_free_i32(tmp2);
4817 tmp2 = neon_load_reg(rd, pass);
4818 if (u) { /* VMLS */
4819 gen_neon_rsb(size, tmp, tmp2);
4820 } else { /* VMLA */
4821 gen_neon_add(size, tmp, tmp2);
4822 }
4823 break;
4824 case NEON_3R_VMUL:
4825 if (u) { /* polynomial */
4826 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4827 } else { /* Integer */
4828 switch (size) {
4829 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4830 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4831 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4832 default: abort();
4833 }
4834 }
4835 break;
4836 case NEON_3R_VPMAX:
4837 GEN_NEON_INTEGER_OP(pmax);
4838 break;
4839 case NEON_3R_VPMIN:
4840 GEN_NEON_INTEGER_OP(pmin);
4841 break;
4842 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
4843 if (!u) { /* VQDMULH */
4844 switch (size) {
4845 case 1:
4846 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
4847 break;
4848 case 2:
4849 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
4850 break;
4851 default: abort();
4852 }
4853 } else { /* VQRDMULH */
4854 switch (size) {
4855 case 1:
4856 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
4857 break;
4858 case 2:
4859 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
4860 break;
4861 default: abort();
4862 }
4863 }
4864 break;
4865 case NEON_3R_VPADD:
4866 switch (size) {
4867 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4868 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4869 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4870 default: abort();
4871 }
4872 break;
4873 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
4874 {
4875 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4876 switch ((u << 2) | size) {
4877 case 0: /* VADD */
4878 case 4: /* VPADD */
4879 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4880 break;
4881 case 2: /* VSUB */
4882 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
4883 break;
4884 case 6: /* VABD */
4885 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
4886 break;
4887 default:
4888 abort();
4889 }
4890 tcg_temp_free_ptr(fpstatus);
4891 break;
4892 }
4893 case NEON_3R_FLOAT_MULTIPLY:
4894 {
4895 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4896 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
4897 if (!u) {
4898 tcg_temp_free_i32(tmp2);
4899 tmp2 = neon_load_reg(rd, pass);
4900 if (size == 0) {
4901 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
4902 } else {
4903 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
4904 }
4905 }
4906 tcg_temp_free_ptr(fpstatus);
4907 break;
4908 }
4909 case NEON_3R_FLOAT_CMP:
4910 {
4911 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4912 if (!u) {
4913 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
4914 } else {
4915 if (size == 0) {
4916 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
4917 } else {
4918 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
4919 }
4920 }
4921 tcg_temp_free_ptr(fpstatus);
4922 break;
4923 }
4924 case NEON_3R_FLOAT_ACMP:
4925 {
4926 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4927 if (size == 0) {
4928 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
4929 } else {
4930 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
4931 }
4932 tcg_temp_free_ptr(fpstatus);
4933 break;
4934 }
4935 case NEON_3R_FLOAT_MINMAX:
4936 {
4937 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4938 if (size == 0) {
4939 gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
4940 } else {
4941 gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
4942 }
4943 tcg_temp_free_ptr(fpstatus);
4944 break;
4945 }
4946 case NEON_3R_VRECPS_VRSQRTS:
4947 if (size == 0)
4948 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4949 else
4950 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4951 break;
4952 case NEON_3R_VFM:
4953 {
4954 /* VFMA, VFMS: fused multiply-add */
4955 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
4956 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
4957 if (size) {
4958 /* VFMS */
4959 gen_helper_vfp_negs(tmp, tmp);
4960 }
4961 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
4962 tcg_temp_free_i32(tmp3);
4963 tcg_temp_free_ptr(fpstatus);
4964 break;
4965 }
4966 default:
4967 abort();
4968 }
4969 tcg_temp_free_i32(tmp2);
4970
4971 /* Save the result. For elementwise operations we can put it
4972 straight into the destination register. For pairwise operations
4973 we have to be careful to avoid clobbering the source operands. */
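/* Example of the hazard (informal): for "vpadd.i32 d0, d2, d0" rd == rm,
 * so storing the pass-0 result straight into d0 would corrupt the rm
 * operand that pass 1 still needs; hence the scratch staging below. */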
4974 if (pairwise && rd == rm) {
4975 neon_store_scratch(pass, tmp);
4976 } else {
4977 neon_store_reg(rd, pass, tmp);
4978 }
4979
4980 } /* for pass */
4981 if (pairwise && rd == rm) {
4982 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4983 tmp = neon_load_scratch(pass);
4984 neon_store_reg(rd, pass, tmp);
4985 }
4986 }
4987 /* End of 3 register same size operations. */
4988 } else if (insn & (1 << 4)) {
4989 if ((insn & 0x00380080) != 0) {
4990 /* Two registers and shift. */
4991 op = (insn >> 8) & 0xf;
4992 if (insn & (1 << 7)) {
4993 /* 64-bit shift. */
4994 if (op > 7) {
4995 return 1;
4996 }
4997 size = 3;
4998 } else {
4999 size = 2;
5000 while ((insn & (1 << (size + 19))) == 0)
5001 size--;
5002 }
5003 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
5004 /* To avoid excessive duplication of ops we implement shift
5005 by immediate using the variable shift operations. */
5006 if (op < 8) {
5007 /* Shift by immediate:
5008 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
5009 if (q && ((rd | rm) & 1)) {
5010 return 1;
5011 }
5012 if (!u && (op == 4 || op == 6)) {
5013 return 1;
5014 }
5015 /* Right shifts are encoded as N - shift, where N is the
5016 element size in bits. */
5017 if (op <= 4)
5018 shift = shift - (1 << (size + 3));
5019 if (size == 3) {
5020 count = q + 1;
5021 } else {
5022 count = q ? 4: 2;
5023 }
5024 switch (size) {
5025 case 0:
5026 imm = (uint8_t) shift;
5027 imm |= imm << 8;
5028 imm |= imm << 16;
5029 break;
5030 case 1:
5031 imm = (uint16_t) shift;
5032 imm |= imm << 16;
5033 break;
5034 case 2:
5035 case 3:
5036 imm = shift;
5037 break;
5038 default:
5039 abort();
5040 }
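/* Worked example (informal): VSHR.U8 #3 encodes imm6 = 13, so the field
 * extracted above is 5 and becomes 5 - 8 = -3 after the adjustment;
 * replicating the low byte gives imm = 0xfdfdfdfd, and the variable-shift
 * helpers below treat the negative per-lane count as a right shift by 3. */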
5041
5042 for (pass = 0; pass < count; pass++) {
5043 if (size == 3) {
5044 neon_load_reg64(cpu_V0, rm + pass);
5045 tcg_gen_movi_i64(cpu_V1, imm);
5046 switch (op) {
5047 case 0: /* VSHR */
5048 case 1: /* VSRA */
5049 if (u)
5050 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5051 else
5052 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
5053 break;
5054 case 2: /* VRSHR */
5055 case 3: /* VRSRA */
5056 if (u)
5057 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
5058 else
5059 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
5060 break;
5061 case 4: /* VSRI */
5062 case 5: /* VSHL, VSLI */
5063 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5064 break;
5065 case 6: /* VQSHLU */
5066 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5067 cpu_V0, cpu_V1);
5068 break;
5069 case 7: /* VQSHL */
5070 if (u) {
5071 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5072 cpu_V0, cpu_V1);
5073 } else {
5074 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5075 cpu_V0, cpu_V1);
5076 }
5077 break;
5078 }
5079 if (op == 1 || op == 3) {
5080 /* Accumulate. */
5081 neon_load_reg64(cpu_V1, rd + pass);
5082 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5083 } else if (op == 4 || (op == 5 && u)) {
5084 /* Insert */
5085 neon_load_reg64(cpu_V1, rd + pass);
5086 uint64_t mask;
5087 if (shift < -63 || shift > 63) {
5088 mask = 0;
5089 } else {
5090 if (op == 4) {
5091 mask = 0xffffffffffffffffull >> -shift;
5092 } else {
5093 mask = 0xffffffffffffffffull << shift;
5094 }
5095 }
5096 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5097 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5098 }
5099 neon_store_reg64(cpu_V0, rd + pass);
5100 } else { /* size < 3 */
5101 /* Operands in tmp and tmp2. */
5102 tmp = neon_load_reg(rm, pass);
5103 tmp2 = tcg_temp_new_i32();
5104 tcg_gen_movi_i32(tmp2, imm);
5105 switch (op) {
5106 case 0: /* VSHR */
5107 case 1: /* VSRA */
5108 GEN_NEON_INTEGER_OP(shl);
5109 break;
5110 case 2: /* VRSHR */
5111 case 3: /* VRSRA */
5112 GEN_NEON_INTEGER_OP(rshl);
5113 break;
5114 case 4: /* VSRI */
5115 case 5: /* VSHL, VSLI */
5116 switch (size) {
5117 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5118 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5119 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
5120 default: abort();
5121 }
5122 break;
5123 case 6: /* VQSHLU */
5124 switch (size) {
5125 case 0:
5126 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5127 tmp, tmp2);
5128 break;
5129 case 1:
5130 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5131 tmp, tmp2);
5132 break;
5133 case 2:
5134 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5135 tmp, tmp2);
5136 break;
5137 default:
5138 abort();
5139 }
5140 break;
5141 case 7: /* VQSHL */
5142 GEN_NEON_INTEGER_OP_ENV(qshl);
5143 break;
5144 }
5145 tcg_temp_free_i32(tmp2);
5146
5147 if (op == 1 || op == 3) {
5148 /* Accumulate. */
5149 tmp2 = neon_load_reg(rd, pass);
5150 gen_neon_add(size, tmp, tmp2);
5151 tcg_temp_free_i32(tmp2);
5152 } else if (op == 4 || (op == 5 && u)) {
5153 /* Insert */
5154 switch (size) {
5155 case 0:
5156 if (op == 4)
5157 mask = 0xff >> -shift;
5158 else
5159 mask = (uint8_t)(0xff << shift);
5160 mask |= mask << 8;
5161 mask |= mask << 16;
5162 break;
5163 case 1:
5164 if (op == 4)
5165 mask = 0xffff >> -shift;
5166 else
5167 mask = (uint16_t)(0xffff << shift);
5168 mask |= mask << 16;
5169 break;
5170 case 2:
5171 if (shift < -31 || shift > 31) {
5172 mask = 0;
5173 } else {
5174 if (op == 4)
5175 mask = 0xffffffffu >> -shift;
5176 else
5177 mask = 0xffffffffu << shift;
5178 }
5179 break;
5180 default:
5181 abort();
5182 }
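/* Worked example (informal): VSRI.32 #8 arrives here with op == 4 and
 * shift == -8, so mask = 0xffffffff >> 8 = 0x00ffffff: the shifted source
 * supplies the low 24 bits while the top 8 bits of the destination are
 * preserved by the ~mask AND below. */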
5183 tmp2 = neon_load_reg(rd, pass);
5184 tcg_gen_andi_i32(tmp, tmp, mask);
5185 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
5186 tcg_gen_or_i32(tmp, tmp, tmp2);
5187 tcg_temp_free_i32(tmp2);
5188 }
5189 neon_store_reg(rd, pass, tmp);
5190 }
5191 } /* for pass */
5192 } else if (op < 10) {
5193 /* Shift by immediate and narrow:
5194 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
5195 int input_unsigned = (op == 8) ? !u : u;
5196 if (rm & 1) {
5197 return 1;
5198 }
5199 shift = shift - (1 << (size + 3));
5200 size++;
5201 if (size == 3) {
5202 tmp64 = tcg_const_i64(shift);
5203 neon_load_reg64(cpu_V0, rm);
5204 neon_load_reg64(cpu_V1, rm + 1);
5205 for (pass = 0; pass < 2; pass++) {
5206 TCGv_i64 in;
5207 if (pass == 0) {
5208 in = cpu_V0;
5209 } else {
5210 in = cpu_V1;
5211 }
5212 if (q) {
5213 if (input_unsigned) {
5214 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
5215 } else {
5216 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
5217 }
5218 } else {
5219 if (input_unsigned) {
5220 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
5221 } else {
5222 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
5223 }
5224 }
5225 tmp = tcg_temp_new_i32();
5226 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5227 neon_store_reg(rd, pass, tmp);
5228 } /* for pass */
5229 tcg_temp_free_i64(tmp64);
5230 } else {
5231 if (size == 1) {
5232 imm = (uint16_t)shift;
5233 imm |= imm << 16;
5234 } else {
5235 /* size == 2 */
5236 imm = (uint32_t)shift;
5237 }
5238 tmp2 = tcg_const_i32(imm);
5239 tmp4 = neon_load_reg(rm + 1, 0);
5240 tmp5 = neon_load_reg(rm + 1, 1);
5241 for (pass = 0; pass < 2; pass++) {
5242 if (pass == 0) {
5243 tmp = neon_load_reg(rm, 0);
5244 } else {
5245 tmp = tmp4;
5246 }
5247 gen_neon_shift_narrow(size, tmp, tmp2, q,
5248 input_unsigned);
5249 if (pass == 0) {
5250 tmp3 = neon_load_reg(rm, 1);
5251 } else {
5252 tmp3 = tmp5;
5253 }
5254 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5255 input_unsigned);
5256 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
5257 tcg_temp_free_i32(tmp);
5258 tcg_temp_free_i32(tmp3);
5259 tmp = tcg_temp_new_i32();
5260 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5261 neon_store_reg(rd, pass, tmp);
5262 } /* for pass */
5263 tcg_temp_free_i32(tmp2);
5264 }
5265 } else if (op == 10) {
5266 /* VSHLL, VMOVL */
5267 if (q || (rd & 1)) {
5268 return 1;
5269 }
5270 tmp = neon_load_reg(rm, 0);
5271 tmp2 = neon_load_reg(rm, 1);
5272 for (pass = 0; pass < 2; pass++) {
5273 if (pass == 1)
5274 tmp = tmp2;
5275
5276 gen_neon_widen(cpu_V0, tmp, size, u);
5277
5278 if (shift != 0) {
5279 /* The shift is less than the width of the source
5280 type, so we can just shift the whole register. */
5281 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5282 /* Widen the result of shift: we need to clear
5283 * the potential overflow bits resulting from
5284 * left bits of the narrow input appearing as
5285 * right bits of the left neighbouring narrow
5286 * input. */
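/* Worked example (informal): for VSHLL.S8 #3 each byte is sign-extended
 * into a 16-bit lane, so the 64-bit left shift pushes the top three sign
 * bits of lane k into the low bits of lane k+1; imm becomes
 * 0xff >> 5 = 0x07, replicated to 0x0007000700070007, and clearing those
 * bits is safe because a correct per-lane shift leaves them zero anyway. */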
5287 if (size < 2 || !u) {
5288 uint64_t imm64;
5289 if (size == 0) {
5290 imm = (0xffu >> (8 - shift));
5291 imm |= imm << 16;
5292 } else if (size == 1) {
5293 imm = 0xffff >> (16 - shift);
5294 } else {
5295 /* size == 2 */
5296 imm = 0xffffffff >> (32 - shift);
5297 }
5298 if (size < 2) {
5299 imm64 = imm | (((uint64_t)imm) << 32);
5300 } else {
5301 imm64 = imm;
5302 }
5303 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
5304 }
5305 }
5306 neon_store_reg64(cpu_V0, rd + pass);
5307 }
5308 } else if (op >= 14) {
5309 /* VCVT fixed-point. */
5310 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5311 return 1;
5312 }
5313 /* We have already masked out the must-be-1 top bit of imm6,
5314 * hence this 32-shift where the ARM ARM has 64-imm6.
5315 */
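/* Worked example (informal): a conversion with 12 fraction bits encodes
 * imm6 = 64 - 12 = 52; the 5-bit field extracted earlier already dropped
 * the must-be-1 top bit, leaving shift = 20, and 32 - 20 recovers the 12. */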
5316 shift = 32 - shift;
5317 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5318 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
5319 if (!(op & 1)) {
5320 if (u)
5321 gen_vfp_ulto(0, shift, 1);
5322 else
5323 gen_vfp_slto(0, shift, 1);
5324 } else {
5325 if (u)
5326 gen_vfp_toul(0, shift, 1);
5327 else
5328 gen_vfp_tosl(0, shift, 1);
5329 }
5330 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
5331 }
5332 } else {
5333 return 1;
5334 }
5335 } else { /* (insn & 0x00380080) == 0 */
5336 int invert;
5337 if (q && (rd & 1)) {
5338 return 1;
5339 }
5340
5341 op = (insn >> 8) & 0xf;
5342 /* One register and immediate. */
5343 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5344 invert = (insn & (1 << 5)) != 0;
5345 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5346 * We choose to not special-case this and will behave as if a
5347 * valid constant encoding of 0 had been given.
5348 */
5349 switch (op) {
5350 case 0: case 1:
5351 /* no-op */
5352 break;
5353 case 2: case 3:
5354 imm <<= 8;
5355 break;
5356 case 4: case 5:
5357 imm <<= 16;
5358 break;
5359 case 6: case 7:
5360 imm <<= 24;
5361 break;
5362 case 8: case 9:
5363 imm |= imm << 16;
5364 break;
5365 case 10: case 11:
5366 imm = (imm << 8) | (imm << 24);
5367 break;
5368 case 12:
5369 imm = (imm << 8) | 0xff;
5370 break;
5371 case 13:
5372 imm = (imm << 16) | 0xffff;
5373 break;
5374 case 14:
5375 imm |= (imm << 8) | (imm << 16) | (imm << 24);
5376 if (invert)
5377 imm = ~imm;
5378 break;
5379 case 15:
5380 if (invert) {
5381 return 1;
5382 }
5383 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
5384 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
5385 break;
5386 }
5387 if (invert)
5388 imm = ~imm;
5389
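/* Worked example (informal), ignoring the invert bit: op == 12 with
 * abcdefgh = 0x4d expands to 0x4dff ("shifted ones"), and op == 8 with
 * 0x5a expands to 0x005a005a, i.e. the pattern each 32-bit pass below
 * writes into the destination. */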
5390 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5391 if (op & 1 && op < 12) {
5392 tmp = neon_load_reg(rd, pass);
5393 if (invert) {
5394 /* The immediate value has already been inverted, so
5395 BIC becomes AND. */
5396 tcg_gen_andi_i32(tmp, tmp, imm);
5397 } else {
5398 tcg_gen_ori_i32(tmp, tmp, imm);
5399 }
5400 } else {
5401 /* VMOV, VMVN. */
5402 tmp = tcg_temp_new_i32();
5403 if (op == 14 && invert) {
5404 int n;
5405 uint32_t val;
5406 val = 0;
5407 for (n = 0; n < 4; n++) {
5408 if (imm & (1 << (n + (pass & 1) * 4)))
5409 val |= 0xff << (n * 8);
5410 }
5411 tcg_gen_movi_i32(tmp, val);
5412 } else {
5413 tcg_gen_movi_i32(tmp, imm);
5414 }
5415 }
5416 neon_store_reg(rd, pass, tmp);
5417 }
5418 }
5419 } else { /* (insn & 0x00800010 == 0x00800000) */
5420 if (size != 3) {
5421 op = (insn >> 8) & 0xf;
5422 if ((insn & (1 << 6)) == 0) {
5423 /* Three registers of different lengths. */
5424 int src1_wide;
5425 int src2_wide;
5426 int prewiden;
5427 /* undefreq: bit 0 : UNDEF if size != 0
5428 * bit 1 : UNDEF if size == 0
5429 * bit 2 : UNDEF if U == 1
5430 * Note that [1:0] set implies 'always UNDEF'
5431 */
5432 int undefreq;
5433 /* prewiden, src1_wide, src2_wide, undefreq */
5434 static const int neon_3reg_wide[16][4] = {
5435 {1, 0, 0, 0}, /* VADDL */
5436 {1, 1, 0, 0}, /* VADDW */
5437 {1, 0, 0, 0}, /* VSUBL */
5438 {1, 1, 0, 0}, /* VSUBW */
5439 {0, 1, 1, 0}, /* VADDHN */
5440 {0, 0, 0, 0}, /* VABAL */
5441 {0, 1, 1, 0}, /* VSUBHN */
5442 {0, 0, 0, 0}, /* VABDL */
5443 {0, 0, 0, 0}, /* VMLAL */
5444 {0, 0, 0, 6}, /* VQDMLAL */
5445 {0, 0, 0, 0}, /* VMLSL */
5446 {0, 0, 0, 6}, /* VQDMLSL */
5447 {0, 0, 0, 0}, /* Integer VMULL */
5448 {0, 0, 0, 2}, /* VQDMULL */
5449 {0, 0, 0, 5}, /* Polynomial VMULL */
5450 {0, 0, 0, 3}, /* Reserved: always UNDEF */
5451 };
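/* Worked example (informal): VQDMULL's undefreq of 2 makes it UNDEF for
 * size == 0 (no byte-element variant), while polynomial VMULL's 5 sets
 * bits 0 and 2 and so rejects both size != 0 and U == 1. */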
5452
5453 prewiden = neon_3reg_wide[op][0];
5454 src1_wide = neon_3reg_wide[op][1];
5455 src2_wide = neon_3reg_wide[op][2];
5456 undefreq = neon_3reg_wide[op][3];
5457
5458 if (((undefreq & 1) && (size != 0)) ||
5459 ((undefreq & 2) && (size == 0)) ||
5460 ((undefreq & 4) && u)) {
5461 return 1;
5462 }
5463 if ((src1_wide && (rn & 1)) ||
5464 (src2_wide && (rm & 1)) ||
5465 (!src2_wide && (rd & 1))) {
5466 return 1;
5467 }
5468
5469 /* Avoid overlapping operands. Wide source operands are
5470 always aligned so will never overlap with wide
5471 destinations in problematic ways. */
5472 if (rd == rm && !src2_wide) {
5473 tmp = neon_load_reg(rm, 1);
5474 neon_store_scratch(2, tmp);
5475 } else if (rd == rn && !src1_wide) {
5476 tmp = neon_load_reg(rn, 1);
5477 neon_store_scratch(2, tmp);
5478 }
5479 TCGV_UNUSED_I32(tmp3);
5480 for (pass = 0; pass < 2; pass++) {
5481 if (src1_wide) {
5482 neon_load_reg64(cpu_V0, rn + pass);
5483 TCGV_UNUSED_I32(tmp);
5484 } else {
5485 if (pass == 1 && rd == rn) {
5486 tmp = neon_load_scratch(2);
5487 } else {
5488 tmp = neon_load_reg(rn, pass);
5489 }
5490 if (prewiden) {
5491 gen_neon_widen(cpu_V0, tmp, size, u);
5492 }
5493 }
5494 if (src2_wide) {
5495 neon_load_reg64(cpu_V1, rm + pass);
5496 TCGV_UNUSED_I32(tmp2);
5497 } else {
5498 if (pass == 1 && rd == rm) {
5499 tmp2 = neon_load_scratch(2);
5500 } else {
5501 tmp2 = neon_load_reg(rm, pass);
5502 }
5503 if (prewiden) {
5504 gen_neon_widen(cpu_V1, tmp2, size, u);
5505 }
5506 }
5507 switch (op) {
5508 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5509 gen_neon_addl(size);
5510 break;
5511 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5512 gen_neon_subl(size);
5513 break;
5514 case 5: case 7: /* VABAL, VABDL */
5515 switch ((size << 1) | u) {
5516 case 0:
5517 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5518 break;
5519 case 1:
5520 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5521 break;
5522 case 2:
5523 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5524 break;
5525 case 3:
5526 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5527 break;
5528 case 4:
5529 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5530 break;
5531 case 5:
5532 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5533 break;
5534 default: abort();
5535 }
5536 tcg_temp_free_i32(tmp2);
5537 tcg_temp_free_i32(tmp);
5538 break;
5539 case 8: case 9: case 10: case 11: case 12: case 13:
5540 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5541 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5542 break;
5543 case 14: /* Polynomial VMULL */
5544 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
5545 tcg_temp_free_i32(tmp2);
5546 tcg_temp_free_i32(tmp);
5547 break;
5548 default: /* 15 is RESERVED: caught earlier */
5549 abort();
5550 }
5551 if (op == 13) {
5552 /* VQDMULL */
5553 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5554 neon_store_reg64(cpu_V0, rd + pass);
5555 } else if (op == 5 || (op >= 8 && op <= 11)) {
5556 /* Accumulate. */
5557 neon_load_reg64(cpu_V1, rd + pass);
5558 switch (op) {
5559 case 10: /* VMLSL */
5560 gen_neon_negl(cpu_V0, size);
5561 /* Fall through */
5562 case 5: case 8: /* VABAL, VMLAL */
5563 gen_neon_addl(size);
5564 break;
5565 case 9: case 11: /* VQDMLAL, VQDMLSL */
5566 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5567 if (op == 11) {
5568 gen_neon_negl(cpu_V0, size);
5569 }
5570 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5571 break;
5572 default:
5573 abort();
5574 }
5575 neon_store_reg64(cpu_V0, rd + pass);
5576 } else if (op == 4 || op == 6) {
5577 /* Narrowing operation. */
5578 tmp = tcg_temp_new_i32();
5579 if (!u) {
5580 switch (size) {
5581 case 0:
5582 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5583 break;
5584 case 1:
5585 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5586 break;
5587 case 2:
5588 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5589 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5590 break;
5591 default: abort();
5592 }
5593 } else {
5594 switch (size) {
5595 case 0:
5596 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5597 break;
5598 case 1:
5599 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5600 break;
5601 case 2:
5602 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5603 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5604 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5605 break;
5606 default: abort();
5607 }
5608 }
5609 if (pass == 0) {
5610 tmp3 = tmp;
5611 } else {
5612 neon_store_reg(rd, 0, tmp3);
5613 neon_store_reg(rd, 1, tmp);
5614 }
5615 } else {
5616 /* Write back the result. */
5617 neon_store_reg64(cpu_V0, rd + pass);
5618 }
5619 }
5620 } else {
5621 /* Two registers and a scalar. NB that for ops of this form
5622 * the ARM ARM labels bit 24 as Q, but it is in our variable
5623 * 'u', not 'q'.
5624 */
5625 if (size == 0) {
5626 return 1;
5627 }
5628 switch (op) {
5629 case 1: /* Floating point VMLA scalar */
5630 case 5: /* Floating point VMLS scalar */
5631 case 9: /* Floating point VMUL scalar */
5632 if (size == 1) {
5633 return 1;
5634 }
5635 /* fall through */
5636 case 0: /* Integer VMLA scalar */
5637 case 4: /* Integer VMLS scalar */
5638 case 8: /* Integer VMUL scalar */
5639 case 12: /* VQDMULH scalar */
5640 case 13: /* VQRDMULH scalar */
5641 if (u && ((rd | rn) & 1)) {
5642 return 1;
5643 }
5644 tmp = neon_get_scalar(size, rm);
5645 neon_store_scratch(0, tmp);
5646 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5647 tmp = neon_load_scratch(0);
5648 tmp2 = neon_load_reg(rn, pass);
5649 if (op == 12) {
5650 if (size == 1) {
5651 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5652 } else {
5653 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5654 }
5655 } else if (op == 13) {
5656 if (size == 1) {
5657 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5658 } else {
5659 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5660 }
5661 } else if (op & 1) {
5662 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5663 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
5664 tcg_temp_free_ptr(fpstatus);
5665 } else {
5666 switch (size) {
5667 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5668 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5669 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5670 default: abort();
5671 }
5672 }
5673 tcg_temp_free_i32(tmp2);
5674 if (op < 8) {
5675 /* Accumulate. */
5676 tmp2 = neon_load_reg(rd, pass);
5677 switch (op) {
5678 case 0:
5679 gen_neon_add(size, tmp, tmp2);
5680 break;
5681 case 1:
5682 {
5683 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5684 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
5685 tcg_temp_free_ptr(fpstatus);
5686 break;
5687 }
5688 case 4:
5689 gen_neon_rsb(size, tmp, tmp2);
5690 break;
5691 case 5:
5692 {
5693 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5694 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
5695 tcg_temp_free_ptr(fpstatus);
5696 break;
5697 }
5698 default:
5699 abort();
5700 }
5701 tcg_temp_free_i32(tmp2);
5702 }
5703 neon_store_reg(rd, pass, tmp);
5704 }
5705 break;
5706 case 3: /* VQDMLAL scalar */
5707 case 7: /* VQDMLSL scalar */
5708 case 11: /* VQDMULL scalar */
5709 if (u == 1) {
5710 return 1;
5711 }
5712 /* fall through */
5713 case 2: /* VMLAL scalar */
5714 case 6: /* VMLSL scalar */
5715 case 10: /* VMULL scalar */
5716 if (rd & 1) {
5717 return 1;
5718 }
5719 tmp2 = neon_get_scalar(size, rm);
5720 /* We need a copy of tmp2 because gen_neon_mull
5721 * frees it during pass 0. */
5722 tmp4 = tcg_temp_new_i32();
5723 tcg_gen_mov_i32(tmp4, tmp2);
5724 tmp3 = neon_load_reg(rn, 1);
5725
5726 for (pass = 0; pass < 2; pass++) {
5727 if (pass == 0) {
5728 tmp = neon_load_reg(rn, 0);
5729 } else {
5730 tmp = tmp3;
5731 tmp2 = tmp4;
5732 }
5733 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5734 if (op != 11) {
5735 neon_load_reg64(cpu_V1, rd + pass);
5736 }
5737 switch (op) {
5738 case 6:
5739 gen_neon_negl(cpu_V0, size);
5740 /* Fall through */
5741 case 2:
5742 gen_neon_addl(size);
5743 break;
5744 case 3: case 7:
5745 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5746 if (op == 7) {
5747 gen_neon_negl(cpu_V0, size);
5748 }
5749 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5750 break;
5751 case 10:
5752 /* no-op */
5753 break;
5754 case 11:
5755 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5756 break;
5757 default:
5758 abort();
5759 }
5760 neon_store_reg64(cpu_V0, rd + pass);
5761 }
5762
5763
5764 break;
5765 default: /* 14 and 15 are RESERVED */
5766 return 1;
5767 }
5768 }
5769 } else { /* size == 3 */
5770 if (!u) {
5771 /* Extract. */
5772 imm = (insn >> 8) & 0xf;
5773
5774 if (imm > 7 && !q)
5775 return 1;
5776
5777 if (q && ((rd | rn | rm) & 1)) {
5778 return 1;
5779 }
5780
5781 if (imm == 0) {
5782 neon_load_reg64(cpu_V0, rn);
5783 if (q) {
5784 neon_load_reg64(cpu_V1, rn + 1);
5785 }
5786 } else if (imm == 8) {
5787 neon_load_reg64(cpu_V0, rn + 1);
5788 if (q) {
5789 neon_load_reg64(cpu_V1, rm);
5790 }
5791 } else if (q) {
5792 tmp64 = tcg_temp_new_i64();
5793 if (imm < 8) {
5794 neon_load_reg64(cpu_V0, rn);
5795 neon_load_reg64(tmp64, rn + 1);
5796 } else {
5797 neon_load_reg64(cpu_V0, rn + 1);
5798 neon_load_reg64(tmp64, rm);
5799 }
5800 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5801 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5802 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5803 if (imm < 8) {
5804 neon_load_reg64(cpu_V1, rm);
5805 } else {
5806 neon_load_reg64(cpu_V1, rm + 1);
5807 imm -= 8;
5808 }
5809 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5810 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5811 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5812 tcg_temp_free_i64(tmp64);
5813 } else {
5814 /* BUGFIX: Q == 0 and 0 < imm < 8, so the extract spans only the rn:rm pair. */
5815 neon_load_reg64(cpu_V0, rn);
5816 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5817 neon_load_reg64(cpu_V1, rm);
5818 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5819 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5820 }
5821 neon_store_reg64(cpu_V0, rd);
5822 if (q) {
5823 neon_store_reg64(cpu_V1, rd + 1);
5824 }
5825 } else if ((insn & (1 << 11)) == 0) {
5826 /* Two register misc. */
5827 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5828 size = (insn >> 18) & 3;
5829 /* UNDEF for unknown op values and bad op-size combinations */
5830 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
5831 return 1;
5832 }
5833 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
5834 q && ((rm | rd) & 1)) {
5835 return 1;
5836 }
5837 switch (op) {
5838 case NEON_2RM_VREV64:
5839 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5840 tmp = neon_load_reg(rm, pass * 2);
5841 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5842 switch (size) {
5843 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5844 case 1: gen_swap_half(tmp); break;
5845 case 2: /* no-op */ break;
5846 default: abort();
5847 }
5848 neon_store_reg(rd, pass * 2 + 1, tmp);
5849 if (size == 2) {
5850 neon_store_reg(rd, pass * 2, tmp2);
5851 } else {
5852 switch (size) {
5853 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5854 case 1: gen_swap_half(tmp2); break;
5855 default: abort();
5856 }
5857 neon_store_reg(rd, pass * 2, tmp2);
5858 }
5859 }
5860 break;
5861 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
5862 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
5863 for (pass = 0; pass < q + 1; pass++) {
5864 tmp = neon_load_reg(rm, pass * 2);
5865 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5866 tmp = neon_load_reg(rm, pass * 2 + 1);
5867 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5868 switch (size) {
5869 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5870 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5871 case 2: tcg_gen_add_i64(CPU_V001); break;
5872 default: abort();
5873 }
5874 if (op >= NEON_2RM_VPADAL) {
5875 /* Accumulate. */
5876 neon_load_reg64(cpu_V1, rd + pass);
5877 gen_neon_addl(size);
5878 }
5879 neon_store_reg64(cpu_V0, rd + pass);
5880 }
5881 break;
5882 case NEON_2RM_VTRN:
5883 if (size == 2) {
5884 int n;
5885 for (n = 0; n < (q ? 4 : 2); n += 2) {
5886 tmp = neon_load_reg(rm, n);
5887 tmp2 = neon_load_reg(rd, n + 1);
5888 neon_store_reg(rm, n, tmp2);
5889 neon_store_reg(rd, n + 1, tmp);
5890 }
5891 } else {
5892 goto elementwise;
5893 }
5894 break;
5895 case NEON_2RM_VUZP:
5896 if (gen_neon_unzip(rd, rm, size, q)) {
5897 return 1;
5898 }
5899 break;
5900 case NEON_2RM_VZIP:
5901 if (gen_neon_zip(rd, rm, size, q)) {
5902 return 1;
5903 }
5904 break;
5905 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
5906 /* also VQMOVUN; op field and mnemonics don't line up */
5907 if (rm & 1) {
5908 return 1;
5909 }
5910 TCGV_UNUSED_I32(tmp2);
5911 for (pass = 0; pass < 2; pass++) {
5912 neon_load_reg64(cpu_V0, rm + pass);
5913 tmp = tcg_temp_new_i32();
5914 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
5915 tmp, cpu_V0);
5916 if (pass == 0) {
5917 tmp2 = tmp;
5918 } else {
5919 neon_store_reg(rd, 0, tmp2);
5920 neon_store_reg(rd, 1, tmp);
5921 }
5922 }
5923 break;
5924 case NEON_2RM_VSHLL:
5925 if (q || (rd & 1)) {
5926 return 1;
5927 }
5928 tmp = neon_load_reg(rm, 0);
5929 tmp2 = neon_load_reg(rm, 1);
5930 for (pass = 0; pass < 2; pass++) {
5931 if (pass == 1)
5932 tmp = tmp2;
5933 gen_neon_widen(cpu_V0, tmp, size, 1);
5934 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5935 neon_store_reg64(cpu_V0, rd + pass);
5936 }
5937 break;
5938 case NEON_2RM_VCVT_F16_F32:
5939 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5940 q || (rm & 1)) {
5941 return 1;
5942 }
5943 tmp = tcg_temp_new_i32();
5944 tmp2 = tcg_temp_new_i32();
5945 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5946 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5947 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5948 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5949 tcg_gen_shli_i32(tmp2, tmp2, 16);
5950 tcg_gen_or_i32(tmp2, tmp2, tmp);
5951 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5952 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5953 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5954 neon_store_reg(rd, 0, tmp2);
5955 tmp2 = tcg_temp_new_i32();
5956 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5957 tcg_gen_shli_i32(tmp2, tmp2, 16);
5958 tcg_gen_or_i32(tmp2, tmp2, tmp);
5959 neon_store_reg(rd, 1, tmp2);
5960 tcg_temp_free_i32(tmp);
5961 break;
5962 case NEON_2RM_VCVT_F32_F16:
5963 if (!arm_feature(env, ARM_FEATURE_VFP_FP16) ||
5964 q || (rd & 1)) {
5965 return 1;
5966 }
5967 tmp3 = tcg_temp_new_i32();
5968 tmp = neon_load_reg(rm, 0);
5969 tmp2 = neon_load_reg(rm, 1);
5970 tcg_gen_ext16u_i32(tmp3, tmp);
5971 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5972 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5973 tcg_gen_shri_i32(tmp3, tmp, 16);
5974 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5975 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5976 tcg_temp_free_i32(tmp);
5977 tcg_gen_ext16u_i32(tmp3, tmp2);
5978 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5979 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5980 tcg_gen_shri_i32(tmp3, tmp2, 16);
5981 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5982 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5983 tcg_temp_free_i32(tmp2);
5984 tcg_temp_free_i32(tmp3);
5985 break;
5986 default:
5987 elementwise:
5988 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5989 if (neon_2rm_is_float_op(op)) {
5990 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5991 neon_reg_offset(rm, pass));
5992 TCGV_UNUSED_I32(tmp);
5993 } else {
5994 tmp = neon_load_reg(rm, pass);
5995 }
5996 switch (op) {
5997 case NEON_2RM_VREV32:
5998 switch (size) {
5999 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6000 case 1: gen_swap_half(tmp); break;
6001 default: abort();
6002 }
6003 break;
6004 case NEON_2RM_VREV16:
6005 gen_rev16(tmp);
6006 break;
6007 case NEON_2RM_VCLS:
6008 switch (size) {
6009 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6010 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6011 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
6012 default: abort();
6013 }
6014 break;
6015 case NEON_2RM_VCLZ:
6016 switch (size) {
6017 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6018 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6019 case 2: gen_helper_clz(tmp, tmp); break;
6020 default: abort();
6021 }
6022 break;
6023 case NEON_2RM_VCNT:
6024 gen_helper_neon_cnt_u8(tmp, tmp);
6025 break;
6026 case NEON_2RM_VMVN:
6027 tcg_gen_not_i32(tmp, tmp);
6028 break;
6029 case NEON_2RM_VQABS:
6030 switch (size) {
6031 case 0:
6032 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6033 break;
6034 case 1:
6035 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6036 break;
6037 case 2:
6038 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6039 break;
6040 default: abort();
6041 }
6042 break;
6043 case NEON_2RM_VQNEG:
6044 switch (size) {
6045 case 0:
6046 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6047 break;
6048 case 1:
6049 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6050 break;
6051 case 2:
6052 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6053 break;
6054 default: abort();
6055 }
6056 break;
6057 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
6058 tmp2 = tcg_const_i32(0);
6059 switch(size) {
6060 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6061 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6062 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
6063 default: abort();
6064 }
6065 tcg_temp_free_i32(tmp2);
6066 if (op == NEON_2RM_VCLE0) {
6067 tcg_gen_not_i32(tmp, tmp);
6068 }
6069 break;
6070 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
6071 tmp2 = tcg_const_i32(0);
6072 switch(size) {
6073 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6074 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6075 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
6076 default: abort();
6077 }
6078 tcg_temp_free_i32(tmp2);
6079 if (op == NEON_2RM_VCLT0) {
6080 tcg_gen_not_i32(tmp, tmp);
6081 }
6082 break;
6083 case NEON_2RM_VCEQ0:
6084 tmp2 = tcg_const_i32(0);
6085 switch(size) {
6086 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6087 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6088 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
6089 default: abort();
6090 }
6091 tcg_temp_free_i32(tmp2);
6092 break;
6093 case NEON_2RM_VABS:
6094 switch(size) {
6095 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6096 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6097 case 2: tcg_gen_abs_i32(tmp, tmp); break;
6098 default: abort();
6099 }
6100 break;
6101 case NEON_2RM_VNEG:
6102 tmp2 = tcg_const_i32(0);
6103 gen_neon_rsb(size, tmp, tmp2);
6104 tcg_temp_free_i32(tmp2);
6105 break;
6106 case NEON_2RM_VCGT0_F:
6107 {
6108 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6109 tmp2 = tcg_const_i32(0);
6110 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
6111 tcg_temp_free_i32(tmp2);
6112 tcg_temp_free_ptr(fpstatus);
6113 break;
6114 }
6115 case NEON_2RM_VCGE0_F:
6116 {
6117 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6118 tmp2 = tcg_const_i32(0);
6119 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
6120 tcg_temp_free_i32(tmp2);
6121 tcg_temp_free_ptr(fpstatus);
6122 break;
6123 }
6124 case NEON_2RM_VCEQ0_F:
6125 {
6126 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6127 tmp2 = tcg_const_i32(0);
6128 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
6129 tcg_temp_free_i32(tmp2);
6130 tcg_temp_free_ptr(fpstatus);
6131 break;
6132 }
6133 case NEON_2RM_VCLE0_F:
6134 {
6135 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6136 tmp2 = tcg_const_i32(0);
6137 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
6138 tcg_temp_free_i32(tmp2);
6139 tcg_temp_free_ptr(fpstatus);
6140 break;
6141 }
6142 case NEON_2RM_VCLT0_F:
6143 {
6144 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6145 tmp2 = tcg_const_i32(0);
6146 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
6147 tcg_temp_free_i32(tmp2);
6148 tcg_temp_free_ptr(fpstatus);
6149 break;
6150 }
6151 case NEON_2RM_VABS_F:
6152 gen_vfp_abs(0);
6153 break;
6154 case NEON_2RM_VNEG_F:
6155 gen_vfp_neg(0);
6156 break;
6157 case NEON_2RM_VSWP:
6158 tmp2 = neon_load_reg(rd, pass);
6159 neon_store_reg(rm, pass, tmp2);
6160 break;
6161 case NEON_2RM_VTRN:
6162 tmp2 = neon_load_reg(rd, pass);
6163 switch (size) {
6164 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6165 case 1: gen_neon_trn_u16(tmp, tmp2); break;
6166 default: abort();
6167 }
6168 neon_store_reg(rm, pass, tmp2);
6169 break;
6170 case NEON_2RM_VRECPE:
6171 gen_helper_recpe_u32(tmp, tmp, cpu_env);
6172 break;
6173 case NEON_2RM_VRSQRTE:
6174 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
6175 break;
6176 case NEON_2RM_VRECPE_F:
6177 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
6178 break;
6179 case NEON_2RM_VRSQRTE_F:
6180 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
6181 break;
6182 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
6183 gen_vfp_sito(0, 1);
6184 break;
6185 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
6186 gen_vfp_uito(0, 1);
6187 break;
6188 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
6189 gen_vfp_tosiz(0, 1);
6190 break;
6191 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
6192 gen_vfp_touiz(0, 1);
6193 break;
6194 default:
6195 /* Reserved op values were caught by the
6196 * neon_2rm_sizes[] check earlier.
6197 */
6198 abort();
6199 }
6200 if (neon_2rm_is_float_op(op)) {
6201 tcg_gen_st_f32(cpu_F0s, cpu_env,
6202 neon_reg_offset(rd, pass));
6203 } else {
6204 neon_store_reg(rd, pass, tmp);
6205 }
6206 }
6207 break;
6208 }
6209 } else if ((insn & (1 << 10)) == 0) {
6210 /* VTBL, VTBX. */
6211 int n = ((insn >> 8) & 3) + 1;
6212 if ((rn + n) > 32) {
6213 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
6214 * helper function running off the end of the register file.
6215 */
6216 return 1;
6217 }
6218 n <<= 3;
6219 if (insn & (1 << 6)) {
6220 tmp = neon_load_reg(rd, 0);
6221 } else {
6222 tmp = tcg_temp_new_i32();
6223 tcg_gen_movi_i32(tmp, 0);
6224 }
6225 tmp2 = neon_load_reg(rm, 0);
6226 tmp4 = tcg_const_i32(rn);
6227 tmp5 = tcg_const_i32(n);
6228 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
6229 tcg_temp_free_i32(tmp);
6230 if (insn & (1 << 6)) {
6231 tmp = neon_load_reg(rd, 1);
6232 } else {
6233 tmp = tcg_temp_new_i32();
6234 tcg_gen_movi_i32(tmp, 0);
6235 }
6236 tmp3 = neon_load_reg(rm, 1);
6237 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
6238 tcg_temp_free_i32(tmp5);
6239 tcg_temp_free_i32(tmp4);
6240 neon_store_reg(rd, 0, tmp2);
6241 neon_store_reg(rd, 1, tmp3);
6242 tcg_temp_free_i32(tmp);
6243 } else if ((insn & 0x380) == 0) {
6244 /* VDUP */
6245 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
6246 return 1;
6247 }
6248 if (insn & (1 << 19)) {
6249 tmp = neon_load_reg(rm, 1);
6250 } else {
6251 tmp = neon_load_reg(rm, 0);
6252 }
6253 if (insn & (1 << 16)) {
6254 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
6255 } else if (insn & (1 << 17)) {
6256 if ((insn >> 18) & 1)
6257 gen_neon_dup_high16(tmp);
6258 else
6259 gen_neon_dup_low16(tmp);
6260 }
6261 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6262 tmp2 = tcg_temp_new_i32();
6263 tcg_gen_mov_i32(tmp2, tmp);
6264 neon_store_reg(rd, pass, tmp2);
6265 }
6266 tcg_temp_free_i32(tmp);
6267 } else {
6268 return 1;
6269 }
6270 }
6271 }
6272 return 0;
6273 }
6274
6275 static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
6276 {
6277 int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
6278 const ARMCPRegInfo *ri;
6279 ARMCPU *cpu = arm_env_get_cpu(env);
6280
6281 cpnum = (insn >> 8) & 0xf;
6282 if (arm_feature(env, ARM_FEATURE_XSCALE)
6283 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
6284 return 1;
6285
6286 /* First check for coprocessor space used for actual instructions */
6287 switch (cpnum) {
6288 case 0:
6289 case 1:
6290 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6291 return disas_iwmmxt_insn(env, s, insn);
6292 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
6293 return disas_dsp_insn(env, s, insn);
6294 }
6295 return 1;
6296 case 10:
6297 case 11:
6298         return disas_vfp_insn(env, s, insn);
6299 default:
6300 break;
6301 }
6302
6303 /* Otherwise treat as a generic register access */
6304 is64 = (insn & (1 << 25)) == 0;
6305 if (!is64 && ((insn & (1 << 4)) == 0)) {
6306 /* cdp */
6307 return 1;
6308 }
6309
6310 crm = insn & 0xf;
6311 if (is64) {
6312 crn = 0;
6313 opc1 = (insn >> 4) & 0xf;
6314 opc2 = 0;
6315 rt2 = (insn >> 16) & 0xf;
6316 } else {
6317 crn = (insn >> 16) & 0xf;
6318 opc1 = (insn >> 21) & 7;
6319 opc2 = (insn >> 5) & 7;
6320 rt2 = 0;
6321 }
6322 isread = (insn >> 20) & 1;
6323 rt = (insn >> 12) & 0xf;
6324
6325 ri = get_arm_cp_reginfo(cpu,
6326 ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
6327 if (ri) {
6328 /* Check access permissions */
6329 if (!cp_access_ok(env, ri, isread)) {
6330 return 1;
6331 }
6332
6333 /* Handle special cases first */
6334 switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
6335 case ARM_CP_NOP:
6336 return 0;
6337 case ARM_CP_WFI:
6338 if (isread) {
6339 return 1;
6340 }
6341 gen_set_pc_im(s, s->pc);
6342 s->is_jmp = DISAS_WFI;
6343 return 0;
6344 default:
6345 break;
6346 }
6347
6348 if (use_icount && (ri->type & ARM_CP_IO)) {
6349 gen_io_start();
6350 }
6351
6352 if (isread) {
6353 /* Read */
6354 if (is64) {
6355 TCGv_i64 tmp64;
6356 TCGv_i32 tmp;
6357 if (ri->type & ARM_CP_CONST) {
6358 tmp64 = tcg_const_i64(ri->resetvalue);
6359 } else if (ri->readfn) {
6360 TCGv_ptr tmpptr;
6361 gen_set_pc_im(s, s->pc);
6362 tmp64 = tcg_temp_new_i64();
6363 tmpptr = tcg_const_ptr(ri);
6364 gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
6365 tcg_temp_free_ptr(tmpptr);
6366 } else {
6367 tmp64 = tcg_temp_new_i64();
6368 tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
6369 }
6370 tmp = tcg_temp_new_i32();
6371 tcg_gen_trunc_i64_i32(tmp, tmp64);
6372 store_reg(s, rt, tmp);
6373 tcg_gen_shri_i64(tmp64, tmp64, 32);
6374 tmp = tcg_temp_new_i32();
6375 tcg_gen_trunc_i64_i32(tmp, tmp64);
6376 tcg_temp_free_i64(tmp64);
6377 store_reg(s, rt2, tmp);
6378 } else {
6379 TCGv_i32 tmp;
6380 if (ri->type & ARM_CP_CONST) {
6381 tmp = tcg_const_i32(ri->resetvalue);
6382 } else if (ri->readfn) {
6383 TCGv_ptr tmpptr;
6384 gen_set_pc_im(s, s->pc);
6385 tmp = tcg_temp_new_i32();
6386 tmpptr = tcg_const_ptr(ri);
6387 gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
6388 tcg_temp_free_ptr(tmpptr);
6389 } else {
6390 tmp = load_cpu_offset(ri->fieldoffset);
6391 }
6392 if (rt == 15) {
6393                 /* Destination register of r15 for 32-bit loads sets
6394                  * the condition codes (NZCV) from the high 4 bits of the value.
6395 */
6396 gen_set_nzcv(tmp);
6397 tcg_temp_free_i32(tmp);
6398 } else {
6399 store_reg(s, rt, tmp);
6400 }
6401 }
6402 } else {
6403 /* Write */
6404 if (ri->type & ARM_CP_CONST) {
6405 /* If not forbidden by access permissions, treat as WI */
6406 return 0;
6407 }
6408
6409 if (is64) {
6410 TCGv_i32 tmplo, tmphi;
6411 TCGv_i64 tmp64 = tcg_temp_new_i64();
6412 tmplo = load_reg(s, rt);
6413 tmphi = load_reg(s, rt2);
6414 tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
6415 tcg_temp_free_i32(tmplo);
6416 tcg_temp_free_i32(tmphi);
6417 if (ri->writefn) {
6418 TCGv_ptr tmpptr = tcg_const_ptr(ri);
6419 gen_set_pc_im(s, s->pc);
6420 gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
6421 tcg_temp_free_ptr(tmpptr);
6422 } else {
6423 tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
6424 }
6425 tcg_temp_free_i64(tmp64);
6426 } else {
6427 if (ri->writefn) {
6428 TCGv_i32 tmp;
6429 TCGv_ptr tmpptr;
6430 gen_set_pc_im(s, s->pc);
6431 tmp = load_reg(s, rt);
6432 tmpptr = tcg_const_ptr(ri);
6433 gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
6434 tcg_temp_free_ptr(tmpptr);
6435 tcg_temp_free_i32(tmp);
6436 } else {
6437 TCGv_i32 tmp = load_reg(s, rt);
6438 store_cpu_offset(tmp, ri->fieldoffset);
6439 }
6440 }
6441 }
6442
6443 if (use_icount && (ri->type & ARM_CP_IO)) {
6444 /* I/O operations must end the TB here (whether read or write) */
6445 gen_io_end();
6446 gen_lookup_tb(s);
6447 } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
6448 /* We default to ending the TB on a coprocessor register write,
6449 * but allow this to be suppressed by the register definition
6450 * (usually only necessary to work around guest bugs).
6451 */
6452 gen_lookup_tb(s);
6453 }
6454
6455 return 0;
6456 }
6457
6458 return 1;
6459 }
6460
6461
6462 /* Store a 64-bit value to a register pair. Clobbers val. */
6463 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
6464 {
6465 TCGv_i32 tmp;
6466 tmp = tcg_temp_new_i32();
6467 tcg_gen_trunc_i64_i32(tmp, val);
6468 store_reg(s, rlow, tmp);
6469 tmp = tcg_temp_new_i32();
6470 tcg_gen_shri_i64(val, val, 32);
6471 tcg_gen_trunc_i64_i32(tmp, val);
6472 store_reg(s, rhigh, tmp);
6473 }
6474
6475 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
6476 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
6477 {
6478 TCGv_i64 tmp;
6479 TCGv_i32 tmp2;
6480
6481 /* Load value and extend to 64 bits. */
6482 tmp = tcg_temp_new_i64();
6483 tmp2 = load_reg(s, rlow);
6484 tcg_gen_extu_i32_i64(tmp, tmp2);
6485 tcg_temp_free_i32(tmp2);
6486 tcg_gen_add_i64(val, val, tmp);
6487 tcg_temp_free_i64(tmp);
6488 }
6489
6490 /* load and add a 64-bit value from a register pair. */
6491 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
6492 {
6493 TCGv_i64 tmp;
6494 TCGv_i32 tmpl;
6495 TCGv_i32 tmph;
6496
6497 /* Load 64-bit value rd:rn. */
6498 tmpl = load_reg(s, rlow);
6499 tmph = load_reg(s, rhigh);
6500 tmp = tcg_temp_new_i64();
6501 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
6502 tcg_temp_free_i32(tmpl);
6503 tcg_temp_free_i32(tmph);
6504 tcg_gen_add_i64(val, val, tmp);
6505 tcg_temp_free_i64(tmp);
6506 }
6507
6508 /* Set N and Z flags from hi|lo: cpu_NF carries N in bit 31, and Z is set iff cpu_ZF is zero. */
6509 static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
6510 {
6511 tcg_gen_mov_i32(cpu_NF, hi);
6512 tcg_gen_or_i32(cpu_ZF, lo, hi);
6513 }
6514
6515 /* Load/Store exclusive instructions are implemented by remembering
6516 the value/address loaded, and seeing if these are the same
6517 when the store is performed. This should be sufficient to implement
6518 the architecturally mandated semantics, and avoids having to monitor
6519 regular stores.
6520
6521 In system emulation mode only one CPU will be running at once, so
6522 this sequence is effectively atomic. In user emulation mode we
6523 throw an exception and handle the atomic operation elsewhere. */
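/* For example, a typical guest sequence that exercises these helpers is an
 * atomic increment loop:
 *
 *   retry:  LDREX   r1, [r0]        @ load value, mark [r0] exclusive
 *           ADD     r1, r1, #1
 *           STREX   r2, r1, [r0]    @ r2 = 0 on success, 1 on failure
 *           CMP     r2, #0
 *           BNE     retry
 *
 * gen_load_exclusive() below records the address and the loaded value; the
 * store side only succeeds if both still match.
 */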
6524 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
6525 TCGv_i32 addr, int size)
6526 {
6527 TCGv_i32 tmp = tcg_temp_new_i32();
6528
6529 switch (size) {
6530 case 0:
6531 gen_aa32_ld8u(tmp, addr, IS_USER(s));
6532 break;
6533 case 1:
6534 gen_aa32_ld16u(tmp, addr, IS_USER(s));
6535 break;
6536 case 2:
6537 case 3:
6538 gen_aa32_ld32u(tmp, addr, IS_USER(s));
6539 break;
6540 default:
6541 abort();
6542 }
6543 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
6544 store_reg(s, rt, tmp);
6545 if (size == 3) {
6546 TCGv_i32 tmp2 = tcg_temp_new_i32();
6547 tcg_gen_addi_i32(tmp2, addr, 4);
6548 tmp = tcg_temp_new_i32();
6549 gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
6550 tcg_temp_free_i32(tmp2);
6551 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
6552 store_reg(s, rt2, tmp);
6553 }
6554 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
6555 }
6556
6557 static void gen_clrex(DisasContext *s)
6558 {
6559 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6560 }
6561
6562 #ifdef CONFIG_USER_ONLY
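/* In the user-mode build the store-exclusive is not expanded inline: the
 * address and the packed size/Rd/Rt/Rt2 operands are stashed in
 * cpu_exclusive_test and cpu_exclusive_info, and an EXCP_STREX exception
 * defers the whole operation to the usermode exception handler.
 */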
6563 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6564 TCGv_i32 addr, int size)
6565 {
6566 tcg_gen_mov_i32(cpu_exclusive_test, addr);
6567 tcg_gen_movi_i32(cpu_exclusive_info,
6568 size | (rd << 4) | (rt << 8) | (rt2 << 12));
6569 gen_exception_insn(s, 4, EXCP_STREX);
6570 }
6571 #else
6572 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
6573 TCGv_i32 addr, int size)
6574 {
6575 TCGv_i32 tmp;
6576 int done_label;
6577 int fail_label;
6578
6579 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
6580 [addr] = {Rt};
6581 {Rd} = 0;
6582 } else {
6583 {Rd} = 1;
6584 } */
6585 fail_label = gen_new_label();
6586 done_label = gen_new_label();
6587 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
6588 tmp = tcg_temp_new_i32();
6589 switch (size) {
6590 case 0:
6591 gen_aa32_ld8u(tmp, addr, IS_USER(s));
6592 break;
6593 case 1:
6594 gen_aa32_ld16u(tmp, addr, IS_USER(s));
6595 break;
6596 case 2:
6597 case 3:
6598 gen_aa32_ld32u(tmp, addr, IS_USER(s));
6599 break;
6600 default:
6601 abort();
6602 }
6603 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
6604 tcg_temp_free_i32(tmp);
6605 if (size == 3) {
6606 TCGv_i32 tmp2 = tcg_temp_new_i32();
6607 tcg_gen_addi_i32(tmp2, addr, 4);
6608 tmp = tcg_temp_new_i32();
6609 gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
6610 tcg_temp_free_i32(tmp2);
6611 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6612 tcg_temp_free_i32(tmp);
6613 }
6614 tmp = load_reg(s, rt);
6615 switch (size) {
6616 case 0:
6617 gen_aa32_st8(tmp, addr, IS_USER(s));
6618 break;
6619 case 1:
6620 gen_aa32_st16(tmp, addr, IS_USER(s));
6621 break;
6622 case 2:
6623 case 3:
6624 gen_aa32_st32(tmp, addr, IS_USER(s));
6625 break;
6626 default:
6627 abort();
6628 }
6629 tcg_temp_free_i32(tmp);
6630 if (size == 3) {
6631 tcg_gen_addi_i32(addr, addr, 4);
6632 tmp = load_reg(s, rt2);
6633 gen_aa32_st32(tmp, addr, IS_USER(s));
6634 tcg_temp_free_i32(tmp);
6635 }
6636 tcg_gen_movi_i32(cpu_R[rd], 0);
6637 tcg_gen_br(done_label);
6638 gen_set_label(fail_label);
6639 tcg_gen_movi_i32(cpu_R[rd], 1);
6640 gen_set_label(done_label);
6641 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6642 }
6643 #endif
6644
6645 /* gen_srs:
6647 * @s: DisasContext
6648 * @mode: mode field from insn (which stack to store to)
6649 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
6650 * @writeback: true if writeback bit set
6651 *
6652 * Generate code for the SRS (Store Return State) insn.
6653 */
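/* SRS stores two words, LR followed by the SPSR of the current mode, to the
 * stack of the mode given by <mode>; <amode> selects where the first word
 * goes relative to that banked SP (e.g. DB writes to [sp - 8] and [sp - 4]).
 * A common guest idiom is "SRSDB sp!, #31", which pushes the return state
 * onto the System-mode stack on exception entry.
 */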
6654 static void gen_srs(DisasContext *s,
6655 uint32_t mode, uint32_t amode, bool writeback)
6656 {
6657 int32_t offset;
6658 TCGv_i32 addr = tcg_temp_new_i32();
6659 TCGv_i32 tmp = tcg_const_i32(mode);
6660 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6661 tcg_temp_free_i32(tmp);
6662 switch (amode) {
6663 case 0: /* DA */
6664 offset = -4;
6665 break;
6666 case 1: /* IA */
6667 offset = 0;
6668 break;
6669 case 2: /* DB */
6670 offset = -8;
6671 break;
6672 case 3: /* IB */
6673 offset = 4;
6674 break;
6675 default:
6676 abort();
6677 }
6678 tcg_gen_addi_i32(addr, addr, offset);
6679 tmp = load_reg(s, 14);
6680 gen_aa32_st32(tmp, addr, 0);
6681 tcg_temp_free_i32(tmp);
6682 tmp = load_cpu_field(spsr);
6683 tcg_gen_addi_i32(addr, addr, 4);
6684 gen_aa32_st32(tmp, addr, 0);
6685 tcg_temp_free_i32(tmp);
6686 if (writeback) {
6687 switch (amode) {
6688 case 0:
6689 offset = -8;
6690 break;
6691 case 1:
6692 offset = 4;
6693 break;
6694 case 2:
6695 offset = -4;
6696 break;
6697 case 3:
6698 offset = 0;
6699 break;
6700 default:
6701 abort();
6702 }
6703 tcg_gen_addi_i32(addr, addr, offset);
6704 tmp = tcg_const_i32(mode);
6705 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6706 tcg_temp_free_i32(tmp);
6707 }
6708 tcg_temp_free_i32(addr);
6709 }
6710
6711 static void disas_arm_insn(CPUARMState *env, DisasContext *s)
6712 {
6713 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6714 TCGv_i32 tmp;
6715 TCGv_i32 tmp2;
6716 TCGv_i32 tmp3;
6717 TCGv_i32 addr;
6718 TCGv_i64 tmp64;
6719
6720 insn = arm_ldl_code(env, s->pc, s->bswap_code);
6721 s->pc += 4;
6722
6723 /* M variants do not implement ARM mode. */
6724 if (IS_M(env))
6725 goto illegal_op;
6726 cond = insn >> 28;
6727 if (cond == 0xf){
6728 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
6729 * choose to UNDEF. In ARMv5 and above the space is used
6730 * for miscellaneous unconditional instructions.
6731 */
6732 ARCH(5);
6733
6734 /* Unconditional instructions. */
6735 if (((insn >> 25) & 7) == 1) {
6736 /* NEON Data processing. */
6737 if (!arm_feature(env, ARM_FEATURE_NEON))
6738 goto illegal_op;
6739
6740 if (disas_neon_data_insn(env, s, insn))
6741 goto illegal_op;
6742 return;
6743 }
6744 if ((insn & 0x0f100000) == 0x04000000) {
6745 /* NEON load/store. */
6746 if (!arm_feature(env, ARM_FEATURE_NEON))
6747 goto illegal_op;
6748
6749 if (disas_neon_ls_insn(env, s, insn))
6750 goto illegal_op;
6751 return;
6752 }
6753 if (((insn & 0x0f30f000) == 0x0510f000) ||
6754 ((insn & 0x0f30f010) == 0x0710f000)) {
6755 if ((insn & (1 << 22)) == 0) {
6756 /* PLDW; v7MP */
6757 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6758 goto illegal_op;
6759 }
6760 }
6761 /* Otherwise PLD; v5TE+ */
6762 ARCH(5TE);
6763 return;
6764 }
6765 if (((insn & 0x0f70f000) == 0x0450f000) ||
6766 ((insn & 0x0f70f010) == 0x0650f000)) {
6767 ARCH(7);
6768 return; /* PLI; V7 */
6769 }
6770 if (((insn & 0x0f700000) == 0x04100000) ||
6771 ((insn & 0x0f700010) == 0x06100000)) {
6772 if (!arm_feature(env, ARM_FEATURE_V7MP)) {
6773 goto illegal_op;
6774 }
6775 return; /* v7MP: Unallocated memory hint: must NOP */
6776 }
6777
6778 if ((insn & 0x0ffffdff) == 0x01010000) {
6779 ARCH(6);
6780 /* setend */
6781 if (((insn >> 9) & 1) != s->bswap_code) {
6782 /* Dynamic endianness switching not implemented. */
6783 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
6784 goto illegal_op;
6785 }
6786 return;
6787 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6788 switch ((insn >> 4) & 0xf) {
6789 case 1: /* clrex */
6790 ARCH(6K);
6791 gen_clrex(s);
6792 return;
6793 case 4: /* dsb */
6794 case 5: /* dmb */
6795 case 6: /* isb */
6796 ARCH(7);
6797             /* We don't emulate caches, so these are no-ops. */
6798 return;
6799 default:
6800 goto illegal_op;
6801 }
6802 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6803 /* srs */
6804 if (IS_USER(s)) {
6805 goto illegal_op;
6806 }
6807 ARCH(6);
6808 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
6809 return;
6810 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6811 /* rfe */
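            /* RFE is the counterpart of SRS: it reloads the PC and CPSR from
             * two consecutive words at the base register (PC first, then
             * CPSR), with the same DA/IA/DB/IB addressing and optional base
             * writeback.
             */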
6812 int32_t offset;
6813 if (IS_USER(s))
6814 goto illegal_op;
6815 ARCH(6);
6816 rn = (insn >> 16) & 0xf;
6817 addr = load_reg(s, rn);
6818 i = (insn >> 23) & 3;
6819 switch (i) {
6820 case 0: offset = -4; break; /* DA */
6821 case 1: offset = 0; break; /* IA */
6822 case 2: offset = -8; break; /* DB */
6823 case 3: offset = 4; break; /* IB */
6824 default: abort();
6825 }
6826 if (offset)
6827 tcg_gen_addi_i32(addr, addr, offset);
6828 /* Load PC into tmp and CPSR into tmp2. */
6829 tmp = tcg_temp_new_i32();
6830 gen_aa32_ld32u(tmp, addr, 0);
6831 tcg_gen_addi_i32(addr, addr, 4);
6832 tmp2 = tcg_temp_new_i32();
6833 gen_aa32_ld32u(tmp2, addr, 0);
6834 if (insn & (1 << 21)) {
6835 /* Base writeback. */
6836 switch (i) {
6837 case 0: offset = -8; break;
6838 case 1: offset = 4; break;
6839 case 2: offset = -4; break;
6840 case 3: offset = 0; break;
6841 default: abort();
6842 }
6843 if (offset)
6844 tcg_gen_addi_i32(addr, addr, offset);
6845 store_reg(s, rn, addr);
6846 } else {
6847 tcg_temp_free_i32(addr);
6848 }
6849 gen_rfe(s, tmp, tmp2);
6850 return;
6851 } else if ((insn & 0x0e000000) == 0x0a000000) {
6852 /* branch link and change to thumb (blx <offset>) */
6853 int32_t offset;
6854
6855 val = (uint32_t)s->pc;
6856 tmp = tcg_temp_new_i32();
6857 tcg_gen_movi_i32(tmp, val);
6858 store_reg(s, 14, tmp);
6859 /* Sign-extend the 24-bit offset */
6860 offset = (((int32_t)insn) << 8) >> 8;
6861 /* offset * 4 + bit24 * 2 + (thumb bit) */
6862 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6863 /* pipeline offset */
6864 val += 4;
6865 /* protected by ARCH(5); above, near the start of uncond block */
6866 gen_bx_im(s, val);
6867 return;
6868 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6869 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6870 /* iWMMXt register transfer. */
6871 if (env->cp15.c15_cpar & (1 << 1))
6872 if (!disas_iwmmxt_insn(env, s, insn))
6873 return;
6874 }
6875 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6876 /* Coprocessor double register transfer. */
6877 ARCH(5TE);
6878 } else if ((insn & 0x0f000010) == 0x0e000010) {
6879 /* Additional coprocessor register transfer. */
6880 } else if ((insn & 0x0ff10020) == 0x01000000) {
6881 uint32_t mask;
6882 uint32_t val;
6883 /* cps (privileged) */
6884 if (IS_USER(s))
6885 return;
6886 mask = val = 0;
6887 if (insn & (1 << 19)) {
6888 if (insn & (1 << 8))
6889 mask |= CPSR_A;
6890 if (insn & (1 << 7))
6891 mask |= CPSR_I;
6892 if (insn & (1 << 6))
6893 mask |= CPSR_F;
6894 if (insn & (1 << 18))
6895 val |= mask;
6896 }
6897 if (insn & (1 << 17)) {
6898 mask |= CPSR_M;
6899 val |= (insn & 0x1f);
6900 }
6901 if (mask) {
6902 gen_set_psr_im(s, mask, 0, val);
6903 }
6904 return;
6905 }
6906 goto illegal_op;
6907 }
6908 if (cond != 0xe) {
6909 /* if not always execute, we generate a conditional jump to
6910            the next instruction */
6911 s->condlabel = gen_new_label();
6912 gen_test_cc(cond ^ 1, s->condlabel);
6913 s->condjmp = 1;
6914 }
6915 if ((insn & 0x0f900000) == 0x03000000) {
6916 if ((insn & (1 << 21)) == 0) {
6917 ARCH(6T2);
6918 rd = (insn >> 12) & 0xf;
6919 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6920 if ((insn & (1 << 22)) == 0) {
6921                 /* MOVW: Rd = zero-extended imm16 */
6922 tmp = tcg_temp_new_i32();
6923 tcg_gen_movi_i32(tmp, val);
6924 } else {
6925                 /* MOVT: Rd[31:16] = imm16, low halfword preserved */
6926 tmp = load_reg(s, rd);
6927 tcg_gen_ext16u_i32(tmp, tmp);
6928 tcg_gen_ori_i32(tmp, tmp, val << 16);
6929 }
6930 store_reg(s, rd, tmp);
6931 } else {
6932 if (((insn >> 12) & 0xf) != 0xf)
6933 goto illegal_op;
6934 if (((insn >> 16) & 0xf) == 0) {
6935 gen_nop_hint(s, insn & 0xff);
6936 } else {
6937 /* CPSR = immediate */
6938 val = insn & 0xff;
6939 shift = ((insn >> 8) & 0xf) * 2;
6940 if (shift)
6941 val = (val >> shift) | (val << (32 - shift));
6942 i = ((insn & (1 << 22)) != 0);
6943 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6944 goto illegal_op;
6945 }
6946 }
6947 } else if ((insn & 0x0f900000) == 0x01000000
6948 && (insn & 0x00000090) != 0x00000090) {
6949 /* miscellaneous instructions */
6950 op1 = (insn >> 21) & 3;
6951 sh = (insn >> 4) & 0xf;
6952 rm = insn & 0xf;
6953 switch (sh) {
6954 case 0x0: /* move program status register */
6955 if (op1 & 1) {
6956 /* PSR = reg */
6957 tmp = load_reg(s, rm);
6958 i = ((op1 & 2) != 0);
6959 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6960 goto illegal_op;
6961 } else {
6962 /* reg = PSR */
6963 rd = (insn >> 12) & 0xf;
6964 if (op1 & 2) {
6965 if (IS_USER(s))
6966 goto illegal_op;
6967 tmp = load_cpu_field(spsr);
6968 } else {
6969 tmp = tcg_temp_new_i32();
6970 gen_helper_cpsr_read(tmp, cpu_env);
6971 }
6972 store_reg(s, rd, tmp);
6973 }
6974 break;
6975 case 0x1:
6976 if (op1 == 1) {
6977 /* branch/exchange thumb (bx). */
6978 ARCH(4T);
6979 tmp = load_reg(s, rm);
6980 gen_bx(s, tmp);
6981 } else if (op1 == 3) {
6982 /* clz */
6983 ARCH(5);
6984 rd = (insn >> 12) & 0xf;
6985 tmp = load_reg(s, rm);
6986 gen_helper_clz(tmp, tmp);
6987 store_reg(s, rd, tmp);
6988 } else {
6989 goto illegal_op;
6990 }
6991 break;
6992 case 0x2:
6993 if (op1 == 1) {
6994 ARCH(5J); /* bxj */
6995 /* Trivial implementation equivalent to bx. */
6996 tmp = load_reg(s, rm);
6997 gen_bx(s, tmp);
6998 } else {
6999 goto illegal_op;
7000 }
7001 break;
7002 case 0x3:
7003 if (op1 != 1)
7004 goto illegal_op;
7005
7006 ARCH(5);
7007 /* branch link/exchange thumb (blx) */
7008 tmp = load_reg(s, rm);
7009 tmp2 = tcg_temp_new_i32();
7010 tcg_gen_movi_i32(tmp2, s->pc);
7011 store_reg(s, 14, tmp2);
7012 gen_bx(s, tmp);
7013 break;
7014         case 0x5: /* saturating add/subtract: QADD, QSUB, QDADD, QDSUB */
7015 ARCH(5TE);
7016 rd = (insn >> 12) & 0xf;
7017 rn = (insn >> 16) & 0xf;
7018 tmp = load_reg(s, rm);
7019 tmp2 = load_reg(s, rn);
7020 if (op1 & 2)
7021 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
7022 if (op1 & 1)
7023 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
7024 else
7025 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7026 tcg_temp_free_i32(tmp2);
7027 store_reg(s, rd, tmp);
7028 break;
7029 case 7:
7030 /* SMC instruction (op1 == 3)
7031 and undefined instructions (op1 == 0 || op1 == 2)
7032 will trap */
7033 if (op1 != 1) {
7034 goto illegal_op;
7035 }
7036 /* bkpt */
7037 ARCH(5);
7038 gen_exception_insn(s, 4, EXCP_BKPT);
7039 break;
7040         case 0x8: /* signed multiply: SMLAxy, SMULWy, SMLAWy, SMLALxy, SMULxy */
7041 case 0xa:
7042 case 0xc:
7043 case 0xe:
7044 ARCH(5TE);
7045 rs = (insn >> 8) & 0xf;
7046 rn = (insn >> 12) & 0xf;
7047 rd = (insn >> 16) & 0xf;
7048 if (op1 == 1) {
7049 /* (32 * 16) >> 16 */
7050 tmp = load_reg(s, rm);
7051 tmp2 = load_reg(s, rs);
7052 if (sh & 4)
7053 tcg_gen_sari_i32(tmp2, tmp2, 16);
7054 else
7055 gen_sxth(tmp2);
7056 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7057 tcg_gen_shri_i64(tmp64, tmp64, 16);
7058 tmp = tcg_temp_new_i32();
7059 tcg_gen_trunc_i64_i32(tmp, tmp64);
7060 tcg_temp_free_i64(tmp64);
7061 if ((sh & 2) == 0) {
7062 tmp2 = load_reg(s, rn);
7063 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7064 tcg_temp_free_i32(tmp2);
7065 }
7066 store_reg(s, rd, tmp);
7067 } else {
7068 /* 16 * 16 */
7069 tmp = load_reg(s, rm);
7070 tmp2 = load_reg(s, rs);
7071 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7072 tcg_temp_free_i32(tmp2);
7073 if (op1 == 2) {
7074 tmp64 = tcg_temp_new_i64();
7075 tcg_gen_ext_i32_i64(tmp64, tmp);
7076 tcg_temp_free_i32(tmp);
7077 gen_addq(s, tmp64, rn, rd);
7078 gen_storeq_reg(s, rn, rd, tmp64);
7079 tcg_temp_free_i64(tmp64);
7080 } else {
7081 if (op1 == 0) {
7082 tmp2 = load_reg(s, rn);
7083 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7084 tcg_temp_free_i32(tmp2);
7085 }
7086 store_reg(s, rd, tmp);
7087 }
7088 }
7089 break;
7090 default:
7091 goto illegal_op;
7092 }
7093 } else if (((insn & 0x0e000000) == 0 &&
7094 (insn & 0x00000090) != 0x90) ||
7095 ((insn & 0x0e000000) == (1 << 25))) {
7096 int set_cc, logic_cc, shiftop;
7097
7098 op1 = (insn >> 21) & 0xf;
7099 set_cc = (insn >> 20) & 1;
7100 logic_cc = table_logic_cc[op1] & set_cc;
7101
7102 /* data processing instruction */
7103 if (insn & (1 << 25)) {
7104             /* immediate operand: 8-bit value rotated right by twice the 4-bit rotate field */
7105 val = insn & 0xff;
7106 shift = ((insn >> 8) & 0xf) * 2;
7107 if (shift) {
7108 val = (val >> shift) | (val << (32 - shift));
7109 }
7110 tmp2 = tcg_temp_new_i32();
7111 tcg_gen_movi_i32(tmp2, val);
7112 if (logic_cc && shift) {
7113 gen_set_CF_bit31(tmp2);
7114 }
7115 } else {
7116 /* register */
7117 rm = (insn) & 0xf;
7118 tmp2 = load_reg(s, rm);
7119 shiftop = (insn >> 5) & 3;
7120 if (!(insn & (1 << 4))) {
7121 shift = (insn >> 7) & 0x1f;
7122 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7123 } else {
7124 rs = (insn >> 8) & 0xf;
7125 tmp = load_reg(s, rs);
7126 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
7127 }
7128 }
7129 if (op1 != 0x0f && op1 != 0x0d) {
7130 rn = (insn >> 16) & 0xf;
7131 tmp = load_reg(s, rn);
7132 } else {
7133 TCGV_UNUSED_I32(tmp);
7134 }
7135 rd = (insn >> 12) & 0xf;
7136         switch (op1) {
7137 case 0x00:
7138 tcg_gen_and_i32(tmp, tmp, tmp2);
7139 if (logic_cc) {
7140 gen_logic_CC(tmp);
7141 }
7142 store_reg_bx(env, s, rd, tmp);
7143 break;
7144 case 0x01:
7145 tcg_gen_xor_i32(tmp, tmp, tmp2);
7146 if (logic_cc) {
7147 gen_logic_CC(tmp);
7148 }
7149 store_reg_bx(env, s, rd, tmp);
7150 break;
7151 case 0x02:
7152 if (set_cc && rd == 15) {
7153 /* SUBS r15, ... is used for exception return. */
7154 if (IS_USER(s)) {
7155 goto illegal_op;
7156 }
7157 gen_sub_CC(tmp, tmp, tmp2);
7158 gen_exception_return(s, tmp);
7159 } else {
7160 if (set_cc) {
7161 gen_sub_CC(tmp, tmp, tmp2);
7162 } else {
7163 tcg_gen_sub_i32(tmp, tmp, tmp2);
7164 }
7165 store_reg_bx(env, s, rd, tmp);
7166 }
7167 break;
7168 case 0x03:
7169 if (set_cc) {
7170 gen_sub_CC(tmp, tmp2, tmp);
7171 } else {
7172 tcg_gen_sub_i32(tmp, tmp2, tmp);
7173 }
7174 store_reg_bx(env, s, rd, tmp);
7175 break;
7176 case 0x04:
7177 if (set_cc) {
7178 gen_add_CC(tmp, tmp, tmp2);
7179 } else {
7180 tcg_gen_add_i32(tmp, tmp, tmp2);
7181 }
7182 store_reg_bx(env, s, rd, tmp);
7183 break;
7184 case 0x05:
7185 if (set_cc) {
7186 gen_adc_CC(tmp, tmp, tmp2);
7187 } else {
7188 gen_add_carry(tmp, tmp, tmp2);
7189 }
7190 store_reg_bx(env, s, rd, tmp);
7191 break;
7192 case 0x06:
7193 if (set_cc) {
7194 gen_sbc_CC(tmp, tmp, tmp2);
7195 } else {
7196 gen_sub_carry(tmp, tmp, tmp2);
7197 }
7198 store_reg_bx(env, s, rd, tmp);
7199 break;
7200 case 0x07:
7201 if (set_cc) {
7202 gen_sbc_CC(tmp, tmp2, tmp);
7203 } else {
7204 gen_sub_carry(tmp, tmp2, tmp);
7205 }
7206 store_reg_bx(env, s, rd, tmp);
7207 break;
7208 case 0x08:
7209 if (set_cc) {
7210 tcg_gen_and_i32(tmp, tmp, tmp2);
7211 gen_logic_CC(tmp);
7212 }
7213 tcg_temp_free_i32(tmp);
7214 break;
7215 case 0x09:
7216 if (set_cc) {
7217 tcg_gen_xor_i32(tmp, tmp, tmp2);
7218 gen_logic_CC(tmp);
7219 }
7220 tcg_temp_free_i32(tmp);
7221 break;
7222 case 0x0a:
7223 if (set_cc) {
7224 gen_sub_CC(tmp, tmp, tmp2);
7225 }
7226 tcg_temp_free_i32(tmp);
7227 break;
7228 case 0x0b:
7229 if (set_cc) {
7230 gen_add_CC(tmp, tmp, tmp2);
7231 }
7232 tcg_temp_free_i32(tmp);
7233 break;
7234 case 0x0c:
7235 tcg_gen_or_i32(tmp, tmp, tmp2);
7236 if (logic_cc) {
7237 gen_logic_CC(tmp);
7238 }
7239 store_reg_bx(env, s, rd, tmp);
7240 break;
7241 case 0x0d:
7242 if (logic_cc && rd == 15) {
7243 /* MOVS r15, ... is used for exception return. */
7244 if (IS_USER(s)) {
7245 goto illegal_op;
7246 }
7247 gen_exception_return(s, tmp2);
7248 } else {
7249 if (logic_cc) {
7250 gen_logic_CC(tmp2);
7251 }
7252 store_reg_bx(env, s, rd, tmp2);
7253 }
7254 break;
7255 case 0x0e:
7256 tcg_gen_andc_i32(tmp, tmp, tmp2);
7257 if (logic_cc) {
7258 gen_logic_CC(tmp);
7259 }
7260 store_reg_bx(env, s, rd, tmp);
7261 break;
7262 default:
7263 case 0x0f:
7264 tcg_gen_not_i32(tmp2, tmp2);
7265 if (logic_cc) {
7266 gen_logic_CC(tmp2);
7267 }
7268 store_reg_bx(env, s, rd, tmp2);
7269 break;
7270 }
7271 if (op1 != 0x0f && op1 != 0x0d) {
7272 tcg_temp_free_i32(tmp2);
7273 }
7274 } else {
7275 /* other instructions */
7276 op1 = (insn >> 24) & 0xf;
7277         switch (op1) {
7278 case 0x0:
7279 case 0x1:
7280 /* multiplies, extra load/stores */
7281 sh = (insn >> 5) & 3;
7282 if (sh == 0) {
7283 if (op1 == 0x0) {
7284 rd = (insn >> 16) & 0xf;
7285 rn = (insn >> 12) & 0xf;
7286 rs = (insn >> 8) & 0xf;
7287 rm = (insn) & 0xf;
7288 op1 = (insn >> 20) & 0xf;
7289 switch (op1) {
7290 case 0: case 1: case 2: case 3: case 6:
7291 /* 32 bit mul */
7292 tmp = load_reg(s, rs);
7293 tmp2 = load_reg(s, rm);
7294 tcg_gen_mul_i32(tmp, tmp, tmp2);
7295 tcg_temp_free_i32(tmp2);
7296 if (insn & (1 << 22)) {
7297 /* Subtract (mls) */
7298 ARCH(6T2);
7299 tmp2 = load_reg(s, rn);
7300 tcg_gen_sub_i32(tmp, tmp2, tmp);
7301 tcg_temp_free_i32(tmp2);
7302 } else if (insn & (1 << 21)) {
7303 /* Add */
7304 tmp2 = load_reg(s, rn);
7305 tcg_gen_add_i32(tmp, tmp, tmp2);
7306 tcg_temp_free_i32(tmp2);
7307 }
7308 if (insn & (1 << 20))
7309 gen_logic_CC(tmp);
7310 store_reg(s, rd, tmp);
7311 break;
7312 case 4:
7313 /* 64 bit mul double accumulate (UMAAL) */
7314 ARCH(6);
7315 tmp = load_reg(s, rs);
7316 tmp2 = load_reg(s, rm);
7317 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7318 gen_addq_lo(s, tmp64, rn);
7319 gen_addq_lo(s, tmp64, rd);
7320 gen_storeq_reg(s, rn, rd, tmp64);
7321 tcg_temp_free_i64(tmp64);
7322 break;
7323 case 8: case 9: case 10: case 11:
7324 case 12: case 13: case 14: case 15:
7325 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
7326 tmp = load_reg(s, rs);
7327 tmp2 = load_reg(s, rm);
7328 if (insn & (1 << 22)) {
7329 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
7330 } else {
7331 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
7332 }
7333 if (insn & (1 << 21)) { /* mult accumulate */
7334 TCGv_i32 al = load_reg(s, rn);
7335 TCGv_i32 ah = load_reg(s, rd);
7336 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
7337 tcg_temp_free_i32(al);
7338 tcg_temp_free_i32(ah);
7339 }
7340 if (insn & (1 << 20)) {
7341 gen_logicq_cc(tmp, tmp2);
7342 }
7343 store_reg(s, rn, tmp);
7344 store_reg(s, rd, tmp2);
7345 break;
7346 default:
7347 goto illegal_op;
7348 }
7349 } else {
7350 rn = (insn >> 16) & 0xf;
7351 rd = (insn >> 12) & 0xf;
7352 if (insn & (1 << 23)) {
7353 /* load/store exclusive */
7354 int op2 = (insn >> 8) & 3;
7355 op1 = (insn >> 21) & 0x3;
7356
7357 switch (op2) {
7358 case 0: /* lda/stl */
7359 if (op1 == 1) {
7360 goto illegal_op;
7361 }
7362 ARCH(8);
7363 break;
7364 case 1: /* reserved */
7365 goto illegal_op;
7366 case 2: /* ldaex/stlex */
7367 ARCH(8);
7368 break;
7369 case 3: /* ldrex/strex */
7370 if (op1) {
7371 ARCH(6K);
7372 } else {
7373 ARCH(6);
7374 }
7375 break;
7376 }
7377
7378 addr = tcg_temp_local_new_i32();
7379 load_reg_var(s, addr, rn);
7380
7381 /* Since the emulation does not have barriers,
7382 the acquire/release semantics need no special
7383 handling */
7384 if (op2 == 0) {
7385 if (insn & (1 << 20)) {
7386 tmp = tcg_temp_new_i32();
7387 switch (op1) {
7388 case 0: /* lda */
7389 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7390 break;
7391 case 2: /* ldab */
7392 gen_aa32_ld8u(tmp, addr, IS_USER(s));
7393 break;
7394 case 3: /* ldah */
7395 gen_aa32_ld16u(tmp, addr, IS_USER(s));
7396 break;
7397 default:
7398 abort();
7399 }
7400 store_reg(s, rd, tmp);
7401 } else {
7402 rm = insn & 0xf;
7403 tmp = load_reg(s, rm);
7404 switch (op1) {
7405 case 0: /* stl */
7406 gen_aa32_st32(tmp, addr, IS_USER(s));
7407 break;
7408 case 2: /* stlb */
7409 gen_aa32_st8(tmp, addr, IS_USER(s));
7410 break;
7411 case 3: /* stlh */
7412 gen_aa32_st16(tmp, addr, IS_USER(s));
7413 break;
7414 default:
7415 abort();
7416 }
7417 tcg_temp_free_i32(tmp);
7418 }
7419 } else if (insn & (1 << 20)) {
7420 switch (op1) {
7421 case 0: /* ldrex */
7422 gen_load_exclusive(s, rd, 15, addr, 2);
7423 break;
7424 case 1: /* ldrexd */
7425 gen_load_exclusive(s, rd, rd + 1, addr, 3);
7426 break;
7427 case 2: /* ldrexb */
7428 gen_load_exclusive(s, rd, 15, addr, 0);
7429 break;
7430 case 3: /* ldrexh */
7431 gen_load_exclusive(s, rd, 15, addr, 1);
7432 break;
7433 default:
7434 abort();
7435 }
7436 } else {
7437 rm = insn & 0xf;
7438 switch (op1) {
7439 case 0: /* strex */
7440 gen_store_exclusive(s, rd, rm, 15, addr, 2);
7441 break;
7442 case 1: /* strexd */
7443 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
7444 break;
7445 case 2: /* strexb */
7446 gen_store_exclusive(s, rd, rm, 15, addr, 0);
7447 break;
7448 case 3: /* strexh */
7449 gen_store_exclusive(s, rd, rm, 15, addr, 1);
7450 break;
7451 default:
7452 abort();
7453 }
7454 }
7455 tcg_temp_free_i32(addr);
7456 } else {
7457 /* SWP instruction */
7458 rm = (insn) & 0xf;
7459
7460 /* ??? This is not really atomic. However we know
7461 we never have multiple CPUs running in parallel,
7462 so it is good enough. */
7463 addr = load_reg(s, rn);
7464 tmp = load_reg(s, rm);
7465 tmp2 = tcg_temp_new_i32();
7466 if (insn & (1 << 22)) {
7467 gen_aa32_ld8u(tmp2, addr, IS_USER(s));
7468 gen_aa32_st8(tmp, addr, IS_USER(s));
7469 } else {
7470 gen_aa32_ld32u(tmp2, addr, IS_USER(s));
7471 gen_aa32_st32(tmp, addr, IS_USER(s));
7472 }
7473 tcg_temp_free_i32(tmp);
7474 tcg_temp_free_i32(addr);
7475 store_reg(s, rd, tmp2);
7476 }
7477 }
7478 } else {
7479 int address_offset;
7480 int load;
7481 /* Misc load/store */
7482 rn = (insn >> 16) & 0xf;
7483 rd = (insn >> 12) & 0xf;
7484 addr = load_reg(s, rn);
7485 if (insn & (1 << 24))
7486 gen_add_datah_offset(s, insn, 0, addr);
7487 address_offset = 0;
7488 if (insn & (1 << 20)) {
7489 /* load */
7490 tmp = tcg_temp_new_i32();
7491                 switch (sh) {
7492 case 1:
7493 gen_aa32_ld16u(tmp, addr, IS_USER(s));
7494 break;
7495 case 2:
7496 gen_aa32_ld8s(tmp, addr, IS_USER(s));
7497 break;
7498 default:
7499 case 3:
7500 gen_aa32_ld16s(tmp, addr, IS_USER(s));
7501 break;
7502 }
7503 load = 1;
7504 } else if (sh & 2) {
7505 ARCH(5TE);
7506 /* doubleword */
7507 if (sh & 1) {
7508 /* store */
7509 tmp = load_reg(s, rd);
7510 gen_aa32_st32(tmp, addr, IS_USER(s));
7511 tcg_temp_free_i32(tmp);
7512 tcg_gen_addi_i32(addr, addr, 4);
7513 tmp = load_reg(s, rd + 1);
7514 gen_aa32_st32(tmp, addr, IS_USER(s));
7515 tcg_temp_free_i32(tmp);
7516 load = 0;
7517 } else {
7518 /* load */
7519 tmp = tcg_temp_new_i32();
7520 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7521 store_reg(s, rd, tmp);
7522 tcg_gen_addi_i32(addr, addr, 4);
7523 tmp = tcg_temp_new_i32();
7524 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7525 rd++;
7526 load = 1;
7527 }
7528 address_offset = -4;
7529 } else {
7530 /* store */
7531 tmp = load_reg(s, rd);
7532 gen_aa32_st16(tmp, addr, IS_USER(s));
7533 tcg_temp_free_i32(tmp);
7534 load = 0;
7535 }
7536 /* Perform base writeback before the loaded value to
7537 ensure correct behavior with overlapping index registers.
7538                    ldrd with base writeback is undefined if the
7539 destination and index registers overlap. */
7540 if (!(insn & (1 << 24))) {
7541 gen_add_datah_offset(s, insn, address_offset, addr);
7542 store_reg(s, rn, addr);
7543 } else if (insn & (1 << 21)) {
7544 if (address_offset)
7545 tcg_gen_addi_i32(addr, addr, address_offset);
7546 store_reg(s, rn, addr);
7547 } else {
7548 tcg_temp_free_i32(addr);
7549 }
7550 if (load) {
7551 /* Complete the load. */
7552 store_reg(s, rd, tmp);
7553 }
7554 }
7555 break;
7556 case 0x4:
7557 case 0x5:
7558 goto do_ldst;
7559 case 0x6:
7560 case 0x7:
7561 if (insn & (1 << 4)) {
7562 ARCH(6);
7563 /* Armv6 Media instructions. */
7564 rm = insn & 0xf;
7565 rn = (insn >> 16) & 0xf;
7566 rd = (insn >> 12) & 0xf;
7567 rs = (insn >> 8) & 0xf;
7568 switch ((insn >> 23) & 3) {
7569 case 0: /* Parallel add/subtract. */
7570 op1 = (insn >> 20) & 7;
7571 tmp = load_reg(s, rn);
7572 tmp2 = load_reg(s, rm);
7573 sh = (insn >> 5) & 7;
7574 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
7575 goto illegal_op;
7576 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7577 tcg_temp_free_i32(tmp2);
7578 store_reg(s, rd, tmp);
7579 break;
7580 case 1:
7581 if ((insn & 0x00700020) == 0) {
7582 /* Halfword pack. */
7583 tmp = load_reg(s, rn);
7584 tmp2 = load_reg(s, rm);
7585 shift = (insn >> 7) & 0x1f;
7586 if (insn & (1 << 6)) {
7587 /* pkhtb */
7588 if (shift == 0)
7589 shift = 31;
7590 tcg_gen_sari_i32(tmp2, tmp2, shift);
7591 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7592 tcg_gen_ext16u_i32(tmp2, tmp2);
7593 } else {
7594 /* pkhbt */
7595 if (shift)
7596 tcg_gen_shli_i32(tmp2, tmp2, shift);
7597 tcg_gen_ext16u_i32(tmp, tmp);
7598 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7599 }
7600 tcg_gen_or_i32(tmp, tmp, tmp2);
7601 tcg_temp_free_i32(tmp2);
7602 store_reg(s, rd, tmp);
7603 } else if ((insn & 0x00200020) == 0x00200000) {
7604 /* [us]sat */
7605 tmp = load_reg(s, rm);
7606 shift = (insn >> 7) & 0x1f;
7607 if (insn & (1 << 6)) {
7608 if (shift == 0)
7609 shift = 31;
7610 tcg_gen_sari_i32(tmp, tmp, shift);
7611 } else {
7612 tcg_gen_shli_i32(tmp, tmp, shift);
7613 }
7614 sh = (insn >> 16) & 0x1f;
7615 tmp2 = tcg_const_i32(sh);
7616 if (insn & (1 << 22))
7617 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
7618 else
7619 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
7620 tcg_temp_free_i32(tmp2);
7621 store_reg(s, rd, tmp);
7622 } else if ((insn & 0x00300fe0) == 0x00200f20) {
7623 /* [us]sat16 */
7624 tmp = load_reg(s, rm);
7625 sh = (insn >> 16) & 0x1f;
7626 tmp2 = tcg_const_i32(sh);
7627 if (insn & (1 << 22))
7628 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
7629 else
7630 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
7631 tcg_temp_free_i32(tmp2);
7632 store_reg(s, rd, tmp);
7633 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
7634 /* Select bytes. */
7635 tmp = load_reg(s, rn);
7636 tmp2 = load_reg(s, rm);
7637 tmp3 = tcg_temp_new_i32();
7638 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
7639 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7640 tcg_temp_free_i32(tmp3);
7641 tcg_temp_free_i32(tmp2);
7642 store_reg(s, rd, tmp);
7643 } else if ((insn & 0x000003e0) == 0x00000060) {
7644 tmp = load_reg(s, rm);
7645 shift = (insn >> 10) & 3;
7646 /* ??? In many cases it's not necessary to do a
7647                            rotate; a shift is sufficient. */
7648 if (shift != 0)
7649 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7650 op1 = (insn >> 20) & 7;
7651 switch (op1) {
7652 case 0: gen_sxtb16(tmp); break;
7653 case 2: gen_sxtb(tmp); break;
7654 case 3: gen_sxth(tmp); break;
7655 case 4: gen_uxtb16(tmp); break;
7656 case 6: gen_uxtb(tmp); break;
7657 case 7: gen_uxth(tmp); break;
7658 default: goto illegal_op;
7659 }
7660 if (rn != 15) {
7661 tmp2 = load_reg(s, rn);
7662 if ((op1 & 3) == 0) {
7663 gen_add16(tmp, tmp2);
7664 } else {
7665 tcg_gen_add_i32(tmp, tmp, tmp2);
7666 tcg_temp_free_i32(tmp2);
7667 }
7668 }
7669 store_reg(s, rd, tmp);
7670 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
7671 /* rev */
7672 tmp = load_reg(s, rm);
7673 if (insn & (1 << 22)) {
7674 if (insn & (1 << 7)) {
7675 gen_revsh(tmp);
7676 } else {
7677 ARCH(6T2);
7678 gen_helper_rbit(tmp, tmp);
7679 }
7680 } else {
7681 if (insn & (1 << 7))
7682 gen_rev16(tmp);
7683 else
7684 tcg_gen_bswap32_i32(tmp, tmp);
7685 }
7686 store_reg(s, rd, tmp);
7687 } else {
7688 goto illegal_op;
7689 }
7690 break;
7691 case 2: /* Multiplies (Type 3). */
7692 switch ((insn >> 20) & 0x7) {
7693 case 5:
7694 if (((insn >> 6) ^ (insn >> 7)) & 1) {
7695 /* op2 not 00x or 11x : UNDEF */
7696 goto illegal_op;
7697 }
7698 /* Signed multiply most significant [accumulate].
7699 (SMMUL, SMMLA, SMMLS) */
7700 tmp = load_reg(s, rm);
7701 tmp2 = load_reg(s, rs);
7702 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7703
7704 if (rd != 15) {
7705 tmp = load_reg(s, rd);
7706 if (insn & (1 << 6)) {
7707 tmp64 = gen_subq_msw(tmp64, tmp);
7708 } else {
7709 tmp64 = gen_addq_msw(tmp64, tmp);
7710 }
7711 }
7712 if (insn & (1 << 5)) {
7713 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7714 }
7715 tcg_gen_shri_i64(tmp64, tmp64, 32);
7716 tmp = tcg_temp_new_i32();
7717 tcg_gen_trunc_i64_i32(tmp, tmp64);
7718 tcg_temp_free_i64(tmp64);
7719 store_reg(s, rn, tmp);
7720 break;
7721 case 0:
7722 case 4:
7723 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
7724 if (insn & (1 << 7)) {
7725 goto illegal_op;
7726 }
7727 tmp = load_reg(s, rm);
7728 tmp2 = load_reg(s, rs);
7729 if (insn & (1 << 5))
7730 gen_swap_half(tmp2);
7731 gen_smul_dual(tmp, tmp2);
7732 if (insn & (1 << 6)) {
7733 /* This subtraction cannot overflow. */
7734 tcg_gen_sub_i32(tmp, tmp, tmp2);
7735 } else {
7736 /* This addition cannot overflow 32 bits;
7737 * however it may overflow considered as a signed
7738 * operation, in which case we must set the Q flag.
7739 */
7740 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7741 }
7742 tcg_temp_free_i32(tmp2);
7743 if (insn & (1 << 22)) {
7744 /* smlald, smlsld */
7745 tmp64 = tcg_temp_new_i64();
7746 tcg_gen_ext_i32_i64(tmp64, tmp);
7747 tcg_temp_free_i32(tmp);
7748 gen_addq(s, tmp64, rd, rn);
7749 gen_storeq_reg(s, rd, rn, tmp64);
7750 tcg_temp_free_i64(tmp64);
7751 } else {
7752 /* smuad, smusd, smlad, smlsd */
7753 if (rd != 15)
7754 {
7755 tmp2 = load_reg(s, rd);
7756 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7757 tcg_temp_free_i32(tmp2);
7758 }
7759 store_reg(s, rn, tmp);
7760 }
7761 break;
7762 case 1:
7763 case 3:
7764 /* SDIV, UDIV */
7765 if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
7766 goto illegal_op;
7767 }
7768 if (((insn >> 5) & 7) || (rd != 15)) {
7769 goto illegal_op;
7770 }
7771 tmp = load_reg(s, rm);
7772 tmp2 = load_reg(s, rs);
7773 if (insn & (1 << 21)) {
7774 gen_helper_udiv(tmp, tmp, tmp2);
7775 } else {
7776 gen_helper_sdiv(tmp, tmp, tmp2);
7777 }
7778 tcg_temp_free_i32(tmp2);
7779 store_reg(s, rn, tmp);
7780 break;
7781 default:
7782 goto illegal_op;
7783 }
7784 break;
7785 case 3:
7786 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7787 switch (op1) {
7788 case 0: /* Unsigned sum of absolute differences. */
7789 ARCH(6);
7790 tmp = load_reg(s, rm);
7791 tmp2 = load_reg(s, rs);
7792 gen_helper_usad8(tmp, tmp, tmp2);
7793 tcg_temp_free_i32(tmp2);
7794 if (rd != 15) {
7795 tmp2 = load_reg(s, rd);
7796 tcg_gen_add_i32(tmp, tmp, tmp2);
7797 tcg_temp_free_i32(tmp2);
7798 }
7799 store_reg(s, rn, tmp);
7800 break;
7801 case 0x20: case 0x24: case 0x28: case 0x2c:
7802 /* Bitfield insert/clear. */
7803 ARCH(6T2);
7804 shift = (insn >> 7) & 0x1f;
7805 i = (insn >> 16) & 0x1f;
7806 i = i + 1 - shift;
7807 if (rm == 15) {
7808 tmp = tcg_temp_new_i32();
7809 tcg_gen_movi_i32(tmp, 0);
7810 } else {
7811 tmp = load_reg(s, rm);
7812 }
7813 if (i != 32) {
7814 tmp2 = load_reg(s, rd);
7815 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7816 tcg_temp_free_i32(tmp2);
7817 }
7818 store_reg(s, rd, tmp);
7819 break;
7820 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7821 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7822 ARCH(6T2);
7823 tmp = load_reg(s, rm);
7824 shift = (insn >> 7) & 0x1f;
7825 i = ((insn >> 16) & 0x1f) + 1;
7826 if (shift + i > 32)
7827 goto illegal_op;
7828 if (i < 32) {
7829 if (op1 & 0x20) {
7830 gen_ubfx(tmp, shift, (1u << i) - 1);
7831 } else {
7832 gen_sbfx(tmp, shift, i);
7833 }
7834 }
7835 store_reg(s, rd, tmp);
7836 break;
7837 default:
7838 goto illegal_op;
7839 }
7840 break;
7841 }
7842 break;
7843 }
7844 do_ldst:
7845 /* Check for undefined extension instructions
7846          * per the ARM Bible, i.e.:
7847 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7848 */
7849 sh = (0xf << 20) | (0xf << 4);
7850 if (op1 == 0x7 && ((insn & sh) == sh))
7851 {
7852 goto illegal_op;
7853 }
7854 /* load/store byte/word */
7855 rn = (insn >> 16) & 0xf;
7856 rd = (insn >> 12) & 0xf;
7857 tmp2 = load_reg(s, rn);
7858 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7859 if (insn & (1 << 24))
7860 gen_add_data_offset(s, insn, tmp2);
7861 if (insn & (1 << 20)) {
7862 /* load */
7863 tmp = tcg_temp_new_i32();
7864 if (insn & (1 << 22)) {
7865 gen_aa32_ld8u(tmp, tmp2, i);
7866 } else {
7867 gen_aa32_ld32u(tmp, tmp2, i);
7868 }
7869 } else {
7870 /* store */
7871 tmp = load_reg(s, rd);
7872 if (insn & (1 << 22)) {
7873 gen_aa32_st8(tmp, tmp2, i);
7874 } else {
7875 gen_aa32_st32(tmp, tmp2, i);
7876 }
7877 tcg_temp_free_i32(tmp);
7878 }
7879 if (!(insn & (1 << 24))) {
7880 gen_add_data_offset(s, insn, tmp2);
7881 store_reg(s, rn, tmp2);
7882 } else if (insn & (1 << 21)) {
7883 store_reg(s, rn, tmp2);
7884 } else {
7885 tcg_temp_free_i32(tmp2);
7886 }
7887 if (insn & (1 << 20)) {
7888 /* Complete the load. */
7889 store_reg_from_load(env, s, rd, tmp);
7890 }
7891 break;
7892 case 0x08:
7893 case 0x09:
7894 {
7895 int j, n, user, loaded_base;
7896 TCGv_i32 loaded_var;
7897 /* load/store multiple words */
7898 /* XXX: store correct base if write back */
7899 user = 0;
7900 if (insn & (1 << 22)) {
7901 if (IS_USER(s))
7902 goto illegal_op; /* only usable in supervisor mode */
7903
7904 if ((insn & (1 << 15)) == 0)
7905 user = 1;
7906 }
7907 rn = (insn >> 16) & 0xf;
7908 addr = load_reg(s, rn);
7909
7910 /* compute total size */
7911 loaded_base = 0;
7912 TCGV_UNUSED_I32(loaded_var);
7913 n = 0;
7914         for (i = 0; i < 16; i++) {
7915 if (insn & (1 << i))
7916 n++;
7917 }
7918 /* XXX: test invalid n == 0 case ? */
7919 if (insn & (1 << 23)) {
7920 if (insn & (1 << 24)) {
7921 /* pre increment */
7922 tcg_gen_addi_i32(addr, addr, 4);
7923 } else {
7924 /* post increment */
7925 }
7926 } else {
7927 if (insn & (1 << 24)) {
7928 /* pre decrement */
7929 tcg_gen_addi_i32(addr, addr, -(n * 4));
7930 } else {
7931 /* post decrement */
7932 if (n != 1)
7933 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7934 }
7935 }
7936 j = 0;
7937         for (i = 0; i < 16; i++) {
7938 if (insn & (1 << i)) {
7939 if (insn & (1 << 20)) {
7940 /* load */
7941 tmp = tcg_temp_new_i32();
7942 gen_aa32_ld32u(tmp, addr, IS_USER(s));
7943 if (user) {
7944 tmp2 = tcg_const_i32(i);
7945 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
7946 tcg_temp_free_i32(tmp2);
7947 tcg_temp_free_i32(tmp);
7948 } else if (i == rn) {
7949 loaded_var = tmp;
7950 loaded_base = 1;
7951 } else {
7952 store_reg_from_load(env, s, i, tmp);
7953 }
7954 } else {
7955 /* store */
7956 if (i == 15) {
7957 /* special case: r15 = PC + 8 */
7958 val = (long)s->pc + 4;
7959 tmp = tcg_temp_new_i32();
7960 tcg_gen_movi_i32(tmp, val);
7961 } else if (user) {
7962 tmp = tcg_temp_new_i32();
7963 tmp2 = tcg_const_i32(i);
7964 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
7965 tcg_temp_free_i32(tmp2);
7966 } else {
7967 tmp = load_reg(s, i);
7968 }
7969 gen_aa32_st32(tmp, addr, IS_USER(s));
7970 tcg_temp_free_i32(tmp);
7971 }
7972 j++;
7973 /* no need to add after the last transfer */
7974 if (j != n)
7975 tcg_gen_addi_i32(addr, addr, 4);
7976 }
7977 }
7978 if (insn & (1 << 21)) {
7979 /* write back */
7980 if (insn & (1 << 23)) {
7981 if (insn & (1 << 24)) {
7982 /* pre increment */
7983 } else {
7984 /* post increment */
7985 tcg_gen_addi_i32(addr, addr, 4);
7986 }
7987 } else {
7988 if (insn & (1 << 24)) {
7989 /* pre decrement */
7990 if (n != 1)
7991 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7992 } else {
7993 /* post decrement */
7994 tcg_gen_addi_i32(addr, addr, -(n * 4));
7995 }
7996 }
7997 store_reg(s, rn, addr);
7998 } else {
7999 tcg_temp_free_i32(addr);
8000 }
8001 if (loaded_base) {
8002 store_reg(s, rn, loaded_var);
8003 }
8004 if ((insn & (1 << 22)) && !user) {
8005 /* Restore CPSR from SPSR. */
8006 tmp = load_cpu_field(spsr);
8007 gen_set_cpsr(tmp, 0xffffffff);
8008 tcg_temp_free_i32(tmp);
8009 s->is_jmp = DISAS_UPDATE;
8010 }
8011 }
8012 break;
8013 case 0xa:
8014 case 0xb:
8015 {
8016 int32_t offset;
8017
8018         /* branch (and link): PC := PC + 8 + sign_extend(imm24) * 4 */
8019 val = (int32_t)s->pc;
8020 if (insn & (1 << 24)) {
8021 tmp = tcg_temp_new_i32();
8022 tcg_gen_movi_i32(tmp, val);
8023 store_reg(s, 14, tmp);
8024 }
8025 offset = sextract32(insn << 2, 0, 26);
8026 val += offset + 4;
8027 gen_jmp(s, val);
8028 }
8029 break;
8030 case 0xc:
8031 case 0xd:
8032 case 0xe:
8033 /* Coprocessor. */
8034 if (disas_coproc_insn(env, s, insn))
8035 goto illegal_op;
8036 break;
8037 case 0xf:
8038 /* swi */
8039 gen_set_pc_im(s, s->pc);
8040 s->is_jmp = DISAS_SWI;
8041 break;
8042 default:
8043 illegal_op:
8044 gen_exception_insn(s, 4, EXCP_UDEF);
8045 break;
8046 }
8047 }
8048 }
8049
8050 /* Return true if this is a Thumb-2 logical op. */
8051 static int
8052 thumb2_logic_op(int op)
8053 {
8054 return (op < 8);
8055 }
8056
8057 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
8058 then set condition code flags based on the result of the operation.
8059 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
8060 to the high bit of T1.
8061 Returns zero if the opcode is valid. */
8062
8063 static int
8064 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
8065 TCGv_i32 t0, TCGv_i32 t1)
8066 {
8067 int logic_cc;
8068
8069 logic_cc = 0;
8070 switch (op) {
8071 case 0: /* and */
8072 tcg_gen_and_i32(t0, t0, t1);
8073 logic_cc = conds;
8074 break;
8075 case 1: /* bic */
8076 tcg_gen_andc_i32(t0, t0, t1);
8077 logic_cc = conds;
8078 break;
8079 case 2: /* orr */
8080 tcg_gen_or_i32(t0, t0, t1);
8081 logic_cc = conds;
8082 break;
8083 case 3: /* orn */
8084 tcg_gen_orc_i32(t0, t0, t1);
8085 logic_cc = conds;
8086 break;
8087 case 4: /* eor */
8088 tcg_gen_xor_i32(t0, t0, t1);
8089 logic_cc = conds;
8090 break;
8091 case 8: /* add */
8092 if (conds)
8093 gen_add_CC(t0, t0, t1);
8094 else
8095 tcg_gen_add_i32(t0, t0, t1);
8096 break;
8097 case 10: /* adc */
8098 if (conds)
8099 gen_adc_CC(t0, t0, t1);
8100 else
8101 gen_adc(t0, t1);
8102 break;
8103 case 11: /* sbc */
8104 if (conds) {
8105 gen_sbc_CC(t0, t0, t1);
8106 } else {
8107 gen_sub_carry(t0, t0, t1);
8108 }
8109 break;
8110 case 13: /* sub */
8111 if (conds)
8112 gen_sub_CC(t0, t0, t1);
8113 else
8114 tcg_gen_sub_i32(t0, t0, t1);
8115 break;
8116 case 14: /* rsb */
8117 if (conds)
8118 gen_sub_CC(t0, t1, t0);
8119 else
8120 tcg_gen_sub_i32(t0, t1, t0);
8121 break;
8122 default: /* 5, 6, 7, 9, 12, 15. */
8123 return 1;
8124 }
8125 if (logic_cc) {
8126 gen_logic_CC(t0);
8127 if (shifter_out)
8128 gen_set_CF_bit31(t1);
8129 }
8130 return 0;
8131 }
8132
8133 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
8134 is not legal. */
8135 static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
8136 {
8137 uint32_t insn, imm, shift, offset;
8138 uint32_t rd, rn, rm, rs;
8139 TCGv_i32 tmp;
8140 TCGv_i32 tmp2;
8141 TCGv_i32 tmp3;
8142 TCGv_i32 addr;
8143 TCGv_i64 tmp64;
8144 int op;
8145 int shiftop;
8146 int conds;
8147 int logic_cc;
8148
8149 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
8150           || arm_feature(env, ARM_FEATURE_M))) {
8151 /* Thumb-1 cores may need to treat bl and blx as a pair of
8152 16-bit instructions to get correct prefetch abort behavior. */
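        /* In this legacy encoding the first halfword adds the sign-extended
         * upper part of the offset to the PC and leaves the result in LR;
         * the second halfword (BL or BLX) adds the lower offset to LR,
         * branches there, and writes the return address (with bit 0 set)
         * back into LR. The two halves are handled separately below so a
         * prefetch abort between them behaves correctly.
         */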
8153 insn = insn_hw1;
8154 if ((insn & (1 << 12)) == 0) {
8155 ARCH(5);
8156 /* Second half of blx. */
8157 offset = ((insn & 0x7ff) << 1);
8158 tmp = load_reg(s, 14);
8159 tcg_gen_addi_i32(tmp, tmp, offset);
8160 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
8161
8162 tmp2 = tcg_temp_new_i32();
8163 tcg_gen_movi_i32(tmp2, s->pc | 1);
8164 store_reg(s, 14, tmp2);
8165 gen_bx(s, tmp);
8166 return 0;
8167 }
8168 if (insn & (1 << 11)) {
8169 /* Second half of bl. */
8170 offset = ((insn & 0x7ff) << 1) | 1;
8171 tmp = load_reg(s, 14);
8172 tcg_gen_addi_i32(tmp, tmp, offset);
8173
8174 tmp2 = tcg_temp_new_i32();
8175 tcg_gen_movi_i32(tmp2, s->pc | 1);
8176 store_reg(s, 14, tmp2);
8177 gen_bx(s, tmp);
8178 return 0;
8179 }
8180 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
8181 /* Instruction spans a page boundary. Implement it as two
8182            16-bit instructions in case the second half causes a
8183 prefetch abort. */
8184 offset = ((int32_t)insn << 21) >> 9;
8185 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
8186 return 0;
8187 }
8188 /* Fall through to 32-bit decode. */
8189 }
8190
8191 insn = arm_lduw_code(env, s->pc, s->bswap_code);
8192 s->pc += 2;
8193 insn |= (uint32_t)insn_hw1 << 16;
8194
8195 if ((insn & 0xf800e800) != 0xf000e800) {
8196 ARCH(6T2);
8197 }
8198
8199 rn = (insn >> 16) & 0xf;
8200 rs = (insn >> 12) & 0xf;
8201 rd = (insn >> 8) & 0xf;
8202 rm = insn & 0xf;
8203 switch ((insn >> 25) & 0xf) {
8204 case 0: case 1: case 2: case 3:
8205 /* 16-bit instructions. Should never happen. */
8206 abort();
8207 case 4:
8208 if (insn & (1 << 22)) {
8209 /* Other load/store, table branch. */
8210 if (insn & 0x01200000) {
8211 /* Load/store doubleword. */
8212 if (rn == 15) {
8213 addr = tcg_temp_new_i32();
8214 tcg_gen_movi_i32(addr, s->pc & ~3);
8215 } else {
8216 addr = load_reg(s, rn);
8217 }
8218 offset = (insn & 0xff) * 4;
8219 if ((insn & (1 << 23)) == 0)
8220 offset = -offset;
8221 if (insn & (1 << 24)) {
8222 tcg_gen_addi_i32(addr, addr, offset);
8223 offset = 0;
8224 }
8225 if (insn & (1 << 20)) {
8226 /* ldrd */
8227 tmp = tcg_temp_new_i32();
8228 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8229 store_reg(s, rs, tmp);
8230 tcg_gen_addi_i32(addr, addr, 4);
8231 tmp = tcg_temp_new_i32();
8232 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8233 store_reg(s, rd, tmp);
8234 } else {
8235 /* strd */
8236 tmp = load_reg(s, rs);
8237 gen_aa32_st32(tmp, addr, IS_USER(s));
8238 tcg_temp_free_i32(tmp);
8239 tcg_gen_addi_i32(addr, addr, 4);
8240 tmp = load_reg(s, rd);
8241 gen_aa32_st32(tmp, addr, IS_USER(s));
8242 tcg_temp_free_i32(tmp);
8243 }
8244 if (insn & (1 << 21)) {
8245 /* Base writeback. */
8246 if (rn == 15)
8247 goto illegal_op;
8248 tcg_gen_addi_i32(addr, addr, offset - 4);
8249 store_reg(s, rn, addr);
8250 } else {
8251 tcg_temp_free_i32(addr);
8252 }
8253 } else if ((insn & (1 << 23)) == 0) {
8254 /* Load/store exclusive word. */
8255 addr = tcg_temp_local_new_i32();
8256 load_reg_var(s, addr, rn);
8257 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
8258 if (insn & (1 << 20)) {
8259 gen_load_exclusive(s, rs, 15, addr, 2);
8260 } else {
8261 gen_store_exclusive(s, rd, rs, 15, addr, 2);
8262 }
8263 tcg_temp_free_i32(addr);
8264 } else if ((insn & (7 << 5)) == 0) {
8265 /* Table Branch. */
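                /* TBB/TBH branch forward by twice the unsigned byte (TBB)
                 * or halfword (TBH, bit 4 set) loaded from a table at
                 * Rn + Rm (TBB) or Rn + 2*Rm (TBH); Rn == 15 uses the
                 * current PC as the table base.
                 */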
8266 if (rn == 15) {
8267 addr = tcg_temp_new_i32();
8268 tcg_gen_movi_i32(addr, s->pc);
8269 } else {
8270 addr = load_reg(s, rn);
8271 }
8272 tmp = load_reg(s, rm);
8273 tcg_gen_add_i32(addr, addr, tmp);
8274 if (insn & (1 << 4)) {
8275 /* tbh */
8276 tcg_gen_add_i32(addr, addr, tmp);
8277 tcg_temp_free_i32(tmp);
8278 tmp = tcg_temp_new_i32();
8279 gen_aa32_ld16u(tmp, addr, IS_USER(s));
8280 } else { /* tbb */
8281 tcg_temp_free_i32(tmp);
8282 tmp = tcg_temp_new_i32();
8283 gen_aa32_ld8u(tmp, addr, IS_USER(s));
8284 }
8285 tcg_temp_free_i32(addr);
8286 tcg_gen_shli_i32(tmp, tmp, 1);
8287 tcg_gen_addi_i32(tmp, tmp, s->pc);
8288 store_reg(s, 15, tmp);
8289 } else {
8290 int op2 = (insn >> 6) & 0x3;
8291 op = (insn >> 4) & 0x3;
8292 switch (op2) {
8293 case 0:
8294 goto illegal_op;
8295 case 1:
8296 /* Load/store exclusive byte/halfword/doubleword */
8297 if (op == 2) {
8298 goto illegal_op;
8299 }
8300 ARCH(7);
8301 break;
8302 case 2:
8303 /* Load-acquire/store-release */
8304 if (op == 3) {
8305 goto illegal_op;
8306 }
8307 /* Fall through */
8308 case 3:
8309 /* Load-acquire/store-release exclusive */
8310 ARCH(8);
8311 break;
8312 }
8313 addr = tcg_temp_local_new_i32();
8314 load_reg_var(s, addr, rn);
8315 if (!(op2 & 1)) {
8316 if (insn & (1 << 20)) {
8317 tmp = tcg_temp_new_i32();
8318 switch (op) {
8319 case 0: /* ldab */
8320 gen_aa32_ld8u(tmp, addr, IS_USER(s));
8321 break;
8322 case 1: /* ldah */
8323 gen_aa32_ld16u(tmp, addr, IS_USER(s));
8324 break;
8325 case 2: /* lda */
8326 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8327 break;
8328 default:
8329 abort();
8330 }
8331 store_reg(s, rs, tmp);
8332 } else {
8333 tmp = load_reg(s, rs);
8334 switch (op) {
8335 case 0: /* stlb */
8336 gen_aa32_st8(tmp, addr, IS_USER(s));
8337 break;
8338 case 1: /* stlh */
8339 gen_aa32_st16(tmp, addr, IS_USER(s));
8340 break;
8341 case 2: /* stl */
8342 gen_aa32_st32(tmp, addr, IS_USER(s));
8343 break;
8344 default:
8345 abort();
8346 }
8347 tcg_temp_free_i32(tmp);
8348 }
8349 } else if (insn & (1 << 20)) {
8350 gen_load_exclusive(s, rs, rd, addr, op);
8351 } else {
8352 gen_store_exclusive(s, rm, rs, rd, addr, op);
8353 }
8354 tcg_temp_free_i32(addr);
8355 }
8356 } else {
8357 /* Load/store multiple, RFE, SRS. */
8358 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
8359 /* RFE, SRS: not available in user mode or on M profile */
8360 if (IS_USER(s) || IS_M(env)) {
8361 goto illegal_op;
8362 }
8363 if (insn & (1 << 20)) {
8364 /* rfe */
8365 addr = load_reg(s, rn);
8366 if ((insn & (1 << 24)) == 0)
8367 tcg_gen_addi_i32(addr, addr, -8);
8368 /* Load PC into tmp and CPSR into tmp2. */
8369 tmp = tcg_temp_new_i32();
8370 gen_aa32_ld32u(tmp, addr, 0);
8371 tcg_gen_addi_i32(addr, addr, 4);
8372 tmp2 = tcg_temp_new_i32();
8373 gen_aa32_ld32u(tmp2, addr, 0);
8374 if (insn & (1 << 21)) {
8375 /* Base writeback. */
8376 if (insn & (1 << 24)) {
8377 tcg_gen_addi_i32(addr, addr, 4);
8378 } else {
8379 tcg_gen_addi_i32(addr, addr, -4);
8380 }
8381 store_reg(s, rn, addr);
8382 } else {
8383 tcg_temp_free_i32(addr);
8384 }
8385 gen_rfe(s, tmp, tmp2);
8386 } else {
8387 /* srs */
8388 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
8389 insn & (1 << 21));
8390 }
8391 } else {
8392 int i, loaded_base = 0;
8393 TCGv_i32 loaded_var;
8394 /* Load/store multiple. */
8395 addr = load_reg(s, rn);
8396 offset = 0;
8397 for (i = 0; i < 16; i++) {
8398 if (insn & (1 << i))
8399 offset += 4;
8400 }
8401 if (insn & (1 << 24)) {
8402 tcg_gen_addi_i32(addr, addr, -offset);
8403 }
8404
8405 TCGV_UNUSED_I32(loaded_var);
8406 for (i = 0; i < 16; i++) {
8407 if ((insn & (1 << i)) == 0)
8408 continue;
8409 if (insn & (1 << 20)) {
8410 /* Load. */
8411 tmp = tcg_temp_new_i32();
8412 gen_aa32_ld32u(tmp, addr, IS_USER(s));
8413 if (i == 15) {
8414 gen_bx(s, tmp);
8415 } else if (i == rn) {
8416 loaded_var = tmp;
8417 loaded_base = 1;
8418 } else {
8419 store_reg(s, i, tmp);
8420 }
8421 } else {
8422 /* Store. */
8423 tmp = load_reg(s, i);
8424 gen_aa32_st32(tmp, addr, IS_USER(s));
8425 tcg_temp_free_i32(tmp);
8426 }
8427 tcg_gen_addi_i32(addr, addr, 4);
8428 }
8429 if (loaded_base) {
8430 store_reg(s, rn, loaded_var);
8431 }
8432 if (insn & (1 << 21)) {
8433 /* Base register writeback. */
8434 if (insn & (1 << 24)) {
8435 tcg_gen_addi_i32(addr, addr, -offset);
8436 }
8437 /* Fault if writeback register is in register list. */
8438 if (insn & (1 << rn))
8439 goto illegal_op;
8440 store_reg(s, rn, addr);
8441 } else {
8442 tcg_temp_free_i32(addr);
8443 }
8444 }
8445 }
8446 break;
8447 case 5:
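/* Data-processing (register, constant shift); op == 6 is the halfword
   pack instructions PKHBT/PKHTB. */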
8448
8449 op = (insn >> 21) & 0xf;
8450 if (op == 6) {
8451 /* Halfword pack. */
8452 tmp = load_reg(s, rn);
8453 tmp2 = load_reg(s, rm);
8454 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8455 if (insn & (1 << 5)) {
8456 /* pkhtb */
8457 if (shift == 0)
8458 shift = 31;
8459 tcg_gen_sari_i32(tmp2, tmp2, shift);
8460 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
8461 tcg_gen_ext16u_i32(tmp2, tmp2);
8462 } else {
8463 /* pkhbt */
8464 if (shift)
8465 tcg_gen_shli_i32(tmp2, tmp2, shift);
8466 tcg_gen_ext16u_i32(tmp, tmp);
8467 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8468 }
8469 tcg_gen_or_i32(tmp, tmp, tmp2);
8470 tcg_temp_free_i32(tmp2);
8471 store_reg(s, rd, tmp);
8472 } else {
8473 /* Data processing register constant shift. */
8474 if (rn == 15) {
8475 tmp = tcg_temp_new_i32();
8476 tcg_gen_movi_i32(tmp, 0);
8477 } else {
8478 tmp = load_reg(s, rn);
8479 }
8480 tmp2 = load_reg(s, rm);
8481
8482 shiftop = (insn >> 4) & 3;
8483 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8484 conds = (insn & (1 << 20)) != 0;
8485 logic_cc = (conds && thumb2_logic_op(op));
8486 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8487 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
8488 goto illegal_op;
8489 tcg_temp_free_i32(tmp2);
8490 if (rd != 15) {
8491 store_reg(s, rd, tmp);
8492 } else {
8493 tcg_temp_free_i32(tmp);
8494 }
8495 }
8496 break;
8497 case 13: /* Misc data processing. */
8498 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
8499 if (op < 4 && (insn & 0xf000) != 0xf000)
8500 goto illegal_op;
8501 switch (op) {
8502 case 0: /* Register controlled shift. */
8503 tmp = load_reg(s, rn);
8504 tmp2 = load_reg(s, rm);
8505 if ((insn & 0x70) != 0)
8506 goto illegal_op;
8507 op = (insn >> 21) & 3;
8508 logic_cc = (insn & (1 << 20)) != 0;
8509 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
8510 if (logic_cc)
8511 gen_logic_CC(tmp);
8512 store_reg_bx(env, s, rd, tmp);
8513 break;
8514 case 1: /* Sign/zero extend. */
8515 tmp = load_reg(s, rm);
8516 shift = (insn >> 4) & 3;
8517 /* ??? In many cases it's not necessary to do a
8518 rotate; a shift is sufficient. */
8519 if (shift != 0)
8520 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8521 op = (insn >> 20) & 7;
8522 switch (op) {
8523 case 0: gen_sxth(tmp); break;
8524 case 1: gen_uxth(tmp); break;
8525 case 2: gen_sxtb16(tmp); break;
8526 case 3: gen_uxtb16(tmp); break;
8527 case 4: gen_sxtb(tmp); break;
8528 case 5: gen_uxtb(tmp); break;
8529 default: goto illegal_op;
8530 }
8531 if (rn != 15) {
8532 tmp2 = load_reg(s, rn);
8533 if ((op >> 1) == 1) {
8534 gen_add16(tmp, tmp2);
8535 } else {
8536 tcg_gen_add_i32(tmp, tmp, tmp2);
8537 tcg_temp_free_i32(tmp2);
8538 }
8539 }
8540 store_reg(s, rd, tmp);
8541 break;
8542 case 2: /* SIMD add/subtract. */
8543 op = (insn >> 20) & 7;
8544 shift = (insn >> 4) & 7;
8545 if ((op & 3) == 3 || (shift & 3) == 3)
8546 goto illegal_op;
8547 tmp = load_reg(s, rn);
8548 tmp2 = load_reg(s, rm);
8549 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8550 tcg_temp_free_i32(tmp2);
8551 store_reg(s, rd, tmp);
8552 break;
8553 case 3: /* Other data processing. */
8554 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
8555 if (op < 4) {
8556 /* Saturating add/subtract. */
8557 tmp = load_reg(s, rn);
8558 tmp2 = load_reg(s, rm);
8559 if (op & 1)
8560 gen_helper_double_saturate(tmp, cpu_env, tmp);
8561 if (op & 2)
8562 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
8563 else
8564 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
8565 tcg_temp_free_i32(tmp2);
8566 } else {
8567 tmp = load_reg(s, rn);
8568 switch (op) {
8569 case 0x0a: /* rbit */
8570 gen_helper_rbit(tmp, tmp);
8571 break;
8572 case 0x08: /* rev */
8573 tcg_gen_bswap32_i32(tmp, tmp);
8574 break;
8575 case 0x09: /* rev16 */
8576 gen_rev16(tmp);
8577 break;
8578 case 0x0b: /* revsh */
8579 gen_revsh(tmp);
8580 break;
8581 case 0x10: /* sel */
8582 tmp2 = load_reg(s, rm);
8583 tmp3 = tcg_temp_new_i32();
8584 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
8585 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
8586 tcg_temp_free_i32(tmp3);
8587 tcg_temp_free_i32(tmp2);
8588 break;
8589 case 0x18: /* clz */
8590 gen_helper_clz(tmp, tmp);
8591 break;
8592 default:
8593 goto illegal_op;
8594 }
8595 }
8596 store_reg(s, rd, tmp);
8597 break;
8598 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
8599 op = (insn >> 4) & 0xf;
8600 tmp = load_reg(s, rn);
8601 tmp2 = load_reg(s, rm);
8602 switch ((insn >> 20) & 7) {
8603 case 0: /* 32 x 32 -> 32 */
8604 tcg_gen_mul_i32(tmp, tmp, tmp2);
8605 tcg_temp_free_i32(tmp2);
8606 if (rs != 15) {
8607 tmp2 = load_reg(s, rs);
8608 if (op)
8609 tcg_gen_sub_i32(tmp, tmp2, tmp);
8610 else
8611 tcg_gen_add_i32(tmp, tmp, tmp2);
8612 tcg_temp_free_i32(tmp2);
8613 }
8614 break;
8615 case 1: /* 16 x 16 -> 32 */
8616 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8617 tcg_temp_free_i32(tmp2);
8618 if (rs != 15) {
8619 tmp2 = load_reg(s, rs);
8620 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8621 tcg_temp_free_i32(tmp2);
8622 }
8623 break;
8624 case 2: /* Dual multiply add. */
8625 case 4: /* Dual multiply subtract. */
8626 if (op)
8627 gen_swap_half(tmp2);
8628 gen_smul_dual(tmp, tmp2);
8629 if (insn & (1 << 22)) {
8630 /* This subtraction cannot overflow. */
8631 tcg_gen_sub_i32(tmp, tmp, tmp2);
8632 } else {
8633 /* This addition cannot overflow 32 bits;
8634 * however it may overflow considered as a signed
8635 * operation, in which case we must set the Q flag.
8636 */
8637 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8638 }
8639 tcg_temp_free_i32(tmp2);
8640 if (rs != 15)
8641 {
8642 tmp2 = load_reg(s, rs);
8643 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8644 tcg_temp_free_i32(tmp2);
8645 }
8646 break;
8647 case 3: /* 32 * 16 -> 32msb */
8648 if (op)
8649 tcg_gen_sari_i32(tmp2, tmp2, 16);
8650 else
8651 gen_sxth(tmp2);
8652 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8653 tcg_gen_shri_i64(tmp64, tmp64, 16);
8654 tmp = tcg_temp_new_i32();
8655 tcg_gen_trunc_i64_i32(tmp, tmp64);
8656 tcg_temp_free_i64(tmp64);
8657 if (rs != 15)
8658 {
8659 tmp2 = load_reg(s, rs);
8660 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8661 tcg_temp_free_i32(tmp2);
8662 }
8663 break;
8664 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
8665 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8666 if (rs != 15) {
8667 tmp = load_reg(s, rs);
8668 if (insn & (1 << 20)) {
8669 tmp64 = gen_addq_msw(tmp64, tmp);
8670 } else {
8671 tmp64 = gen_subq_msw(tmp64, tmp);
8672 }
8673 }
8674 if (insn & (1 << 4)) {
8675 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8676 }
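/* Bit 4 selects the rounding variants (SMMULR/SMMLAR/SMMLSR): adding
   0x80000000 before taking the high word rounds to nearest. */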
8677 tcg_gen_shri_i64(tmp64, tmp64, 32);
8678 tmp = tcg_temp_new_i32();
8679 tcg_gen_trunc_i64_i32(tmp, tmp64);
8680 tcg_temp_free_i64(tmp64);
8681 break;
8682 case 7: /* Unsigned sum of absolute differences. */
8683 gen_helper_usad8(tmp, tmp, tmp2);
8684 tcg_temp_free_i32(tmp2);
8685 if (rs != 15) {
8686 tmp2 = load_reg(s, rs);
8687 tcg_gen_add_i32(tmp, tmp, tmp2);
8688 tcg_temp_free_i32(tmp2);
8689 }
8690 break;
8691 }
8692 store_reg(s, rd, tmp);
8693 break;
8694 case 6: case 7: /* 64-bit multiply, Divide. */
8695 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
8696 tmp = load_reg(s, rn);
8697 tmp2 = load_reg(s, rm);
8698 if ((op & 0x50) == 0x10) {
8699 /* sdiv, udiv */
8700 if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
8701 goto illegal_op;
8702 }
8703 if (op & 0x20)
8704 gen_helper_udiv(tmp, tmp, tmp2);
8705 else
8706 gen_helper_sdiv(tmp, tmp, tmp2);
8707 tcg_temp_free_i32(tmp2);
8708 store_reg(s, rd, tmp);
8709 } else if ((op & 0xe) == 0xc) {
8710 /* Dual multiply accumulate long. */
8711 if (op & 1)
8712 gen_swap_half(tmp2);
8713 gen_smul_dual(tmp, tmp2);
8714 if (op & 0x10) {
8715 tcg_gen_sub_i32(tmp, tmp, tmp2);
8716 } else {
8717 tcg_gen_add_i32(tmp, tmp, tmp2);
8718 }
8719 tcg_temp_free_i32(tmp2);
8720 /* Sign-extend the 32-bit dual-multiply result to 64 bits before the long accumulate. */
8721 tmp64 = tcg_temp_new_i64();
8722 tcg_gen_ext_i32_i64(tmp64, tmp);
8723 tcg_temp_free_i32(tmp);
8724 gen_addq(s, tmp64, rs, rd);
8725 gen_storeq_reg(s, rs, rd, tmp64);
8726 tcg_temp_free_i64(tmp64);
8727 } else {
8728 if (op & 0x20) {
8729 /* Unsigned 64-bit multiply */
8730 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8731 } else {
8732 if (op & 8) {
8733 /* smlalxy */
8734 gen_mulxy(tmp, tmp2, op & 2, op & 1);
8735 tcg_temp_free_i32(tmp2);
8736 tmp64 = tcg_temp_new_i64();
8737 tcg_gen_ext_i32_i64(tmp64, tmp);
8738 tcg_temp_free_i32(tmp);
8739 } else {
8740 /* Signed 64-bit multiply */
8741 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8742 }
8743 }
8744 if (op & 4) {
8745 /* umaal */
8746 gen_addq_lo(s, tmp64, rs);
8747 gen_addq_lo(s, tmp64, rd);
8748 } else if (op & 0x40) {
8749 /* 64-bit accumulate. */
8750 gen_addq(s, tmp64, rs, rd);
8751 }
8752 gen_storeq_reg(s, rs, rd, tmp64);
8753 tcg_temp_free_i64(tmp64);
8754 }
8755 break;
8756 }
8757 break;
8758 case 6: case 7: case 14: case 15:
8759 /* Coprocessor. */
8760 if (((insn >> 24) & 3) == 3) {
8761 /* Translate into the equivalent ARM encoding. */
8762 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
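/* hw1 bit 28 is the Neon 'U' bit in the Thumb encoding: move it down to
   bit 24 and set bit 28 so the word matches the ARM 1111001U pattern that
   disas_neon_data_insn() expects. */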
8763 if (disas_neon_data_insn(env, s, insn))
8764 goto illegal_op;
8765 } else {
8766 if (insn & (1 << 28))
8767 goto illegal_op;
8768 if (disas_coproc_insn (env, s, insn))
8769 goto illegal_op;
8770 }
8771 break;
8772 case 8: case 9: case 10: case 11:
8773 if (insn & (1 << 15)) {
8774 /* Branches, misc control. */
8775 if (insn & 0x5000) {
8776 /* Unconditional branch. */
8777 /* signextend(hw1[10:0]) -> offset[31:12]. */
8778 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
8779 /* hw1[10:0] -> offset[11:1]. */
8780 offset |= (insn & 0x7ff) << 1;
8781 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
8782 offset[24:22] already have the same value because of the
8783 sign extension above. */
8784 offset ^= ((~insn) & (1 << 13)) << 10;
8785 offset ^= ((~insn) & (1 << 11)) << 11;
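/* Net result: offset = SignExtend(S:I1:I2:imm10:imm11:'0') with
   I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S); the two XORs above recover
   I1/I2 in offset bits [23:22] from hw2 bits 13 and 11. */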
8786
8787 if (insn & (1 << 14)) {
8788 /* Branch and link. */
8789 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
8790 }
8791
8792 offset += s->pc;
8793 if (insn & (1 << 12)) {
8794 /* b/bl */
8795 gen_jmp(s, offset);
8796 } else {
8797 /* blx */
8798 offset &= ~(uint32_t)2;
8799 /* thumb2 bx, no need to check */
8800 gen_bx_im(s, offset);
8801 }
8802 } else if (((insn >> 23) & 7) == 7) {
8803 /* Misc control */
8804 if (insn & (1 << 13))
8805 goto illegal_op;
8806
8807 if (insn & (1 << 26)) {
8808 /* Secure monitor call (v6Z) */
8809 qemu_log_mask(LOG_UNIMP,
8810 "arm: unimplemented secure monitor call\n");
8811 goto illegal_op; /* not implemented. */
8812 } else {
8813 op = (insn >> 20) & 7;
8814 switch (op) {
8815 case 0: /* msr cpsr. */
8816 if (IS_M(env)) {
8817 tmp = load_reg(s, rn);
8818 addr = tcg_const_i32(insn & 0xff);
8819 gen_helper_v7m_msr(cpu_env, addr, tmp);
8820 tcg_temp_free_i32(addr);
8821 tcg_temp_free_i32(tmp);
8822 gen_lookup_tb(s);
8823 break;
8824 }
8825 /* fall through */
8826 case 1: /* msr spsr. */
8827 if (IS_M(env))
8828 goto illegal_op;
8829 tmp = load_reg(s, rn);
8830 if (gen_set_psr(s,
8831 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8832 op == 1, tmp))
8833 goto illegal_op;
8834 break;
8835 case 2: /* cps, nop-hint. */
8836 if (((insn >> 8) & 7) == 0) {
8837 gen_nop_hint(s, insn & 0xff);
8838 }
8839 /* Implemented as NOP in user mode. */
8840 if (IS_USER(s))
8841 break;
8842 offset = 0;
8843 imm = 0;
8844 if (insn & (1 << 10)) {
8845 if (insn & (1 << 7))
8846 offset |= CPSR_A;
8847 if (insn & (1 << 6))
8848 offset |= CPSR_I;
8849 if (insn & (1 << 5))
8850 offset |= CPSR_F;
8851 if (insn & (1 << 9))
8852 imm = CPSR_A | CPSR_I | CPSR_F;
8853 }
8854 if (insn & (1 << 8)) {
8855 offset |= 0x1f;
8856 imm |= (insn & 0x1f);
8857 }
8858 if (offset) {
8859 gen_set_psr_im(s, offset, 0, imm);
8860 }
8861 break;
8862 case 3: /* Special control operations. */
8863 ARCH(7);
8864 op = (insn >> 4) & 0xf;
8865 switch (op) {
8866 case 2: /* clrex */
8867 gen_clrex(s);
8868 break;
8869 case 4: /* dsb */
8870 case 5: /* dmb */
8871 case 6: /* isb */
8872 /* These execute as NOPs. */
8873 break;
8874 default:
8875 goto illegal_op;
8876 }
8877 break;
8878 case 4: /* bxj */
8879 /* Trivial implementation equivalent to bx. */
8880 tmp = load_reg(s, rn);
8881 gen_bx(s, tmp);
8882 break;
8883 case 5: /* Exception return. */
8884 if (IS_USER(s)) {
8885 goto illegal_op;
8886 }
8887 if (rn != 14 || rd != 15) {
8888 goto illegal_op;
8889 }
8890 tmp = load_reg(s, rn);
8891 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8892 gen_exception_return(s, tmp);
8893 break;
8894 case 6: /* mrs cpsr. */
8895 tmp = tcg_temp_new_i32();
8896 if (IS_M(env)) {
8897 addr = tcg_const_i32(insn & 0xff);
8898 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8899 tcg_temp_free_i32(addr);
8900 } else {
8901 gen_helper_cpsr_read(tmp, cpu_env);
8902 }
8903 store_reg(s, rd, tmp);
8904 break;
8905 case 7: /* mrs spsr. */
8906 /* Not accessible in user mode. */
8907 if (IS_USER(s) || IS_M(env))
8908 goto illegal_op;
8909 tmp = load_cpu_field(spsr);
8910 store_reg(s, rd, tmp);
8911 break;
8912 }
8913 }
8914 } else {
8915 /* Conditional branch. */
8916 op = (insn >> 22) & 0xf;
8917 /* Generate a conditional jump to next instruction. */
8918 s->condlabel = gen_new_label();
8919 gen_test_cc(op ^ 1, s->condlabel);
8920 s->condjmp = 1;
8921
8922 /* offset[11:1] = insn[10:0] */
8923 offset = (insn & 0x7ff) << 1;
8924 /* offset[17:12] = insn[21:16]. */
8925 offset |= (insn & 0x003f0000) >> 4;
8926 /* offset[31:20] = insn[26]. */
8927 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8928 /* offset[18] = insn[13]. */
8929 offset |= (insn & (1 << 13)) << 5;
8930 /* offset[19] = insn[11]. */
8931 offset |= (insn & (1 << 11)) << 8;
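/* i.e. offset = SignExtend(S:J2:J1:imm6:imm11:'0'), the T3 conditional
   branch encoding, giving a range of roughly +/-1MB. */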
8932
8933 /* jump to the offset */
8934 gen_jmp(s, s->pc + offset);
8935 }
8936 } else {
8937 /* Data processing immediate. */
8938 if (insn & (1 << 25)) {
8939 if (insn & (1 << 24)) {
8940 if (insn & (1 << 20))
8941 goto illegal_op;
8942 /* Bitfield/Saturate. */
8943 op = (insn >> 21) & 7;
8944 imm = insn & 0x1f;
8945 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
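/* shift is the lsb / shift amount (imm3:imm2); imm is widthm1 for
   SBFX/UBFX, msb for BFI/BFC, or the saturate position for SSAT/USAT. */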
8946 if (rn == 15) {
8947 tmp = tcg_temp_new_i32();
8948 tcg_gen_movi_i32(tmp, 0);
8949 } else {
8950 tmp = load_reg(s, rn);
8951 }
8952 switch (op) {
8953 case 2: /* Signed bitfield extract. */
8954 imm++;
8955 if (shift + imm > 32)
8956 goto illegal_op;
8957 if (imm < 32)
8958 gen_sbfx(tmp, shift, imm);
8959 break;
8960 case 6: /* Unsigned bitfield extract. */
8961 imm++;
8962 if (shift + imm > 32)
8963 goto illegal_op;
8964 if (imm < 32)
8965 gen_ubfx(tmp, shift, (1u << imm) - 1);
8966 break;
8967 case 3: /* Bitfield insert/clear. */
8968 if (imm < shift)
8969 goto illegal_op;
8970 imm = imm + 1 - shift;
8971 if (imm != 32) {
8972 tmp2 = load_reg(s, rd);
8973 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
8974 tcg_temp_free_i32(tmp2);
8975 }
8976 break;
8977 case 7:
8978 goto illegal_op;
8979 default: /* Saturate. */
8980 if (shift) {
8981 if (op & 1)
8982 tcg_gen_sari_i32(tmp, tmp, shift);
8983 else
8984 tcg_gen_shli_i32(tmp, tmp, shift);
8985 }
8986 tmp2 = tcg_const_i32(imm);
8987 if (op & 4) {
8988 /* Unsigned. */
8989 if ((op & 1) && shift == 0)
8990 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
8991 else
8992 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
8993 } else {
8994 /* Signed. */
8995 if ((op & 1) && shift == 0)
8996 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
8997 else
8998 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
8999 }
9000 tcg_temp_free_i32(tmp2);
9001 break;
9002 }
9003 store_reg(s, rd, tmp);
9004 } else {
9005 imm = ((insn & 0x04000000) >> 15)
9006 | ((insn & 0x7000) >> 4) | (insn & 0xff);
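/* Reassemble the split immediate i:imm3:imm8 (insn bits 26, 14:12 and 7:0)
   into one contiguous 12-bit value. */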
9007 if (insn & (1 << 22)) {
9008 /* 16-bit immediate. */
9009 imm |= (insn >> 4) & 0xf000;
9010 if (insn & (1 << 23)) {
9011 /* movt */
9012 tmp = load_reg(s, rd);
9013 tcg_gen_ext16u_i32(tmp, tmp);
9014 tcg_gen_ori_i32(tmp, tmp, imm << 16);
9015 } else {
9016 /* movw */
9017 tmp = tcg_temp_new_i32();
9018 tcg_gen_movi_i32(tmp, imm);
9019 }
9020 } else {
9021 /* Add/sub 12-bit immediate. */
9022 if (rn == 15) {
9023 offset = s->pc & ~(uint32_t)3;
9024 if (insn & (1 << 23))
9025 offset -= imm;
9026 else
9027 offset += imm;
9028 tmp = tcg_temp_new_i32();
9029 tcg_gen_movi_i32(tmp, offset);
9030 } else {
9031 tmp = load_reg(s, rn);
9032 if (insn & (1 << 23))
9033 tcg_gen_subi_i32(tmp, tmp, imm);
9034 else
9035 tcg_gen_addi_i32(tmp, tmp, imm);
9036 }
9037 }
9038 store_reg(s, rd, tmp);
9039 }
9040 } else {
9041 int shifter_out = 0;
9042 /* modified 12-bit immediate. */
9043 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
9044 imm = (insn & 0xff);
9045 switch (shift) {
9046 case 0: /* XY */
9047 /* Nothing to do. */
9048 break;
9049 case 1: /* 00XY00XY */
9050 imm |= imm << 16;
9051 break;
9052 case 2: /* XY00XY00 */
9053 imm |= imm << 16;
9054 imm <<= 8;
9055 break;
9056 case 3: /* XYXYXYXY */
9057 imm |= imm << 16;
9058 imm |= imm << 8;
9059 break;
9060 default: /* Rotated constant. */
9061 shift = (shift << 1) | (imm >> 7);
9062 imm |= 0x80;
9063 imm = imm << (32 - shift);
9064 shifter_out = 1;
9065 break;
9066 }
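/* This is ThumbExpandImm: in the rotated case the 5-bit rotation
   i:imm3:imm8<7> is always >= 8, so the 1bcdefgh byte never wraps and a
   plain left shift by (32 - shift) is equivalent to the rotate right. */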
9067 tmp2 = tcg_temp_new_i32();
9068 tcg_gen_movi_i32(tmp2, imm);
9069 rn = (insn >> 16) & 0xf;
9070 if (rn == 15) {
9071 tmp = tcg_temp_new_i32();
9072 tcg_gen_movi_i32(tmp, 0);
9073 } else {
9074 tmp = load_reg(s, rn);
9075 }
9076 op = (insn >> 21) & 0xf;
9077 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
9078 shifter_out, tmp, tmp2))
9079 goto illegal_op;
9080 tcg_temp_free_i32(tmp2);
9081 rd = (insn >> 8) & 0xf;
9082 if (rd != 15) {
9083 store_reg(s, rd, tmp);
9084 } else {
9085 tcg_temp_free_i32(tmp);
9086 }
9087 }
9088 }
9089 break;
9090 case 12: /* Load/store single data item. */
9091 {
9092 int postinc = 0;
9093 int writeback = 0;
9094 int user;
9095 if ((insn & 0x01100000) == 0x01000000) {
9096 if (disas_neon_ls_insn(env, s, insn))
9097 goto illegal_op;
9098 break;
9099 }
9100 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
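/* op[1:0] is the access size (0 = byte, 1 = halfword, 2 = word);
   op[2] requests sign extension on loads. */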
9101 if (rs == 15) {
9102 if (!(insn & (1 << 20))) {
9103 goto illegal_op;
9104 }
9105 if (op != 2) {
9106 /* Byte or halfword load space with dest == r15: memory hints.
9107 * Catch them early so we don't emit pointless addressing code.
9108 * This space is a mix of:
9109 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
9110 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
9111 * cores)
9112 * unallocated hints, which must be treated as NOPs
9113 * UNPREDICTABLE space, which we NOP or UNDEF depending on
9114 * which is easiest for the decoding logic
9115 * Some space which must UNDEF
9116 */
9117 int op1 = (insn >> 23) & 3;
9118 int op2 = (insn >> 6) & 0x3f;
9119 if (op & 2) {
9120 goto illegal_op;
9121 }
9122 if (rn == 15) {
9123 /* UNPREDICTABLE, unallocated hint or
9124 * PLD/PLDW/PLI (literal)
9125 */
9126 return 0;
9127 }
9128 if (op1 & 1) {
9129 return 0; /* PLD/PLDW/PLI or unallocated hint */
9130 }
9131 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
9132 return 0; /* PLD/PLDW/PLI or unallocated hint */
9133 }
9134 /* UNDEF space, or an UNPREDICTABLE */
9135 return 1;
9136 }
9137 }
9138 user = IS_USER(s);
9139 if (rn == 15) {
9140 addr = tcg_temp_new_i32();
9141 /* PC relative. */
9142 /* s->pc has already been incremented by 4. */
9143 imm = s->pc & 0xfffffffc;
9144 if (insn & (1 << 23))
9145 imm += insn & 0xfff;
9146 else
9147 imm -= insn & 0xfff;
9148 tcg_gen_movi_i32(addr, imm);
9149 } else {
9150 addr = load_reg(s, rn);
9151 if (insn & (1 << 23)) {
9152 /* Positive offset. */
9153 imm = insn & 0xfff;
9154 tcg_gen_addi_i32(addr, addr, imm);
9155 } else {
9156 imm = insn & 0xff;
9157 switch ((insn >> 8) & 0xf) {
9158 case 0x0: /* Shifted Register. */
9159 shift = (insn >> 4) & 0xf;
9160 if (shift > 3) {
9161 tcg_temp_free_i32(addr);
9162 goto illegal_op;
9163 }
9164 tmp = load_reg(s, rm);
9165 if (shift)
9166 tcg_gen_shli_i32(tmp, tmp, shift);
9167 tcg_gen_add_i32(addr, addr, tmp);
9168 tcg_temp_free_i32(tmp);
9169 break;
9170 case 0xc: /* Negative offset. */
9171 tcg_gen_addi_i32(addr, addr, -imm);
9172 break;
9173 case 0xe: /* User privilege. */
9174 tcg_gen_addi_i32(addr, addr, imm);
9175 user = 1;
9176 break;
9177 case 0x9: /* Post-decrement. */
9178 imm = -imm;
9179 /* Fall through. */
9180 case 0xb: /* Post-increment. */
9181 postinc = 1;
9182 writeback = 1;
9183 break;
9184 case 0xd: /* Pre-decrement. */
9185 imm = -imm;
9186 /* Fall through. */
9187 case 0xf: /* Pre-increment. */
9188 tcg_gen_addi_i32(addr, addr, imm);
9189 writeback = 1;
9190 break;
9191 default:
9192 tcg_temp_free_i32(addr);
9193 goto illegal_op;
9194 }
9195 }
9196 }
9197 if (insn & (1 << 20)) {
9198 /* Load. */
9199 tmp = tcg_temp_new_i32();
9200 switch (op) {
9201 case 0:
9202 gen_aa32_ld8u(tmp, addr, user);
9203 break;
9204 case 4:
9205 gen_aa32_ld8s(tmp, addr, user);
9206 break;
9207 case 1:
9208 gen_aa32_ld16u(tmp, addr, user);
9209 break;
9210 case 5:
9211 gen_aa32_ld16s(tmp, addr, user);
9212 break;
9213 case 2:
9214 gen_aa32_ld32u(tmp, addr, user);
9215 break;
9216 default:
9217 tcg_temp_free_i32(tmp);
9218 tcg_temp_free_i32(addr);
9219 goto illegal_op;
9220 }
9221 if (rs == 15) {
9222 gen_bx(s, tmp);
9223 } else {
9224 store_reg(s, rs, tmp);
9225 }
9226 } else {
9227 /* Store. */
9228 tmp = load_reg(s, rs);
9229 switch (op) {
9230 case 0:
9231 gen_aa32_st8(tmp, addr, user);
9232 break;
9233 case 1:
9234 gen_aa32_st16(tmp, addr, user);
9235 break;
9236 case 2:
9237 gen_aa32_st32(tmp, addr, user);
9238 break;
9239 default:
9240 tcg_temp_free_i32(tmp);
9241 tcg_temp_free_i32(addr);
9242 goto illegal_op;
9243 }
9244 tcg_temp_free_i32(tmp);
9245 }
9246 if (postinc)
9247 tcg_gen_addi_i32(addr, addr, imm);
9248 if (writeback) {
9249 store_reg(s, rn, addr);
9250 } else {
9251 tcg_temp_free_i32(addr);
9252 }
9253 }
9254 break;
9255 default:
9256 goto illegal_op;
9257 }
9258 return 0;
9259 illegal_op:
9260 return 1;
9261 }
9262
9263 static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
9264 {
9265 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9266 int32_t offset;
9267 int i;
9268 TCGv_i32 tmp;
9269 TCGv_i32 tmp2;
9270 TCGv_i32 addr;
9271
9272 if (s->condexec_mask) {
9273 cond = s->condexec_cond;
9274 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
9275 s->condlabel = gen_new_label();
9276 gen_test_cc(cond ^ 1, s->condlabel);
9277 s->condjmp = 1;
9278 }
9279 }
9280
9281 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9282 s->pc += 2;
9283
9284 switch (insn >> 12) {
9285 case 0: case 1:
9286
9287 rd = insn & 7;
9288 op = (insn >> 11) & 3;
9289 if (op == 3) {
9290 /* add/subtract */
9291 rn = (insn >> 3) & 7;
9292 tmp = load_reg(s, rn);
9293 if (insn & (1 << 10)) {
9294 /* immediate */
9295 tmp2 = tcg_temp_new_i32();
9296 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
9297 } else {
9298 /* reg */
9299 rm = (insn >> 6) & 7;
9300 tmp2 = load_reg(s, rm);
9301 }
9302 if (insn & (1 << 9)) {
9303 if (s->condexec_mask)
9304 tcg_gen_sub_i32(tmp, tmp, tmp2);
9305 else
9306 gen_sub_CC(tmp, tmp, tmp2);
9307 } else {
9308 if (s->condexec_mask)
9309 tcg_gen_add_i32(tmp, tmp, tmp2);
9310 else
9311 gen_add_CC(tmp, tmp, tmp2);
9312 }
9313 tcg_temp_free_i32(tmp2);
9314 store_reg(s, rd, tmp);
9315 } else {
9316 /* shift immediate */
9317 rm = (insn >> 3) & 7;
9318 shift = (insn >> 6) & 0x1f;
9319 tmp = load_reg(s, rm);
9320 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9321 if (!s->condexec_mask)
9322 gen_logic_CC(tmp);
9323 store_reg(s, rd, tmp);
9324 }
9325 break;
9326 case 2: case 3:
9327 /* move/compare/add/subtract with 8-bit immediate */
9328 op = (insn >> 11) & 3;
9329 rd = (insn >> 8) & 0x7;
9330 if (op == 0) { /* mov */
9331 tmp = tcg_temp_new_i32();
9332 tcg_gen_movi_i32(tmp, insn & 0xff);
9333 if (!s->condexec_mask)
9334 gen_logic_CC(tmp);
9335 store_reg(s, rd, tmp);
9336 } else {
9337 tmp = load_reg(s, rd);
9338 tmp2 = tcg_temp_new_i32();
9339 tcg_gen_movi_i32(tmp2, insn & 0xff);
9340 switch (op) {
9341 case 1: /* cmp */
9342 gen_sub_CC(tmp, tmp, tmp2);
9343 tcg_temp_free_i32(tmp);
9344 tcg_temp_free_i32(tmp2);
9345 break;
9346 case 2: /* add */
9347 if (s->condexec_mask)
9348 tcg_gen_add_i32(tmp, tmp, tmp2);
9349 else
9350 gen_add_CC(tmp, tmp, tmp2);
9351 tcg_temp_free_i32(tmp2);
9352 store_reg(s, rd, tmp);
9353 break;
9354 case 3: /* sub */
9355 if (s->condexec_mask)
9356 tcg_gen_sub_i32(tmp, tmp, tmp2);
9357 else
9358 gen_sub_CC(tmp, tmp, tmp2);
9359 tcg_temp_free_i32(tmp2);
9360 store_reg(s, rd, tmp);
9361 break;
9362 }
9363 }
9364 break;
9365 case 4:
9366 if (insn & (1 << 11)) {
9367 rd = (insn >> 8) & 7;
9368 /* load pc-relative. Bit 1 of PC is ignored. */
9369 val = s->pc + 2 + ((insn & 0xff) * 4);
9370 val &= ~(uint32_t)2;
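/* i.e. address = Align(PC, 4) + imm8 * 4, where PC reads as the insn
   address + 4 (s->pc already points at the following insn). */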
9371 addr = tcg_temp_new_i32();
9372 tcg_gen_movi_i32(addr, val);
9373 tmp = tcg_temp_new_i32();
9374 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9375 tcg_temp_free_i32(addr);
9376 store_reg(s, rd, tmp);
9377 break;
9378 }
9379 if (insn & (1 << 10)) {
9380 /* data processing extended or blx */
9381 rd = (insn & 7) | ((insn >> 4) & 8);
9382 rm = (insn >> 3) & 0xf;
9383 op = (insn >> 8) & 3;
9384 switch (op) {
9385 case 0: /* add */
9386 tmp = load_reg(s, rd);
9387 tmp2 = load_reg(s, rm);
9388 tcg_gen_add_i32(tmp, tmp, tmp2);
9389 tcg_temp_free_i32(tmp2);
9390 store_reg(s, rd, tmp);
9391 break;
9392 case 1: /* cmp */
9393 tmp = load_reg(s, rd);
9394 tmp2 = load_reg(s, rm);
9395 gen_sub_CC(tmp, tmp, tmp2);
9396 tcg_temp_free_i32(tmp2);
9397 tcg_temp_free_i32(tmp);
9398 break;
9399 case 2: /* mov/cpy */
9400 tmp = load_reg(s, rm);
9401 store_reg(s, rd, tmp);
9402 break;
9403 case 3: /* branch [and link] exchange thumb register */
9404 tmp = load_reg(s, rm);
9405 if (insn & (1 << 7)) {
9406 ARCH(5);
9407 val = (uint32_t)s->pc | 1;
9408 tmp2 = tcg_temp_new_i32();
9409 tcg_gen_movi_i32(tmp2, val);
9410 store_reg(s, 14, tmp2);
9411 }
9412 /* already thumb, no need to check */
9413 gen_bx(s, tmp);
9414 break;
9415 }
9416 break;
9417 }
9418
9419 /* data processing register */
9420 rd = insn & 7;
9421 rm = (insn >> 3) & 7;
9422 op = (insn >> 6) & 0xf;
9423 if (op == 2 || op == 3 || op == 4 || op == 7) {
9424 /* the shift/rotate ops want the operands backwards */
9425 val = rm;
9426 rm = rd;
9427 rd = val;
9428 val = 1;
9429 } else {
9430 val = 0;
9431 }
9432
9433 if (op == 9) { /* neg */
9434 tmp = tcg_temp_new_i32();
9435 tcg_gen_movi_i32(tmp, 0);
9436 } else if (op != 0xf) { /* mvn doesn't read its first operand */
9437 tmp = load_reg(s, rd);
9438 } else {
9439 TCGV_UNUSED_I32(tmp);
9440 }
9441
9442 tmp2 = load_reg(s, rm);
9443 switch (op) {
9444 case 0x0: /* and */
9445 tcg_gen_and_i32(tmp, tmp, tmp2);
9446 if (!s->condexec_mask)
9447 gen_logic_CC(tmp);
9448 break;
9449 case 0x1: /* eor */
9450 tcg_gen_xor_i32(tmp, tmp, tmp2);
9451 if (!s->condexec_mask)
9452 gen_logic_CC(tmp);
9453 break;
9454 case 0x2: /* lsl */
9455 if (s->condexec_mask) {
9456 gen_shl(tmp2, tmp2, tmp);
9457 } else {
9458 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
9459 gen_logic_CC(tmp2);
9460 }
9461 break;
9462 case 0x3: /* lsr */
9463 if (s->condexec_mask) {
9464 gen_shr(tmp2, tmp2, tmp);
9465 } else {
9466 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
9467 gen_logic_CC(tmp2);
9468 }
9469 break;
9470 case 0x4: /* asr */
9471 if (s->condexec_mask) {
9472 gen_sar(tmp2, tmp2, tmp);
9473 } else {
9474 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
9475 gen_logic_CC(tmp2);
9476 }
9477 break;
9478 case 0x5: /* adc */
9479 if (s->condexec_mask) {
9480 gen_adc(tmp, tmp2);
9481 } else {
9482 gen_adc_CC(tmp, tmp, tmp2);
9483 }
9484 break;
9485 case 0x6: /* sbc */
9486 if (s->condexec_mask) {
9487 gen_sub_carry(tmp, tmp, tmp2);
9488 } else {
9489 gen_sbc_CC(tmp, tmp, tmp2);
9490 }
9491 break;
9492 case 0x7: /* ror */
9493 if (s->condexec_mask) {
9494 tcg_gen_andi_i32(tmp, tmp, 0x1f);
9495 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9496 } else {
9497 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
9498 gen_logic_CC(tmp2);
9499 }
9500 break;
9501 case 0x8: /* tst */
9502 tcg_gen_and_i32(tmp, tmp, tmp2);
9503 gen_logic_CC(tmp);
9504 rd = 16;
9505 break;
9506 case 0x9: /* neg */
9507 if (s->condexec_mask)
9508 tcg_gen_neg_i32(tmp, tmp2);
9509 else
9510 gen_sub_CC(tmp, tmp, tmp2);
9511 break;
9512 case 0xa: /* cmp */
9513 gen_sub_CC(tmp, tmp, tmp2);
9514 rd = 16;
9515 break;
9516 case 0xb: /* cmn */
9517 gen_add_CC(tmp, tmp, tmp2);
9518 rd = 16;
9519 break;
9520 case 0xc: /* orr */
9521 tcg_gen_or_i32(tmp, tmp, tmp2);
9522 if (!s->condexec_mask)
9523 gen_logic_CC(tmp);
9524 break;
9525 case 0xd: /* mul */
9526 tcg_gen_mul_i32(tmp, tmp, tmp2);
9527 if (!s->condexec_mask)
9528 gen_logic_CC(tmp);
9529 break;
9530 case 0xe: /* bic */
9531 tcg_gen_andc_i32(tmp, tmp, tmp2);
9532 if (!s->condexec_mask)
9533 gen_logic_CC(tmp);
9534 break;
9535 case 0xf: /* mvn */
9536 tcg_gen_not_i32(tmp2, tmp2);
9537 if (!s->condexec_mask)
9538 gen_logic_CC(tmp2);
9539 val = 1;
9540 rm = rd;
9541 break;
9542 }
9543 if (rd != 16) {
9544 if (val) {
9545 store_reg(s, rm, tmp2);
9546 if (op != 0xf)
9547 tcg_temp_free_i32(tmp);
9548 } else {
9549 store_reg(s, rd, tmp);
9550 tcg_temp_free_i32(tmp2);
9551 }
9552 } else {
9553 tcg_temp_free_i32(tmp);
9554 tcg_temp_free_i32(tmp2);
9555 }
9556 break;
9557
9558 case 5:
9559 /* load/store register offset. */
9560 rd = insn & 7;
9561 rn = (insn >> 3) & 7;
9562 rm = (insn >> 6) & 7;
9563 op = (insn >> 9) & 7;
9564 addr = load_reg(s, rn);
9565 tmp = load_reg(s, rm);
9566 tcg_gen_add_i32(addr, addr, tmp);
9567 tcg_temp_free_i32(tmp);
9568
9569 if (op < 3) { /* store */
9570 tmp = load_reg(s, rd);
9571 } else {
9572 tmp = tcg_temp_new_i32();
9573 }
9574
9575 switch (op) {
9576 case 0: /* str */
9577 gen_aa32_st32(tmp, addr, IS_USER(s));
9578 break;
9579 case 1: /* strh */
9580 gen_aa32_st16(tmp, addr, IS_USER(s));
9581 break;
9582 case 2: /* strb */
9583 gen_aa32_st8(tmp, addr, IS_USER(s));
9584 break;
9585 case 3: /* ldrsb */
9586 gen_aa32_ld8s(tmp, addr, IS_USER(s));
9587 break;
9588 case 4: /* ldr */
9589 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9590 break;
9591 case 5: /* ldrh */
9592 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9593 break;
9594 case 6: /* ldrb */
9595 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9596 break;
9597 case 7: /* ldrsh */
9598 gen_aa32_ld16s(tmp, addr, IS_USER(s));
9599 break;
9600 }
9601 if (op >= 3) { /* load */
9602 store_reg(s, rd, tmp);
9603 } else {
9604 tcg_temp_free_i32(tmp);
9605 }
9606 tcg_temp_free_i32(addr);
9607 break;
9608
9609 case 6:
9610 /* load/store word immediate offset */
9611 rd = insn & 7;
9612 rn = (insn >> 3) & 7;
9613 addr = load_reg(s, rn);
9614 val = (insn >> 4) & 0x7c;
9615 tcg_gen_addi_i32(addr, addr, val);
9616
9617 if (insn & (1 << 11)) {
9618 /* load */
9619 tmp = tcg_temp_new_i32();
9620 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9621 store_reg(s, rd, tmp);
9622 } else {
9623 /* store */
9624 tmp = load_reg(s, rd);
9625 gen_aa32_st32(tmp, addr, IS_USER(s));
9626 tcg_temp_free_i32(tmp);
9627 }
9628 tcg_temp_free_i32(addr);
9629 break;
9630
9631 case 7:
9632 /* load/store byte immediate offset */
9633 rd = insn & 7;
9634 rn = (insn >> 3) & 7;
9635 addr = load_reg(s, rn);
9636 val = (insn >> 6) & 0x1f;
9637 tcg_gen_addi_i32(addr, addr, val);
9638
9639 if (insn & (1 << 11)) {
9640 /* load */
9641 tmp = tcg_temp_new_i32();
9642 gen_aa32_ld8u(tmp, addr, IS_USER(s));
9643 store_reg(s, rd, tmp);
9644 } else {
9645 /* store */
9646 tmp = load_reg(s, rd);
9647 gen_aa32_st8(tmp, addr, IS_USER(s));
9648 tcg_temp_free_i32(tmp);
9649 }
9650 tcg_temp_free_i32(addr);
9651 break;
9652
9653 case 8:
9654 /* load/store halfword immediate offset */
9655 rd = insn & 7;
9656 rn = (insn >> 3) & 7;
9657 addr = load_reg(s, rn);
9658 val = (insn >> 5) & 0x3e;
9659 tcg_gen_addi_i32(addr, addr, val);
9660
9661 if (insn & (1 << 11)) {
9662 /* load */
9663 tmp = tcg_temp_new_i32();
9664 gen_aa32_ld16u(tmp, addr, IS_USER(s));
9665 store_reg(s, rd, tmp);
9666 } else {
9667 /* store */
9668 tmp = load_reg(s, rd);
9669 gen_aa32_st16(tmp, addr, IS_USER(s));
9670 tcg_temp_free_i32(tmp);
9671 }
9672 tcg_temp_free_i32(addr);
9673 break;
9674
9675 case 9:
9676 /* load/store from stack */
9677 rd = (insn >> 8) & 7;
9678 addr = load_reg(s, 13);
9679 val = (insn & 0xff) * 4;
9680 tcg_gen_addi_i32(addr, addr, val);
9681
9682 if (insn & (1 << 11)) {
9683 /* load */
9684 tmp = tcg_temp_new_i32();
9685 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9686 store_reg(s, rd, tmp);
9687 } else {
9688 /* store */
9689 tmp = load_reg(s, rd);
9690 gen_aa32_st32(tmp, addr, IS_USER(s));
9691 tcg_temp_free_i32(tmp);
9692 }
9693 tcg_temp_free_i32(addr);
9694 break;
9695
9696 case 10:
9697 /* compute SP- or PC-relative address into a low register */
9698 rd = (insn >> 8) & 7;
9699 if (insn & (1 << 11)) {
9700 /* SP */
9701 tmp = load_reg(s, 13);
9702 } else {
9703 /* PC. bit 1 is ignored. */
9704 tmp = tcg_temp_new_i32();
9705 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
9706 }
9707 val = (insn & 0xff) * 4;
9708 tcg_gen_addi_i32(tmp, tmp, val);
9709 store_reg(s, rd, tmp);
9710 break;
9711
9712 case 11:
9713 /* misc */
9714 op = (insn >> 8) & 0xf;
9715 switch (op) {
9716 case 0:
9717 /* adjust stack pointer */
9718 tmp = load_reg(s, 13);
9719 val = (insn & 0x7f) * 4;
9720 if (insn & (1 << 7))
9721 val = -(int32_t)val;
9722 tcg_gen_addi_i32(tmp, tmp, val);
9723 store_reg(s, 13, tmp);
9724 break;
9725
9726 case 2: /* sign/zero extend. */
9727 ARCH(6);
9728 rd = insn & 7;
9729 rm = (insn >> 3) & 7;
9730 tmp = load_reg(s, rm);
9731 switch ((insn >> 6) & 3) {
9732 case 0: gen_sxth(tmp); break;
9733 case 1: gen_sxtb(tmp); break;
9734 case 2: gen_uxth(tmp); break;
9735 case 3: gen_uxtb(tmp); break;
9736 }
9737 store_reg(s, rd, tmp);
9738 break;
9739 case 4: case 5: case 0xc: case 0xd:
9740 /* push/pop */
9741 addr = load_reg(s, 13);
9742 if (insn & (1 << 8))
9743 offset = 4;
9744 else
9745 offset = 0;
9746 for (i = 0; i < 8; i++) {
9747 if (insn & (1 << i))
9748 offset += 4;
9749 }
9750 if ((insn & (1 << 11)) == 0) {
9751 tcg_gen_addi_i32(addr, addr, -offset);
9752 }
9753 for (i = 0; i < 8; i++) {
9754 if (insn & (1 << i)) {
9755 if (insn & (1 << 11)) {
9756 /* pop */
9757 tmp = tcg_temp_new_i32();
9758 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9759 store_reg(s, i, tmp);
9760 } else {
9761 /* push */
9762 tmp = load_reg(s, i);
9763 gen_aa32_st32(tmp, addr, IS_USER(s));
9764 tcg_temp_free_i32(tmp);
9765 }
9766 /* advance to the next address. */
9767 tcg_gen_addi_i32(addr, addr, 4);
9768 }
9769 }
9770 TCGV_UNUSED_I32(tmp);
9771 if (insn & (1 << 8)) {
9772 if (insn & (1 << 11)) {
9773 /* pop pc */
9774 tmp = tcg_temp_new_i32();
9775 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9776 /* don't set the pc until the rest of the instruction
9777 has completed */
9778 } else {
9779 /* push lr */
9780 tmp = load_reg(s, 14);
9781 gen_aa32_st32(tmp, addr, IS_USER(s));
9782 tcg_temp_free_i32(tmp);
9783 }
9784 tcg_gen_addi_i32(addr, addr, 4);
9785 }
9786 if ((insn & (1 << 11)) == 0) {
9787 tcg_gen_addi_i32(addr, addr, -offset);
9788 }
9789 /* write back the new stack pointer */
9790 store_reg(s, 13, addr);
9791 /* set the new PC value */
9792 if ((insn & 0x0900) == 0x0900) {
9793 store_reg_from_load(env, s, 15, tmp);
9794 }
9795 break;
9796
9797 case 1: case 3: case 9: case 11: /* cbz/cbnz */
9798 rm = insn & 7;
9799 tmp = load_reg(s, rm);
9800 s->condlabel = gen_new_label();
9801 s->condjmp = 1;
9802 if (insn & (1 << 11))
9803 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9804 else
9805 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
9806 tcg_temp_free_i32(tmp);
9807 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
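/* offset = i:imm5:'0' (0..126); the target is insn address + 4 + offset,
   so CBZ/CBNZ can only branch forwards. */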
9808 val = (uint32_t)s->pc + 2;
9809 val += offset;
9810 gen_jmp(s, val);
9811 break;
9812
9813 case 15: /* IT, nop-hint. */
9814 if ((insn & 0xf) == 0) {
9815 gen_nop_hint(s, (insn >> 4) & 0xf);
9816 break;
9817 }
9818 /* If Then. */
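/* firstcond is in bits 7:4 and the mask in bits 3:0. condexec_cond keeps
   the top three bits of firstcond; condexec_mask keeps firstcond<0> plus
   the mask and is shifted left once per insn as the block is translated
   (see gen_intermediate_code_internal). */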
9819 s->condexec_cond = (insn >> 4) & 0xe;
9820 s->condexec_mask = insn & 0x1f;
9821 /* No actual code generated for this insn, just set up state. */
9822 break;
9823
9824 case 0xe: /* bkpt */
9825 ARCH(5);
9826 gen_exception_insn(s, 2, EXCP_BKPT);
9827 break;
9828
9829 case 0xa: /* rev */
9830 ARCH(6);
9831 rn = (insn >> 3) & 0x7;
9832 rd = insn & 0x7;
9833 tmp = load_reg(s, rn);
9834 switch ((insn >> 6) & 3) {
9835 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
9836 case 1: gen_rev16(tmp); break;
9837 case 3: gen_revsh(tmp); break;
9838 default: goto illegal_op;
9839 }
9840 store_reg(s, rd, tmp);
9841 break;
9842
9843 case 6:
9844 switch ((insn >> 5) & 7) {
9845 case 2:
9846 /* setend */
9847 ARCH(6);
9848 if (((insn >> 3) & 1) != s->bswap_code) {
9849 /* Dynamic endianness switching not implemented. */
9850 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9851 goto illegal_op;
9852 }
9853 break;
9854 case 3:
9855 /* cps */
9856 ARCH(6);
9857 if (IS_USER(s)) {
9858 break;
9859 }
9860 if (IS_M(env)) {
9861 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9862 /* FAULTMASK */
9863 if (insn & 1) {
9864 addr = tcg_const_i32(19);
9865 gen_helper_v7m_msr(cpu_env, addr, tmp);
9866 tcg_temp_free_i32(addr);
9867 }
9868 /* PRIMASK */
9869 if (insn & 2) {
9870 addr = tcg_const_i32(16);
9871 gen_helper_v7m_msr(cpu_env, addr, tmp);
9872 tcg_temp_free_i32(addr);
9873 }
9874 tcg_temp_free_i32(tmp);
9875 gen_lookup_tb(s);
9876 } else {
9877 if (insn & (1 << 4)) {
9878 shift = CPSR_A | CPSR_I | CPSR_F;
9879 } else {
9880 shift = 0;
9881 }
9882 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
9883 }
9884 break;
9885 default:
9886 goto undef;
9887 }
9888 break;
9889
9890 default:
9891 goto undef;
9892 }
9893 break;
9894
9895 case 12:
9896 {
9897 /* load/store multiple */
9898 TCGv_i32 loaded_var;
9899 TCGV_UNUSED_I32(loaded_var);
9900 rn = (insn >> 8) & 0x7;
9901 addr = load_reg(s, rn);
9902 for (i = 0; i < 8; i++) {
9903 if (insn & (1 << i)) {
9904 if (insn & (1 << 11)) {
9905 /* load */
9906 tmp = tcg_temp_new_i32();
9907 gen_aa32_ld32u(tmp, addr, IS_USER(s));
9908 if (i == rn) {
9909 loaded_var = tmp;
9910 } else {
9911 store_reg(s, i, tmp);
9912 }
9913 } else {
9914 /* store */
9915 tmp = load_reg(s, i);
9916 gen_aa32_st32(tmp, addr, IS_USER(s));
9917 tcg_temp_free_i32(tmp);
9918 }
9919 /* advance to the next address */
9920 tcg_gen_addi_i32(addr, addr, 4);
9921 }
9922 }
9923 if ((insn & (1 << rn)) == 0) {
9924 /* base reg not in list: base register writeback */
9925 store_reg(s, rn, addr);
9926 } else {
9927 /* base reg in list: if load, complete it now */
9928 if (insn & (1 << 11)) {
9929 store_reg(s, rn, loaded_var);
9930 }
9931 tcg_temp_free_i32(addr);
9932 }
9933 break;
9934 }
9935 case 13:
9936 /* conditional branch or swi */
9937 cond = (insn >> 8) & 0xf;
9938 if (cond == 0xe)
9939 goto undef;
9940
9941 if (cond == 0xf) {
9942 /* swi */
9943 gen_set_pc_im(s, s->pc);
9944 s->is_jmp = DISAS_SWI;
9945 break;
9946 }
9947 /* generate a conditional jump to next instruction */
9948 s->condlabel = gen_new_label();
9949 gen_test_cc(cond ^ 1, s->condlabel);
9950 s->condjmp = 1;
9951
9952 /* jump to the offset */
9953 val = (uint32_t)s->pc + 2;
9954 offset = ((int32_t)insn << 24) >> 24;
9955 val += offset << 1;
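/* Target = insn address + 4 + SignExtend(imm8:'0'), range -256..+254. */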
9956 gen_jmp(s, val);
9957 break;
9958
9959 case 14:
9960 if (insn & (1 << 11)) {
9961 if (disas_thumb2_insn(env, s, insn))
9962 goto undef32;
9963 break;
9964 }
9965 /* unconditional branch */
9966 val = (uint32_t)s->pc;
9967 offset = ((int32_t)insn << 21) >> 21;
9968 val += (offset << 1) + 2;
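/* Target = insn address + 4 + SignExtend(imm11:'0'); s->pc is only 2 past
   the insn here, hence the extra +2. */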
9969 gen_jmp(s, val);
9970 break;
9971
9972 case 15:
9973 if (disas_thumb2_insn(env, s, insn))
9974 goto undef32;
9975 break;
9976 }
9977 return;
9978 undef32:
9979 gen_exception_insn(s, 4, EXCP_UDEF);
9980 return;
9981 illegal_op:
9982 undef:
9983 gen_exception_insn(s, 2, EXCP_UDEF);
9984 }
9985
9986 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
9987 basic block 'tb'. If search_pc is TRUE, also generate PC
9988 information for each intermediate instruction. */
9989 static inline void gen_intermediate_code_internal(ARMCPU *cpu,
9990 TranslationBlock *tb,
9991 bool search_pc)
9992 {
9993 CPUState *cs = CPU(cpu);
9994 CPUARMState *env = &cpu->env;
9995 DisasContext dc1, *dc = &dc1;
9996 CPUBreakpoint *bp;
9997 uint16_t *gen_opc_end;
9998 int j, lj;
9999 target_ulong pc_start;
10000 target_ulong next_page_start;
10001 int num_insns;
10002 int max_insns;
10003
10004 /* generate intermediate code */
10005 pc_start = tb->pc;
10006
10007 dc->tb = tb;
10008
10009 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
10010
10011 dc->is_jmp = DISAS_NEXT;
10012 dc->pc = pc_start;
10013 dc->singlestep_enabled = cs->singlestep_enabled;
10014 dc->condjmp = 0;
10015 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
10016 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
10017 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
10018 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
10019 #if !defined(CONFIG_USER_ONLY)
10020 dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
10021 #endif
10022 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
10023 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
10024 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
10025 cpu_F0s = tcg_temp_new_i32();
10026 cpu_F1s = tcg_temp_new_i32();
10027 cpu_F0d = tcg_temp_new_i64();
10028 cpu_F1d = tcg_temp_new_i64();
10029 cpu_V0 = cpu_F0d;
10030 cpu_V1 = cpu_F1d;
10031 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
10032 cpu_M0 = tcg_temp_new_i64();
10033 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
10034 lj = -1;
10035 num_insns = 0;
10036 max_insns = tb->cflags & CF_COUNT_MASK;
10037 if (max_insns == 0)
10038 max_insns = CF_COUNT_MASK;
10039
10040 gen_tb_start();
10041
10042 tcg_clear_temp_count();
10043
10044 /* A note on handling of the condexec (IT) bits:
10045 *
10046 * We want to avoid the overhead of having to write the updated condexec
10047 * bits back to the CPUARMState for every instruction in an IT block. So:
10048 * (1) if the condexec bits are not already zero then we write
10049 * zero back into the CPUARMState now. This avoids complications trying
10050 * to do it at the end of the block. (For example if we don't do this
10051 * it's hard to identify whether we can safely skip writing condexec
10052 * at the end of the TB, which we definitely want to do for the case
10053 * where a TB doesn't do anything with the IT state at all.)
10054 * (2) if we are going to leave the TB then we call gen_set_condexec()
10055 * which will write the correct value into CPUARMState if zero is wrong.
10056 * This is done both for leaving the TB at the end, and for leaving
10057 * it because of an exception we know will happen, which is done in
10058 * gen_exception_insn(). The latter is necessary because we need to
10059 * leave the TB with the PC/IT state just prior to execution of the
10060 * instruction which caused the exception.
10061 * (3) if we leave the TB unexpectedly (e.g. a data abort on a load)
10062 * then the CPUARMState will be wrong and we need to reset it.
10063 * This is handled in the same way as restoration of the
10064 * PC in these situations: we will be called again with search_pc=1
10065 * and generate a mapping of the condexec bits for each PC in
10066 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
10067 * this to restore the condexec bits.
10068 *
10069 * Note that there are no instructions which can read the condexec
10070 * bits, and none which can write non-static values to them, so
10071 * we don't need to care about whether CPUARMState is correct in the
10072 * middle of a TB.
10073 */
10074
10075 /* Reset the conditional execution bits immediately. This avoids
10076 complications trying to do it at the end of the block. */
10077 if (dc->condexec_mask || dc->condexec_cond)
10078 {
10079 TCGv_i32 tmp = tcg_temp_new_i32();
10080 tcg_gen_movi_i32(tmp, 0);
10081 store_cpu_field(tmp, condexec_bits);
10082 }
10083 do {
10084 #ifdef CONFIG_USER_ONLY
10085 /* Intercept jump to the magic kernel page. */
10086 if (dc->pc >= 0xffff0000) {
10087 /* We always get here via a jump, so we know we are not in a
10088 conditional execution block. */
10089 gen_exception(EXCP_KERNEL_TRAP);
10090 dc->is_jmp = DISAS_UPDATE;
10091 break;
10092 }
10093 #else
10094 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
10095 /* We always get here via a jump, so we know we are not in a
10096 conditional execution block. */
10097 gen_exception(EXCP_EXCEPTION_EXIT);
10098 dc->is_jmp = DISAS_UPDATE;
10099 break;
10100 }
10101 #endif
10102
10103 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
10104 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
10105 if (bp->pc == dc->pc) {
10106 gen_exception_insn(dc, 0, EXCP_DEBUG);
10107 /* Advance PC so that clearing the breakpoint will
10108 invalidate this TB. */
10109 dc->pc += 2;
10110 goto done_generating;
10111 }
10112 }
10113 }
10114 if (search_pc) {
10115 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
10116 if (lj < j) {
10117 lj++;
10118 while (lj < j)
10119 tcg_ctx.gen_opc_instr_start[lj++] = 0;
10120 }
10121 tcg_ctx.gen_opc_pc[lj] = dc->pc;
10122 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
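/* Recorded per PC so restore_state_to_opc() can rebuild env->condexec_bits
   if an exception is taken in the middle of the TB (see the note above). */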
10123 tcg_ctx.gen_opc_instr_start[lj] = 1;
10124 tcg_ctx.gen_opc_icount[lj] = num_insns;
10125 }
10126
10127 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
10128 gen_io_start();
10129
10130 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
10131 tcg_gen_debug_insn_start(dc->pc);
10132 }
10133
10134 if (dc->thumb) {
10135 disas_thumb_insn(env, dc);
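/* Advance the IT block state: the next condition LSB comes from the top of
   the mask and the mask shifts left; when it reaches zero the block ends. */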
10136 if (dc->condexec_mask) {
10137 dc->condexec_cond = (dc->condexec_cond & 0xe)
10138 | ((dc->condexec_mask >> 4) & 1);
10139 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
10140 if (dc->condexec_mask == 0) {
10141 dc->condexec_cond = 0;
10142 }
10143 }
10144 } else {
10145 disas_arm_insn(env, dc);
10146 }
10147
10148 if (dc->condjmp && !dc->is_jmp) {
10149 gen_set_label(dc->condlabel);
10150 dc->condjmp = 0;
10151 }
10152
10153 if (tcg_check_temp_count()) {
10154 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
10155 dc->pc);
10156 }
10157
10158 /* Translation stops when a conditional branch is encountered.
10159 * Otherwise the subsequent code could get translated several times.
10160 * Also stop translation when a page boundary is reached. This
10161 * ensures prefetch aborts occur at the right place. */
10162 num_insns++;
10163 } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
10164 !cs->singlestep_enabled &&
10165 !singlestep &&
10166 dc->pc < next_page_start &&
10167 num_insns < max_insns);
10168
10169 if (tb->cflags & CF_LAST_IO) {
10170 if (dc->condjmp) {
10171 /* FIXME: This can theoretically happen with self-modifying
10172 code. */
10173 cpu_abort(env, "IO on conditional branch instruction");
10174 }
10175 gen_io_end();
10176 }
10177
10178 /* At this stage dc->condjmp will only be set when the skipped
10179 instruction was a conditional branch or trap, and the PC has
10180 already been written. */
10181 if (unlikely(cs->singlestep_enabled)) {
10182 /* Make sure the pc is updated, and raise a debug exception. */
10183 if (dc->condjmp) {
10184 gen_set_condexec(dc);
10185 if (dc->is_jmp == DISAS_SWI) {
10186 gen_exception(EXCP_SWI);
10187 } else {
10188 gen_exception(EXCP_DEBUG);
10189 }
10190 gen_set_label(dc->condlabel);
10191 }
10192 if (dc->condjmp || !dc->is_jmp) {
10193 gen_set_pc_im(dc, dc->pc);
10194 dc->condjmp = 0;
10195 }
10196 gen_set_condexec(dc);
10197 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
10198 gen_exception(EXCP_SWI);
10199 } else {
10200 /* FIXME: Single stepping a WFI insn will not halt
10201 the CPU. */
10202 gen_exception(EXCP_DEBUG);
10203 }
10204 } else {
10205 /* While branches must always occur at the end of an IT block,
10206 there are a few other things that can cause us to terminate
10207 the TB in the middle of an IT block:
10208 - Exception generating instructions (bkpt, swi, undefined).
10209 - Page boundaries.
10210 - Hardware watchpoints.
10211 Hardware breakpoints have already been handled and skip this code.
10212 */
10213 gen_set_condexec(dc);
10214 switch (dc->is_jmp) {
10215 case DISAS_NEXT:
10216 gen_goto_tb(dc, 1, dc->pc);
10217 break;
10218 default:
10219 case DISAS_JUMP:
10220 case DISAS_UPDATE:
10221 /* indicate that the hash table must be used to find the next TB */
10222 tcg_gen_exit_tb(0);
10223 break;
10224 case DISAS_TB_JUMP:
10225 /* nothing more to generate */
10226 break;
10227 case DISAS_WFI:
10228 gen_helper_wfi(cpu_env);
10229 break;
10230 case DISAS_SWI:
10231 gen_exception(EXCP_SWI);
10232 break;
10233 }
10234 if (dc->condjmp) {
10235 gen_set_label(dc->condlabel);
10236 gen_set_condexec(dc);
10237 gen_goto_tb(dc, 1, dc->pc);
10238 dc->condjmp = 0;
10239 }
10240 }
10241
10242 done_generating:
10243 gen_tb_end(tb, num_insns);
10244 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
10245
10246 #ifdef DEBUG_DISAS
10247 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
10248 qemu_log("----------------\n");
10249 qemu_log("IN: %s\n", lookup_symbol(pc_start));
10250 log_target_disas(env, pc_start, dc->pc - pc_start,
10251 dc->thumb | (dc->bswap_code << 1));
10252 qemu_log("\n");
10253 }
10254 #endif
10255 if (search_pc) {
10256 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
10257 lj++;
10258 while (lj <= j)
10259 tcg_ctx.gen_opc_instr_start[lj++] = 0;
10260 } else {
10261 tb->size = dc->pc - pc_start;
10262 tb->icount = num_insns;
10263 }
10264 }
10265
10266 void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
10267 {
10268 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
10269 }
10270
10271 void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
10272 {
10273 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
10274 }
10275
10276 static const char *cpu_mode_names[16] = {
10277 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
10278 "???", "???", "???", "und", "???", "???", "???", "sys"
10279 };
10280
10281 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
10282 int flags)
10283 {
10284 ARMCPU *cpu = ARM_CPU(cs);
10285 CPUARMState *env = &cpu->env;
10286 int i;
10287 uint32_t psr;
10288
10289 for (i = 0; i < 16; i++) {
10290 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
10291 if ((i % 4) == 3)
10292 cpu_fprintf(f, "\n");
10293 else
10294 cpu_fprintf(f, " ");
10295 }
10296 psr = cpsr_read(env);
10297 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
10298 psr,
10299 psr & (1 << 31) ? 'N' : '-',
10300 psr & (1 << 30) ? 'Z' : '-',
10301 psr & (1 << 29) ? 'C' : '-',
10302 psr & (1 << 28) ? 'V' : '-',
10303 psr & CPSR_T ? 'T' : 'A',
10304 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
10305
10306 if (flags & CPU_DUMP_FPU) {
10307 int numvfpregs = 0;
10308 if (arm_feature(env, ARM_FEATURE_VFP)) {
10309 numvfpregs += 16;
10310 }
10311 if (arm_feature(env, ARM_FEATURE_VFP3)) {
10312 numvfpregs += 16;
10313 }
10314 for (i = 0; i < numvfpregs; i++) {
10315 uint64_t v = float64_val(env->vfp.regs[i]);
10316 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
10317 i * 2, (uint32_t)v,
10318 i * 2 + 1, (uint32_t)(v >> 32),
10319 i, v);
10320 }
10321 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
10322 }
10323 }
10324
10325 void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
10326 {
10327 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
10328 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
10329 }