[mirror_qemu.git] / target-arm / translate.c
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
fad6cb1a 20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
2c0262af
FB
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
79383c9c 32#include "qemu-log.h"
1497c961 33
a7812ae4 34#include "helpers.h"
1497c961 35#define GEN_HELPER 1
b26eefb6 36#include "helpers.h"
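/* helpers.h is deliberately included twice: the first include provides the
   helper prototypes, and the second, with GEN_HELPER defined to 1, expands
   into the gen_helper_*() wrappers that emit TCG calls to those helpers.
   It is included once more with GEN_HELPER set to 2 in arm_translate_init()
   below to register the helpers with TCG. */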
2c0262af 37
9ee6e8bb
PB
38#define ENABLE_ARCH_5J 0
39#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
40#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
41#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
42#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 43
86753403 44#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 45
2c0262af
FB
46/* internal defines */
47typedef struct DisasContext {
0fa85d43 48 target_ulong pc;
2c0262af 49 int is_jmp;
e50e6a20
FB
50 /* Nonzero if this instruction has been conditionally skipped. */
51 int condjmp;
52 /* The label that will be jumped to when the instruction is skipped. */
53 int condlabel;
9ee6e8bb
PB
 54 /* Thumb-2 conditional execution bits. */
55 int condexec_mask;
56 int condexec_cond;
2c0262af 57 struct TranslationBlock *tb;
8aaca4c0 58 int singlestep_enabled;
5899f386 59 int thumb;
b5ff1b31
FB
60#if !defined(CONFIG_USER_ONLY)
61 int user;
62#endif
2c0262af
FB
63} DisasContext;
64
b5ff1b31
FB
65#if defined(CONFIG_USER_ONLY)
66#define IS_USER(s) 1
67#else
68#define IS_USER(s) (s->user)
69#endif
70
9ee6e8bb
PB
71/* These instructions trap after executing, so defer them until after the
 72 conditional execution state has been updated. */
73#define DISAS_WFI 4
74#define DISAS_SWI 5
2c0262af 75
a7812ae4 76static TCGv_ptr cpu_env;
ad69471c 77/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 78static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
ad69471c 79
b26eefb6 80/* FIXME: These should be removed. */
8f8e3aa4 81static TCGv cpu_T[2];
a7812ae4
PB
82static TCGv cpu_F0s, cpu_F1s;
83static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 84
2e70f6ef
PB
85#define ICOUNT_TEMP cpu_T[0]
86#include "gen-icount.h"
87
b26eefb6
PB
 88/* Initialize TCG globals. */
89void arm_translate_init(void)
90{
a7812ae4
PB
91 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
92
93 cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
94 cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
b26eefb6 95
a7812ae4
PB
96#define GEN_HELPER 2
97#include "helpers.h"
b26eefb6
PB
98}
99
100/* The code generator doesn't like lots of temporaries, so maintain our own
101 cache for reuse within a function. */
102#define MAX_TEMPS 8
103static int num_temps;
104static TCGv temps[MAX_TEMPS];
105
106/* Allocate a temporary variable. */
a7812ae4 107static TCGv_i32 new_tmp(void)
b26eefb6
PB
108{
109 TCGv tmp;
110 if (num_temps == MAX_TEMPS)
111 abort();
112
a7812ae4 113 if (GET_TCGV_I32(temps[num_temps]))
b26eefb6
PB
114 return temps[num_temps++];
115
a7812ae4 116 tmp = tcg_temp_new_i32();
b26eefb6
PB
117 temps[num_temps++] = tmp;
118 return tmp;
119}
120
121/* Release a temporary variable. */
122static void dead_tmp(TCGv tmp)
123{
124 int i;
125 num_temps--;
126 i = num_temps;
a7812ae4 127 if (TCGV_EQUAL(temps[i], tmp))
b26eefb6
PB
128 return;
129
130 /* Shuffle this temp to the last slot. */
a7812ae4 131 while (!TCGV_EQUAL(temps[i], tmp))
b26eefb6
PB
132 i--;
133 while (i < num_temps) {
134 temps[i] = temps[i + 1];
135 i++;
136 }
137 temps[i] = tmp;
138}
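/* Typical usage of the temp cache above (sketch): every value obtained from
   new_tmp() must eventually be released with dead_tmp() so its slot can be
   reused, e.g.
       TCGv tmp = new_tmp();
       tcg_gen_movi_i32(tmp, 0);
       ... use tmp ...
       dead_tmp(tmp);
 */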
139
d9ba4830
PB
140static inline TCGv load_cpu_offset(int offset)
141{
142 TCGv tmp = new_tmp();
143 tcg_gen_ld_i32(tmp, cpu_env, offset);
144 return tmp;
145}
146
147#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
148
149static inline void store_cpu_offset(TCGv var, int offset)
150{
151 tcg_gen_st_i32(var, cpu_env, offset);
152 dead_tmp(var);
153}
154
155#define store_cpu_field(var, name) \
156 store_cpu_offset(var, offsetof(CPUState, name))
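/* Example: "TCGv f = load_cpu_field(CF); ... store_cpu_field(f, CF);" reads
   and later writes back env->CF through a fresh temporary; store_cpu_offset()
   marks the source temporary as dead. */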
157
b26eefb6
PB
158/* Set a variable to the value of a CPU register. */
159static void load_reg_var(DisasContext *s, TCGv var, int reg)
160{
161 if (reg == 15) {
162 uint32_t addr;
 163 /* The architected PC reads as this insn's address plus 8 (ARM) or 4 (Thumb); s->pc has already been advanced past this insn, so only one more insn width is added. */
164 if (s->thumb)
165 addr = (long)s->pc + 2;
166 else
167 addr = (long)s->pc + 4;
168 tcg_gen_movi_i32(var, addr);
169 } else {
170 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
171 }
172}
173
174/* Create a new temporary and set it to the value of a CPU register. */
175static inline TCGv load_reg(DisasContext *s, int reg)
176{
177 TCGv tmp = new_tmp();
178 load_reg_var(s, tmp, reg);
179 return tmp;
180}
181
182/* Set a CPU register. The source must be a temporary and will be
183 marked as dead. */
184static void store_reg(DisasContext *s, int reg, TCGv var)
185{
186 if (reg == 15) {
187 tcg_gen_andi_i32(var, var, ~1);
188 s->is_jmp = DISAS_JUMP;
189 }
190 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
191 dead_tmp(var);
192}
193
194
195/* Basic operations. */
196#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
b26eefb6
PB
197#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
198#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
b26eefb6
PB
199
200#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
201#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
202#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
203#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
204
8984bd2e
PB
205#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
206#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
207#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
208#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
209#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
210#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
211
b26eefb6
PB
212#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
213#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
214#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
215#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
216#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
217#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
218#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
219
b26eefb6
PB
220#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
221#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
b26eefb6
PB
222
223/* Value extensions. */
86831435
PB
224#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
225#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
226#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
227#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
228
1497c961
PB
229#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
230#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
231
232#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 233
d9ba4830
PB
234#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
235/* Set NZCV flags from the high 4 bits of var. */
236#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
237
238static void gen_exception(int excp)
239{
240 TCGv tmp = new_tmp();
241 tcg_gen_movi_i32(tmp, excp);
242 gen_helper_exception(tmp);
243 dead_tmp(tmp);
244}
245
3670669c
PB
246static void gen_smul_dual(TCGv a, TCGv b)
247{
248 TCGv tmp1 = new_tmp();
249 TCGv tmp2 = new_tmp();
22478e79
AZ
250 tcg_gen_ext16s_i32(tmp1, a);
251 tcg_gen_ext16s_i32(tmp2, b);
3670669c
PB
252 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
253 dead_tmp(tmp2);
254 tcg_gen_sari_i32(a, a, 16);
255 tcg_gen_sari_i32(b, b, 16);
256 tcg_gen_mul_i32(b, b, a);
257 tcg_gen_mov_i32(a, tmp1);
258 dead_tmp(tmp1);
259}
260
261/* Byteswap each halfword. */
262static void gen_rev16(TCGv var)
263{
264 TCGv tmp = new_tmp();
265 tcg_gen_shri_i32(tmp, var, 8);
266 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
267 tcg_gen_shli_i32(var, var, 8);
268 tcg_gen_andi_i32(var, var, 0xff00ff00);
269 tcg_gen_or_i32(var, var, tmp);
270 dead_tmp(tmp);
271}
272
273/* Byteswap low halfword and sign extend. */
274static void gen_revsh(TCGv var)
275{
276 TCGv tmp = new_tmp();
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
279 tcg_gen_shli_i32(var, var, 8);
280 tcg_gen_ext8s_i32(var, var);
281 tcg_gen_or_i32(var, var, tmp);
282 dead_tmp(tmp);
283}
284
285/* Unsigned bitfield extract. */
286static void gen_ubfx(TCGv var, int shift, uint32_t mask)
287{
288 if (shift)
289 tcg_gen_shri_i32(var, var, shift);
290 tcg_gen_andi_i32(var, var, mask);
291}
292
293/* Signed bitfield extract. */
294static void gen_sbfx(TCGv var, int shift, int width)
295{
296 uint32_t signbit;
297
298 if (shift)
299 tcg_gen_sari_i32(var, var, shift);
300 if (shift + width < 32) {
301 signbit = 1u << (width - 1);
302 tcg_gen_andi_i32(var, var, (1u << width) - 1);
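        /* Sign extend the width-bit value: (x ^ signbit) - signbit copies
           bit (width - 1) into all of the higher bits. */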
303 tcg_gen_xori_i32(var, var, signbit);
304 tcg_gen_subi_i32(var, var, signbit);
305 }
306}
307
308/* Bitfield insertion. Insert val into base. Clobbers base and val. */
309static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
310{
3670669c 311 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
312 tcg_gen_shli_i32(val, val, shift);
313 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
314 tcg_gen_or_i32(dest, base, val);
315}
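/* i.e. gen_bfi(dest, base, val, shift, mask) computes
   dest = (base & ~(mask << shift)) | ((val & mask) << shift). */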
316
d9ba4830
PB
317/* Round the top 32 bits of a 64-bit value. */
318static void gen_roundqd(TCGv a, TCGv b)
3670669c 319{
d9ba4830
PB
320 tcg_gen_shri_i32(a, a, 31);
321 tcg_gen_add_i32(a, a, b);
3670669c
PB
322}
323
8f01245e
PB
324/* FIXME: Most targets have native widening multiplication.
325 It would be good to use that instead of a full wide multiply. */
5e3f878a 326/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 327static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
a7812ae4
PB
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
331
332 tcg_gen_extu_i32_i64(tmp1, a);
333 dead_tmp(a);
334 tcg_gen_extu_i32_i64(tmp2, b);
335 dead_tmp(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 return tmp1;
338}
339
a7812ae4 340static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 341{
a7812ae4
PB
342 TCGv_i64 tmp1 = tcg_temp_new_i64();
343 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
344
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 return tmp1;
351}
352
8f01245e
PB
353/* Unsigned 32x32->64 multiply. */
354static void gen_op_mull_T0_T1(void)
355{
a7812ae4
PB
356 TCGv_i64 tmp1 = tcg_temp_new_i64();
357 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e
PB
358
359 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
360 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
361 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
362 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
363 tcg_gen_shri_i64(tmp1, tmp1, 32);
364 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
365}
366
367/* Signed 32x32->64 multiply. */
d9ba4830 368static void gen_imull(TCGv a, TCGv b)
8f01245e 369{
a7812ae4
PB
370 TCGv_i64 tmp1 = tcg_temp_new_i64();
371 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e 372
d9ba4830
PB
373 tcg_gen_ext_i32_i64(tmp1, a);
374 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 375 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 376 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 377 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
378 tcg_gen_trunc_i64_i32(b, tmp1);
379}
d9ba4830 380
8f01245e
PB
381/* Swap low and high halfwords. */
382static void gen_swap_half(TCGv var)
383{
384 TCGv tmp = new_tmp();
385 tcg_gen_shri_i32(tmp, var, 16);
386 tcg_gen_shli_i32(var, var, 16);
387 tcg_gen_or_i32(var, var, tmp);
3670669c 388 dead_tmp(tmp);
8f01245e
PB
389}
390
b26eefb6
PB
 391/* Dual 16-bit add. Result is placed in t0; t1 is marked as dead.
392 tmp = (t0 ^ t1) & 0x8000;
393 t0 &= ~0x8000;
394 t1 &= ~0x8000;
395 t0 = (t0 + t1) ^ tmp;
396 */
397
398static void gen_add16(TCGv t0, TCGv t1)
399{
400 TCGv tmp = new_tmp();
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andi_i32(tmp, tmp, 0x8000);
403 tcg_gen_andi_i32(t0, t0, ~0x8000);
404 tcg_gen_andi_i32(t1, t1, ~0x8000);
405 tcg_gen_add_i32(t0, t0, t1);
406 tcg_gen_xor_i32(t0, t0, tmp);
407 dead_tmp(tmp);
408 dead_tmp(t1);
409}
410
9a119ff6
PB
411#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
412
b26eefb6
PB
413/* Set CF to the top bit of var. */
414static void gen_set_CF_bit31(TCGv var)
415{
416 TCGv tmp = new_tmp();
417 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 418 gen_set_CF(tmp);
b26eefb6
PB
419 dead_tmp(tmp);
420}
421
422/* Set N and Z flags from var. */
423static inline void gen_logic_CC(TCGv var)
424{
6fbe23d5
PB
425 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
426 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
427}
428
429/* T0 += T1 + CF. */
430static void gen_adc_T0_T1(void)
431{
d9ba4830 432 TCGv tmp;
b26eefb6 433 gen_op_addl_T0_T1();
d9ba4830 434 tmp = load_cpu_field(CF);
b26eefb6
PB
435 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
436 dead_tmp(tmp);
437}
438
3670669c
PB
439/* dest = T0 - T1 + CF - 1. */
440static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
441{
d9ba4830 442 TCGv tmp;
3670669c 443 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 444 tmp = load_cpu_field(CF);
3670669c
PB
445 tcg_gen_add_i32(dest, dest, tmp);
446 tcg_gen_subi_i32(dest, dest, 1);
447 dead_tmp(tmp);
448}
449
450#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
451#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
452
b26eefb6
PB
453/* T0 &= ~T1. Clobbers T1. */
454/* FIXME: Implement bic natively. */
8f8e3aa4
PB
455static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
456{
457 TCGv tmp = new_tmp();
458 tcg_gen_not_i32(tmp, t1);
459 tcg_gen_and_i32(dest, t0, tmp);
460 dead_tmp(tmp);
461}
b26eefb6
PB
462static inline void gen_op_bicl_T0_T1(void)
463{
464 gen_op_notl_T1();
465 gen_op_andl_T0_T1();
466}
467
ad69471c
PB
468/* FIXME: Implement this natively. */
469#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
470
b26eefb6
PB
471/* FIXME: Implement this natively. */
472static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
473{
474 TCGv tmp;
475
476 if (i == 0)
477 return;
478
479 tmp = new_tmp();
480 tcg_gen_shri_i32(tmp, t1, i);
481 tcg_gen_shli_i32(t1, t1, 32 - i);
482 tcg_gen_or_i32(t0, t1, tmp);
483 dead_tmp(tmp);
484}
485
9a119ff6 486static void shifter_out_im(TCGv var, int shift)
b26eefb6 487{
9a119ff6
PB
488 TCGv tmp = new_tmp();
489 if (shift == 0) {
490 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 491 } else {
9a119ff6 492 tcg_gen_shri_i32(tmp, var, shift);
4cc633c3 493 if (shift != 31)
9a119ff6
PB
494 tcg_gen_andi_i32(tmp, tmp, 1);
495 }
496 gen_set_CF(tmp);
497 dead_tmp(tmp);
498}
b26eefb6 499
9a119ff6
PB
500/* Shift by immediate. Includes special handling for shift == 0. */
501static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
502{
503 switch (shiftop) {
504 case 0: /* LSL */
505 if (shift != 0) {
506 if (flags)
507 shifter_out_im(var, 32 - shift);
508 tcg_gen_shli_i32(var, var, shift);
509 }
510 break;
511 case 1: /* LSR */
512 if (shift == 0) {
513 if (flags) {
514 tcg_gen_shri_i32(var, var, 31);
515 gen_set_CF(var);
516 }
517 tcg_gen_movi_i32(var, 0);
518 } else {
519 if (flags)
520 shifter_out_im(var, shift - 1);
521 tcg_gen_shri_i32(var, var, shift);
522 }
523 break;
524 case 2: /* ASR */
525 if (shift == 0)
526 shift = 32;
527 if (flags)
528 shifter_out_im(var, shift - 1);
529 if (shift == 32)
530 shift = 31;
531 tcg_gen_sari_i32(var, var, shift);
532 break;
533 case 3: /* ROR/RRX */
534 if (shift != 0) {
535 if (flags)
536 shifter_out_im(var, shift - 1);
537 tcg_gen_rori_i32(var, var, shift); break;
538 } else {
d9ba4830 539 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
540 if (flags)
541 shifter_out_im(var, 0);
542 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
543 tcg_gen_shli_i32(tmp, tmp, 31);
544 tcg_gen_or_i32(var, var, tmp);
545 dead_tmp(tmp);
b26eefb6
PB
546 }
547 }
 548}
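/* Note on the shift == 0 cases above: in the ARM immediate shift encoding,
   LSR #0 and ASR #0 actually mean LSR #32 and ASR #32, and ROR #0 encodes
   RRX (rotate right by one through the carry flag), which is why they are
   special-cased rather than treated as no-ops. */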
549
8984bd2e
PB
550static inline void gen_arm_shift_reg(TCGv var, int shiftop,
551 TCGv shift, int flags)
552{
553 if (flags) {
554 switch (shiftop) {
555 case 0: gen_helper_shl_cc(var, var, shift); break;
556 case 1: gen_helper_shr_cc(var, var, shift); break;
557 case 2: gen_helper_sar_cc(var, var, shift); break;
558 case 3: gen_helper_ror_cc(var, var, shift); break;
559 }
560 } else {
561 switch (shiftop) {
562 case 0: gen_helper_shl(var, var, shift); break;
563 case 1: gen_helper_shr(var, var, shift); break;
564 case 2: gen_helper_sar(var, var, shift); break;
565 case 3: gen_helper_ror(var, var, shift); break;
566 }
567 }
568 dead_tmp(shift);
569}
570
6ddbc6e4
PB
571#define PAS_OP(pfx) \
572 switch (op2) { \
573 case 0: gen_pas_helper(glue(pfx,add16)); break; \
574 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
575 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
576 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
577 case 4: gen_pas_helper(glue(pfx,add8)); break; \
578 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
579 }
d9ba4830 580static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 581{
a7812ae4 582 TCGv_ptr tmp;
6ddbc6e4
PB
583
584 switch (op1) {
585#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
586 case 1:
a7812ae4 587 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
588 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
589 PAS_OP(s)
590 break;
591 case 5:
a7812ae4 592 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
593 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
594 PAS_OP(u)
595 break;
596#undef gen_pas_helper
597#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 case 2:
599 PAS_OP(q);
600 break;
601 case 3:
602 PAS_OP(sh);
603 break;
604 case 6:
605 PAS_OP(uq);
606 break;
607 case 7:
608 PAS_OP(uh);
609 break;
610#undef gen_pas_helper
611 }
612}
9ee6e8bb
PB
613#undef PAS_OP
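/* In this decoder and the Thumb-2 one below, op1 selects the variant (signed
   or unsigned with GE flag update, saturating, or halving) and op2 selects
   the operation; the GE-updating variants are the ones that receive a
   pointer to env->GE. */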
614
6ddbc6e4
PB
615/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
616#define PAS_OP(pfx) \
617 switch (op2) { \
618 case 0: gen_pas_helper(glue(pfx,add8)); break; \
619 case 1: gen_pas_helper(glue(pfx,add16)); break; \
620 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
621 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
622 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
623 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
624 }
d9ba4830 625static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 626{
a7812ae4 627 TCGv_ptr tmp;
6ddbc6e4
PB
628
629 switch (op1) {
630#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
631 case 0:
a7812ae4 632 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
633 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
634 PAS_OP(s)
635 break;
636 case 4:
a7812ae4 637 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
638 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
639 PAS_OP(u)
640 break;
641#undef gen_pas_helper
642#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
643 case 1:
644 PAS_OP(q);
645 break;
646 case 2:
647 PAS_OP(sh);
648 break;
649 case 5:
650 PAS_OP(uq);
651 break;
652 case 6:
653 PAS_OP(uh);
654 break;
655#undef gen_pas_helper
656 }
657}
9ee6e8bb
PB
658#undef PAS_OP
659
d9ba4830
PB
660static void gen_test_cc(int cc, int label)
661{
662 TCGv tmp;
663 TCGv tmp2;
d9ba4830
PB
664 int inv;
665
d9ba4830
PB
666 switch (cc) {
667 case 0: /* eq: Z */
6fbe23d5 668 tmp = load_cpu_field(ZF);
cb63669a 669 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
670 break;
671 case 1: /* ne: !Z */
6fbe23d5 672 tmp = load_cpu_field(ZF);
cb63669a 673 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
674 break;
675 case 2: /* cs: C */
676 tmp = load_cpu_field(CF);
cb63669a 677 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
678 break;
679 case 3: /* cc: !C */
680 tmp = load_cpu_field(CF);
cb63669a 681 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
682 break;
683 case 4: /* mi: N */
6fbe23d5 684 tmp = load_cpu_field(NF);
cb63669a 685 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
686 break;
687 case 5: /* pl: !N */
6fbe23d5 688 tmp = load_cpu_field(NF);
cb63669a 689 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
690 break;
691 case 6: /* vs: V */
692 tmp = load_cpu_field(VF);
cb63669a 693 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
694 break;
695 case 7: /* vc: !V */
696 tmp = load_cpu_field(VF);
cb63669a 697 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
698 break;
699 case 8: /* hi: C && !Z */
700 inv = gen_new_label();
701 tmp = load_cpu_field(CF);
cb63669a 702 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 703 dead_tmp(tmp);
6fbe23d5 704 tmp = load_cpu_field(ZF);
cb63669a 705 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
706 gen_set_label(inv);
707 break;
708 case 9: /* ls: !C || Z */
709 tmp = load_cpu_field(CF);
cb63669a 710 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 711 dead_tmp(tmp);
6fbe23d5 712 tmp = load_cpu_field(ZF);
cb63669a 713 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
714 break;
715 case 10: /* ge: N == V -> N ^ V == 0 */
716 tmp = load_cpu_field(VF);
6fbe23d5 717 tmp2 = load_cpu_field(NF);
d9ba4830
PB
718 tcg_gen_xor_i32(tmp, tmp, tmp2);
719 dead_tmp(tmp2);
cb63669a 720 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
721 break;
722 case 11: /* lt: N != V -> N ^ V != 0 */
723 tmp = load_cpu_field(VF);
6fbe23d5 724 tmp2 = load_cpu_field(NF);
d9ba4830
PB
725 tcg_gen_xor_i32(tmp, tmp, tmp2);
726 dead_tmp(tmp2);
cb63669a 727 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
728 break;
729 case 12: /* gt: !Z && N == V */
730 inv = gen_new_label();
6fbe23d5 731 tmp = load_cpu_field(ZF);
cb63669a 732 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
733 dead_tmp(tmp);
734 tmp = load_cpu_field(VF);
6fbe23d5 735 tmp2 = load_cpu_field(NF);
d9ba4830
PB
736 tcg_gen_xor_i32(tmp, tmp, tmp2);
737 dead_tmp(tmp2);
cb63669a 738 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
739 gen_set_label(inv);
740 break;
741 case 13: /* le: Z || N != V */
6fbe23d5 742 tmp = load_cpu_field(ZF);
cb63669a 743 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
744 dead_tmp(tmp);
745 tmp = load_cpu_field(VF);
6fbe23d5 746 tmp2 = load_cpu_field(NF);
d9ba4830
PB
747 tcg_gen_xor_i32(tmp, tmp, tmp2);
748 dead_tmp(tmp2);
cb63669a 749 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
750 break;
751 default:
752 fprintf(stderr, "Bad condition code 0x%x\n", cc);
753 abort();
754 }
755 dead_tmp(tmp);
756}
2c0262af 757
b1d8e52e 758static const uint8_t table_logic_cc[16] = {
2c0262af
FB
759 1, /* and */
760 1, /* xor */
761 0, /* sub */
762 0, /* rsb */
763 0, /* add */
764 0, /* adc */
765 0, /* sbc */
766 0, /* rsc */
767 1, /* andl */
768 1, /* xorl */
769 0, /* cmp */
770 0, /* cmn */
771 1, /* orr */
772 1, /* mov */
773 1, /* bic */
774 1, /* mvn */
775};
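/* table_logic_cc[op] is nonzero for the data processing operations (AND, EOR,
   TST, TEQ, ORR, MOV, BIC, MVN) whose S-bit forms set N and Z from the
   logical result, with C coming from the shifter; the arithmetic operations
   use the add/sub/adc/sbc flag helpers instead. */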
3b46e624 776
d9ba4830
PB
777/* Set PC and Thumb state from an immediate address. */
778static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 779{
b26eefb6 780 TCGv tmp;
99c475ab 781
b26eefb6
PB
782 s->is_jmp = DISAS_UPDATE;
783 tmp = new_tmp();
d9ba4830
PB
784 if (s->thumb != (addr & 1)) {
785 tcg_gen_movi_i32(tmp, addr & 1);
786 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
787 }
788 tcg_gen_movi_i32(tmp, addr & ~1);
789 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
b26eefb6 790 dead_tmp(tmp);
d9ba4830
PB
791}
792
793/* Set PC and Thumb state from var. var is marked as dead. */
794static inline void gen_bx(DisasContext *s, TCGv var)
795{
796 TCGv tmp;
797
798 s->is_jmp = DISAS_UPDATE;
799 tmp = new_tmp();
800 tcg_gen_andi_i32(tmp, var, 1);
801 store_cpu_field(tmp, thumb);
802 tcg_gen_andi_i32(var, var, ~1);
803 store_cpu_field(var, regs[15]);
804}
805
806/* TODO: This should be removed. Use gen_bx instead. */
807static inline void gen_bx_T0(DisasContext *s)
808{
809 TCGv tmp = new_tmp();
810 tcg_gen_mov_i32(tmp, cpu_T[0]);
811 gen_bx(s, tmp);
b26eefb6 812}
b5ff1b31 813
b0109805
PB
814static inline TCGv gen_ld8s(TCGv addr, int index)
815{
816 TCGv tmp = new_tmp();
817 tcg_gen_qemu_ld8s(tmp, addr, index);
818 return tmp;
819}
820static inline TCGv gen_ld8u(TCGv addr, int index)
821{
822 TCGv tmp = new_tmp();
823 tcg_gen_qemu_ld8u(tmp, addr, index);
824 return tmp;
825}
826static inline TCGv gen_ld16s(TCGv addr, int index)
827{
828 TCGv tmp = new_tmp();
829 tcg_gen_qemu_ld16s(tmp, addr, index);
830 return tmp;
831}
832static inline TCGv gen_ld16u(TCGv addr, int index)
833{
834 TCGv tmp = new_tmp();
835 tcg_gen_qemu_ld16u(tmp, addr, index);
836 return tmp;
837}
838static inline TCGv gen_ld32(TCGv addr, int index)
839{
840 TCGv tmp = new_tmp();
841 tcg_gen_qemu_ld32u(tmp, addr, index);
842 return tmp;
843}
844static inline void gen_st8(TCGv val, TCGv addr, int index)
845{
846 tcg_gen_qemu_st8(val, addr, index);
847 dead_tmp(val);
848}
849static inline void gen_st16(TCGv val, TCGv addr, int index)
850{
851 tcg_gen_qemu_st16(val, addr, index);
852 dead_tmp(val);
853}
854static inline void gen_st32(TCGv val, TCGv addr, int index)
855{
856 tcg_gen_qemu_st32(val, addr, index);
857 dead_tmp(val);
858}
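/* The gen_ldXX / gen_stXX wrappers above pair a qemu_ld/qemu_st with the
   temporary management: loads return a fresh temporary and stores consume
   (dead_tmp) their value.  The 'index' argument is the memory access index,
   in practice IS_USER(s), selecting a user or privileged access. */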
b5ff1b31 859
2c0262af
FB
860static inline void gen_movl_T0_reg(DisasContext *s, int reg)
861{
b26eefb6 862 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
863}
864
865static inline void gen_movl_T1_reg(DisasContext *s, int reg)
866{
b26eefb6 867 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
868}
869
870static inline void gen_movl_T2_reg(DisasContext *s, int reg)
871{
b26eefb6
PB
872 load_reg_var(s, cpu_T[2], reg);
873}
874
5e3f878a
PB
875static inline void gen_set_pc_im(uint32_t val)
876{
877 TCGv tmp = new_tmp();
878 tcg_gen_movi_i32(tmp, val);
879 store_cpu_field(tmp, regs[15]);
880}
881
2c0262af
FB
882static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
883{
b26eefb6
PB
884 TCGv tmp;
885 if (reg == 15) {
886 tmp = new_tmp();
887 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
888 } else {
889 tmp = cpu_T[t];
890 }
891 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
2c0262af 892 if (reg == 15) {
b26eefb6 893 dead_tmp(tmp);
2c0262af
FB
894 s->is_jmp = DISAS_JUMP;
895 }
896}
897
898static inline void gen_movl_reg_T0(DisasContext *s, int reg)
899{
900 gen_movl_reg_TN(s, reg, 0);
901}
902
903static inline void gen_movl_reg_T1(DisasContext *s, int reg)
904{
905 gen_movl_reg_TN(s, reg, 1);
906}
907
b5ff1b31
FB
908/* Force a TB lookup after an instruction that changes the CPU state. */
909static inline void gen_lookup_tb(DisasContext *s)
910{
911 gen_op_movl_T0_im(s->pc);
912 gen_movl_reg_T0(s, 15);
913 s->is_jmp = DISAS_UPDATE;
914}
915
b0109805
PB
916static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
917 TCGv var)
2c0262af 918{
1e8d4eec 919 int val, rm, shift, shiftop;
b26eefb6 920 TCGv offset;
2c0262af
FB
921
922 if (!(insn & (1 << 25))) {
923 /* immediate */
924 val = insn & 0xfff;
925 if (!(insn & (1 << 23)))
926 val = -val;
537730b9 927 if (val != 0)
b0109805 928 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
929 } else {
930 /* shift/register */
931 rm = (insn) & 0xf;
932 shift = (insn >> 7) & 0x1f;
1e8d4eec 933 shiftop = (insn >> 5) & 3;
b26eefb6 934 offset = load_reg(s, rm);
9a119ff6 935 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 936 if (!(insn & (1 << 23)))
b0109805 937 tcg_gen_sub_i32(var, var, offset);
2c0262af 938 else
b0109805 939 tcg_gen_add_i32(var, var, offset);
b26eefb6 940 dead_tmp(offset);
2c0262af
FB
941 }
942}
943
191f9a93 944static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 945 int extra, TCGv var)
2c0262af
FB
946{
947 int val, rm;
b26eefb6 948 TCGv offset;
3b46e624 949
2c0262af
FB
950 if (insn & (1 << 22)) {
951 /* immediate */
952 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
953 if (!(insn & (1 << 23)))
954 val = -val;
18acad92 955 val += extra;
537730b9 956 if (val != 0)
b0109805 957 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
958 } else {
959 /* register */
191f9a93 960 if (extra)
b0109805 961 tcg_gen_addi_i32(var, var, extra);
2c0262af 962 rm = (insn) & 0xf;
b26eefb6 963 offset = load_reg(s, rm);
2c0262af 964 if (!(insn & (1 << 23)))
b0109805 965 tcg_gen_sub_i32(var, var, offset);
2c0262af 966 else
b0109805 967 tcg_gen_add_i32(var, var, offset);
b26eefb6 968 dead_tmp(offset);
2c0262af
FB
969 }
970}
971
4373f3ce
PB
972#define VFP_OP2(name) \
973static inline void gen_vfp_##name(int dp) \
974{ \
975 if (dp) \
976 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
977 else \
978 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
979}
980
4373f3ce
PB
981VFP_OP2(add)
982VFP_OP2(sub)
983VFP_OP2(mul)
984VFP_OP2(div)
985
986#undef VFP_OP2
987
988static inline void gen_vfp_abs(int dp)
989{
990 if (dp)
991 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
992 else
993 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
994}
995
996static inline void gen_vfp_neg(int dp)
997{
998 if (dp)
999 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1000 else
1001 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1002}
1003
1004static inline void gen_vfp_sqrt(int dp)
1005{
1006 if (dp)
1007 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1008 else
1009 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1010}
1011
1012static inline void gen_vfp_cmp(int dp)
1013{
1014 if (dp)
1015 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1016 else
1017 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1018}
1019
1020static inline void gen_vfp_cmpe(int dp)
1021{
1022 if (dp)
1023 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1024 else
1025 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1026}
1027
1028static inline void gen_vfp_F1_ld0(int dp)
1029{
1030 if (dp)
5b340b51 1031 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1032 else
5b340b51 1033 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1034}
1035
1036static inline void gen_vfp_uito(int dp)
1037{
1038 if (dp)
1039 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1040 else
1041 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1042}
1043
1044static inline void gen_vfp_sito(int dp)
1045{
1046 if (dp)
66230e0d 1047 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 1048 else
66230e0d 1049 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
1050}
1051
1052static inline void gen_vfp_toui(int dp)
1053{
1054 if (dp)
1055 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1056 else
1057 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1058}
1059
1060static inline void gen_vfp_touiz(int dp)
1061{
1062 if (dp)
1063 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1064 else
1065 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1066}
1067
1068static inline void gen_vfp_tosi(int dp)
1069{
1070 if (dp)
1071 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1072 else
1073 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1074}
1075
1076static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1077{
1078 if (dp)
4373f3ce 1079 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1080 else
4373f3ce
PB
1081 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1082}
1083
1084#define VFP_GEN_FIX(name) \
1085static inline void gen_vfp_##name(int dp, int shift) \
1086{ \
1087 if (dp) \
1088 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1089 else \
1090 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
9ee6e8bb 1091}
4373f3ce
PB
1092VFP_GEN_FIX(tosh)
1093VFP_GEN_FIX(tosl)
1094VFP_GEN_FIX(touh)
1095VFP_GEN_FIX(toul)
1096VFP_GEN_FIX(shto)
1097VFP_GEN_FIX(slto)
1098VFP_GEN_FIX(uhto)
1099VFP_GEN_FIX(ulto)
1100#undef VFP_GEN_FIX
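/* The VFP_GEN_FIX expansions above provide the fixed-point conversions:
   tosh/tosl/touh/toul convert F0 to signed or unsigned 16/32-bit fixed point
   with 'shift' fraction bits, and shto/slto/uhto/ulto do the reverse. */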
9ee6e8bb 1101
b5ff1b31
FB
1102static inline void gen_vfp_ld(DisasContext *s, int dp)
1103{
1104 if (dp)
4373f3ce 1105 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1106 else
4373f3ce 1107 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1108}
1109
1110static inline void gen_vfp_st(DisasContext *s, int dp)
1111{
1112 if (dp)
4373f3ce 1113 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1114 else
4373f3ce 1115 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1116}
1117
8e96005d
FB
1118static inline long
1119vfp_reg_offset (int dp, int reg)
1120{
1121 if (dp)
1122 return offsetof(CPUARMState, vfp.regs[reg]);
1123 else if (reg & 1) {
1124 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1125 + offsetof(CPU_DoubleU, l.upper);
1126 } else {
1127 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1128 + offsetof(CPU_DoubleU, l.lower);
1129 }
1130}
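/* Single precision registers live in the halves of the corresponding double
   precision register: even-numbered singles occupy the low 32 bits and
   odd-numbered ones the high 32 bits (CPU_DoubleU hides the host byte
   order). */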
9ee6e8bb
PB
1131
1132/* Return the offset of a 32-bit piece of a NEON register.
1133 zero is the least significant end of the register. */
1134static inline long
1135neon_reg_offset (int reg, int n)
1136{
1137 int sreg;
1138 sreg = reg * 2 + n;
1139 return vfp_reg_offset(0, sreg);
1140}
1141
ad69471c
PB
1142/* FIXME: Remove these. */
1143#define neon_T0 cpu_T[0]
1144#define neon_T1 cpu_T[1]
1145#define NEON_GET_REG(T, reg, n) \
1146 tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1147#define NEON_SET_REG(T, reg, n) \
1148 tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
9ee6e8bb 1149
8f8e3aa4
PB
1150static TCGv neon_load_reg(int reg, int pass)
1151{
1152 TCGv tmp = new_tmp();
1153 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1154 return tmp;
1155}
1156
1157static void neon_store_reg(int reg, int pass, TCGv var)
1158{
1159 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1160 dead_tmp(var);
1161}
1162
a7812ae4 1163static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1164{
1165 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1166}
1167
a7812ae4 1168static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1169{
1170 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1171}
1172
4373f3ce
PB
1173#define tcg_gen_ld_f32 tcg_gen_ld_i32
1174#define tcg_gen_ld_f64 tcg_gen_ld_i64
1175#define tcg_gen_st_f32 tcg_gen_st_i32
1176#define tcg_gen_st_f64 tcg_gen_st_i64
1177
b7bcbe95
FB
1178static inline void gen_mov_F0_vreg(int dp, int reg)
1179{
1180 if (dp)
4373f3ce 1181 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1182 else
4373f3ce 1183 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1184}
1185
1186static inline void gen_mov_F1_vreg(int dp, int reg)
1187{
1188 if (dp)
4373f3ce 1189 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1190 else
4373f3ce 1191 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1192}
1193
1194static inline void gen_mov_vreg_F0(int dp, int reg)
1195{
1196 if (dp)
4373f3ce 1197 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1198 else
4373f3ce 1199 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1200}
1201
18c9b560
AZ
1202#define ARM_CP_RW_BIT (1 << 20)
1203
a7812ae4 1204static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1205{
1206 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1207}
1208
a7812ae4 1209static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1210{
1211 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1212}
1213
1214static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1215{
1216 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1217}
1218
1219static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1220{
1221 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1222}
1223
1224static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1225{
1226 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1227}
1228
1229static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1230{
1231 iwmmxt_store_reg(cpu_M0, rn);
1232}
1233
1234static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1235{
1236 iwmmxt_load_reg(cpu_M0, rn);
1237}
1238
1239static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1240{
1241 iwmmxt_load_reg(cpu_V1, rn);
1242 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1243}
1244
1245static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1246{
1247 iwmmxt_load_reg(cpu_V1, rn);
1248 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1249}
1250
1251static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1252{
1253 iwmmxt_load_reg(cpu_V1, rn);
1254 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1255}
1256
1257#define IWMMXT_OP(name) \
1258static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1259{ \
1260 iwmmxt_load_reg(cpu_V1, rn); \
1261 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1262}
1263
1264#define IWMMXT_OP_ENV(name) \
1265static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1266{ \
1267 iwmmxt_load_reg(cpu_V1, rn); \
1268 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1269}
1270
1271#define IWMMXT_OP_ENV_SIZE(name) \
1272IWMMXT_OP_ENV(name##b) \
1273IWMMXT_OP_ENV(name##w) \
1274IWMMXT_OP_ENV(name##l)
1275
1276#define IWMMXT_OP_ENV1(name) \
1277static inline void gen_op_iwmmxt_##name##_M0(void) \
1278{ \
1279 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1280}
1281
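/* The macro families above generate the gen_op_iwmmxt_*_M0_wRn() wrappers
   used by the iwMMXt decoder below; the _ENV variants also pass cpu_env,
   presumably so the helpers can update saturation and SIMD flag state. */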
1282IWMMXT_OP(maddsq)
1283IWMMXT_OP(madduq)
1284IWMMXT_OP(sadb)
1285IWMMXT_OP(sadw)
1286IWMMXT_OP(mulslw)
1287IWMMXT_OP(mulshw)
1288IWMMXT_OP(mululw)
1289IWMMXT_OP(muluhw)
1290IWMMXT_OP(macsw)
1291IWMMXT_OP(macuw)
1292
1293IWMMXT_OP_ENV_SIZE(unpackl)
1294IWMMXT_OP_ENV_SIZE(unpackh)
1295
1296IWMMXT_OP_ENV1(unpacklub)
1297IWMMXT_OP_ENV1(unpackluw)
1298IWMMXT_OP_ENV1(unpacklul)
1299IWMMXT_OP_ENV1(unpackhub)
1300IWMMXT_OP_ENV1(unpackhuw)
1301IWMMXT_OP_ENV1(unpackhul)
1302IWMMXT_OP_ENV1(unpacklsb)
1303IWMMXT_OP_ENV1(unpacklsw)
1304IWMMXT_OP_ENV1(unpacklsl)
1305IWMMXT_OP_ENV1(unpackhsb)
1306IWMMXT_OP_ENV1(unpackhsw)
1307IWMMXT_OP_ENV1(unpackhsl)
1308
1309IWMMXT_OP_ENV_SIZE(cmpeq)
1310IWMMXT_OP_ENV_SIZE(cmpgtu)
1311IWMMXT_OP_ENV_SIZE(cmpgts)
1312
1313IWMMXT_OP_ENV_SIZE(mins)
1314IWMMXT_OP_ENV_SIZE(minu)
1315IWMMXT_OP_ENV_SIZE(maxs)
1316IWMMXT_OP_ENV_SIZE(maxu)
1317
1318IWMMXT_OP_ENV_SIZE(subn)
1319IWMMXT_OP_ENV_SIZE(addn)
1320IWMMXT_OP_ENV_SIZE(subu)
1321IWMMXT_OP_ENV_SIZE(addu)
1322IWMMXT_OP_ENV_SIZE(subs)
1323IWMMXT_OP_ENV_SIZE(adds)
1324
1325IWMMXT_OP_ENV(avgb0)
1326IWMMXT_OP_ENV(avgb1)
1327IWMMXT_OP_ENV(avgw0)
1328IWMMXT_OP_ENV(avgw1)
1329
1330IWMMXT_OP(msadb)
1331
1332IWMMXT_OP_ENV(packuw)
1333IWMMXT_OP_ENV(packul)
1334IWMMXT_OP_ENV(packuq)
1335IWMMXT_OP_ENV(packsw)
1336IWMMXT_OP_ENV(packsl)
1337IWMMXT_OP_ENV(packsq)
1338
1339static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1340{
1341 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1342}
1343
1344static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1345{
1346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1347}
1348
1349static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1350{
1351 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1352}
1353
1354static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1355{
1356 iwmmxt_load_reg(cpu_V1, rn);
1357 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1358}
1359
1360static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1361{
1362 TCGv tmp = tcg_const_i32(shift);
1363 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1364}
1365
1366static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1367{
1368 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1369 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1370 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1371}
1372
1373static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1374{
1375 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1376 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1377 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1378}
1379
1380static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1381{
1382 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1383 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1384 if (mask != ~0u)
1385 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
1386}
1387
1388static void gen_op_iwmmxt_set_mup(void)
1389{
1390 TCGv tmp;
1391 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1392 tcg_gen_ori_i32(tmp, tmp, 2);
1393 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1394}
1395
1396static void gen_op_iwmmxt_set_cup(void)
1397{
1398 TCGv tmp;
1399 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1400 tcg_gen_ori_i32(tmp, tmp, 1);
1401 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1402}
1403
1404static void gen_op_iwmmxt_setpsr_nz(void)
1405{
1406 TCGv tmp = new_tmp();
1407 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1408 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1409}
1410
1411static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1412{
1413 iwmmxt_load_reg(cpu_V1, rn);
86831435 1414 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1415 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1416}
1417
1418
1419static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1420{
1421 iwmmxt_load_reg(cpu_V0, rn);
1422 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1423 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1424 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1425}
1426
1427static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1428{
36aa55dc 1429 tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
e677137d
PB
1430 iwmmxt_store_reg(cpu_V0, rn);
1431}
1432
18c9b560
AZ
1433static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1434{
1435 int rd;
1436 uint32_t offset;
1437
1438 rd = (insn >> 16) & 0xf;
1439 gen_movl_T1_reg(s, rd);
1440
1441 offset = (insn & 0xff) << ((insn >> 7) & 2);
1442 if (insn & (1 << 24)) {
1443 /* Pre indexed */
1444 if (insn & (1 << 23))
1445 gen_op_addl_T1_im(offset);
1446 else
1447 gen_op_addl_T1_im(-offset);
1448
1449 if (insn & (1 << 21))
1450 gen_movl_reg_T1(s, rd);
1451 } else if (insn & (1 << 21)) {
1452 /* Post indexed */
1453 if (insn & (1 << 23))
1454 gen_op_movl_T0_im(offset);
1455 else
1456 gen_op_movl_T0_im(- offset);
1457 gen_op_addl_T0_T1();
1458 gen_movl_reg_T0(s, rd);
1459 } else if (!(insn & (1 << 23)))
1460 return 1;
1461 return 0;
1462}
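/* The address decode above follows the usual ARM coprocessor load/store form:
   bit 24 selects pre- vs post-indexing, bit 23 selects whether the offset is
   added or subtracted, bit 21 requests base register writeback, and the
   offset is an 8-bit immediate scaled by 4 when bit 8 is set. */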
1463
1464static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1465{
1466 int rd = (insn >> 0) & 0xf;
1467
1468 if (insn & (1 << 8))
1469 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1470 return 1;
1471 else
1472 gen_op_iwmmxt_movl_T0_wCx(rd);
1473 else
e677137d 1474 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1475
1476 gen_op_movl_T1_im(mask);
1477 gen_op_andl_T0_T1();
1478 return 0;
1479}
1480
 1481/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
 1482 (i.e. an undefined instruction). */
1483static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1484{
1485 int rd, wrd;
1486 int rdhi, rdlo, rd0, rd1, i;
b0109805 1487 TCGv tmp;
18c9b560
AZ
1488
1489 if ((insn & 0x0e000e00) == 0x0c000000) {
1490 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1491 wrd = insn & 0xf;
1492 rdlo = (insn >> 12) & 0xf;
1493 rdhi = (insn >> 16) & 0xf;
1494 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1495 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1496 gen_movl_reg_T0(s, rdlo);
1497 gen_movl_reg_T1(s, rdhi);
1498 } else { /* TMCRR */
1499 gen_movl_T0_reg(s, rdlo);
1500 gen_movl_T1_reg(s, rdhi);
e677137d 1501 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1502 gen_op_iwmmxt_set_mup();
1503 }
1504 return 0;
1505 }
1506
1507 wrd = (insn >> 12) & 0xf;
1508 if (gen_iwmmxt_address(s, insn))
1509 return 1;
1510 if (insn & ARM_CP_RW_BIT) {
1511 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1512 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1513 tcg_gen_mov_i32(cpu_T[0], tmp);
1514 dead_tmp(tmp);
18c9b560
AZ
1515 gen_op_iwmmxt_movl_wCx_T0(wrd);
1516 } else {
e677137d
PB
1517 i = 1;
1518 if (insn & (1 << 8)) {
1519 if (insn & (1 << 22)) { /* WLDRD */
1520 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1521 i = 0;
1522 } else { /* WLDRW wRd */
1523 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1524 }
1525 } else {
1526 if (insn & (1 << 22)) { /* WLDRH */
1527 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1528 } else { /* WLDRB */
1529 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1530 }
1531 }
1532 if (i) {
1533 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1534 dead_tmp(tmp);
1535 }
18c9b560
AZ
1536 gen_op_iwmmxt_movq_wRn_M0(wrd);
1537 }
1538 } else {
1539 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1540 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1541 tmp = new_tmp();
1542 tcg_gen_mov_i32(tmp, cpu_T[0]);
1543 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1544 } else {
1545 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1546 tmp = new_tmp();
1547 if (insn & (1 << 8)) {
1548 if (insn & (1 << 22)) { /* WSTRD */
1549 dead_tmp(tmp);
1550 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1551 } else { /* WSTRW wRd */
1552 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1553 gen_st32(tmp, cpu_T[1], IS_USER(s));
1554 }
1555 } else {
1556 if (insn & (1 << 22)) { /* WSTRH */
1557 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1558 gen_st16(tmp, cpu_T[1], IS_USER(s));
1559 } else { /* WSTRB */
1560 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1561 gen_st8(tmp, cpu_T[1], IS_USER(s));
1562 }
1563 }
18c9b560
AZ
1564 }
1565 }
1566 return 0;
1567 }
1568
1569 if ((insn & 0x0f000000) != 0x0e000000)
1570 return 1;
1571
1572 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1573 case 0x000: /* WOR */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 0) & 0xf;
1576 rd1 = (insn >> 16) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 gen_op_iwmmxt_orq_M0_wRn(rd1);
1579 gen_op_iwmmxt_setpsr_nz();
1580 gen_op_iwmmxt_movq_wRn_M0(wrd);
1581 gen_op_iwmmxt_set_mup();
1582 gen_op_iwmmxt_set_cup();
1583 break;
1584 case 0x011: /* TMCR */
1585 if (insn & 0xf)
1586 return 1;
1587 rd = (insn >> 12) & 0xf;
1588 wrd = (insn >> 16) & 0xf;
1589 switch (wrd) {
1590 case ARM_IWMMXT_wCID:
1591 case ARM_IWMMXT_wCASF:
1592 break;
1593 case ARM_IWMMXT_wCon:
1594 gen_op_iwmmxt_set_cup();
1595 /* Fall through. */
1596 case ARM_IWMMXT_wCSSF:
1597 gen_op_iwmmxt_movl_T0_wCx(wrd);
1598 gen_movl_T1_reg(s, rd);
1599 gen_op_bicl_T0_T1();
1600 gen_op_iwmmxt_movl_wCx_T0(wrd);
1601 break;
1602 case ARM_IWMMXT_wCGR0:
1603 case ARM_IWMMXT_wCGR1:
1604 case ARM_IWMMXT_wCGR2:
1605 case ARM_IWMMXT_wCGR3:
1606 gen_op_iwmmxt_set_cup();
1607 gen_movl_reg_T0(s, rd);
1608 gen_op_iwmmxt_movl_wCx_T0(wrd);
1609 break;
1610 default:
1611 return 1;
1612 }
1613 break;
1614 case 0x100: /* WXOR */
1615 wrd = (insn >> 12) & 0xf;
1616 rd0 = (insn >> 0) & 0xf;
1617 rd1 = (insn >> 16) & 0xf;
1618 gen_op_iwmmxt_movq_M0_wRn(rd0);
1619 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1620 gen_op_iwmmxt_setpsr_nz();
1621 gen_op_iwmmxt_movq_wRn_M0(wrd);
1622 gen_op_iwmmxt_set_mup();
1623 gen_op_iwmmxt_set_cup();
1624 break;
1625 case 0x111: /* TMRC */
1626 if (insn & 0xf)
1627 return 1;
1628 rd = (insn >> 12) & 0xf;
1629 wrd = (insn >> 16) & 0xf;
1630 gen_op_iwmmxt_movl_T0_wCx(wrd);
1631 gen_movl_reg_T0(s, rd);
1632 break;
1633 case 0x300: /* WANDN */
1634 wrd = (insn >> 12) & 0xf;
1635 rd0 = (insn >> 0) & 0xf;
1636 rd1 = (insn >> 16) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1638 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1639 gen_op_iwmmxt_andq_M0_wRn(rd1);
1640 gen_op_iwmmxt_setpsr_nz();
1641 gen_op_iwmmxt_movq_wRn_M0(wrd);
1642 gen_op_iwmmxt_set_mup();
1643 gen_op_iwmmxt_set_cup();
1644 break;
1645 case 0x200: /* WAND */
1646 wrd = (insn >> 12) & 0xf;
1647 rd0 = (insn >> 0) & 0xf;
1648 rd1 = (insn >> 16) & 0xf;
1649 gen_op_iwmmxt_movq_M0_wRn(rd0);
1650 gen_op_iwmmxt_andq_M0_wRn(rd1);
1651 gen_op_iwmmxt_setpsr_nz();
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1655 break;
1656 case 0x810: case 0xa10: /* WMADD */
1657 wrd = (insn >> 12) & 0xf;
1658 rd0 = (insn >> 0) & 0xf;
1659 rd1 = (insn >> 16) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0);
1661 if (insn & (1 << 21))
1662 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1663 else
1664 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1665 gen_op_iwmmxt_movq_wRn_M0(wrd);
1666 gen_op_iwmmxt_set_mup();
1667 break;
1668 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
1673 switch ((insn >> 22) & 3) {
1674 case 0:
1675 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1676 break;
1677 case 1:
1678 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1679 break;
1680 case 2:
1681 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1682 break;
1683 case 3:
1684 return 1;
1685 }
1686 gen_op_iwmmxt_movq_wRn_M0(wrd);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1689 break;
1690 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1691 wrd = (insn >> 12) & 0xf;
1692 rd0 = (insn >> 16) & 0xf;
1693 rd1 = (insn >> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0);
1695 switch ((insn >> 22) & 3) {
1696 case 0:
1697 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1698 break;
1699 case 1:
1700 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1701 break;
1702 case 2:
1703 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1704 break;
1705 case 3:
1706 return 1;
1707 }
1708 gen_op_iwmmxt_movq_wRn_M0(wrd);
1709 gen_op_iwmmxt_set_mup();
1710 gen_op_iwmmxt_set_cup();
1711 break;
1712 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1713 wrd = (insn >> 12) & 0xf;
1714 rd0 = (insn >> 16) & 0xf;
1715 rd1 = (insn >> 0) & 0xf;
1716 gen_op_iwmmxt_movq_M0_wRn(rd0);
1717 if (insn & (1 << 22))
1718 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1719 else
1720 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1721 if (!(insn & (1 << 20)))
1722 gen_op_iwmmxt_addl_M0_wRn(wrd);
1723 gen_op_iwmmxt_movq_wRn_M0(wrd);
1724 gen_op_iwmmxt_set_mup();
1725 break;
1726 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1727 wrd = (insn >> 12) & 0xf;
1728 rd0 = (insn >> 16) & 0xf;
1729 rd1 = (insn >> 0) & 0xf;
1730 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1731 if (insn & (1 << 21)) {
1732 if (insn & (1 << 20))
1733 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1734 else
1735 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1736 } else {
1737 if (insn & (1 << 20))
1738 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1739 else
1740 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1741 }
18c9b560
AZ
1742 gen_op_iwmmxt_movq_wRn_M0(wrd);
1743 gen_op_iwmmxt_set_mup();
1744 break;
1745 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1746 wrd = (insn >> 12) & 0xf;
1747 rd0 = (insn >> 16) & 0xf;
1748 rd1 = (insn >> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0);
1750 if (insn & (1 << 21))
1751 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1752 else
1753 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1754 if (!(insn & (1 << 20))) {
e677137d
PB
1755 iwmmxt_load_reg(cpu_V1, wrd);
1756 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1757 }
1758 gen_op_iwmmxt_movq_wRn_M0(wrd);
1759 gen_op_iwmmxt_set_mup();
1760 break;
1761 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1762 wrd = (insn >> 12) & 0xf;
1763 rd0 = (insn >> 16) & 0xf;
1764 rd1 = (insn >> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0);
1766 switch ((insn >> 22) & 3) {
1767 case 0:
1768 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1769 break;
1770 case 1:
1771 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1772 break;
1773 case 2:
1774 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1775 break;
1776 case 3:
1777 return 1;
1778 }
1779 gen_op_iwmmxt_movq_wRn_M0(wrd);
1780 gen_op_iwmmxt_set_mup();
1781 gen_op_iwmmxt_set_cup();
1782 break;
1783 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1784 wrd = (insn >> 12) & 0xf;
1785 rd0 = (insn >> 16) & 0xf;
1786 rd1 = (insn >> 0) & 0xf;
1787 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1788 if (insn & (1 << 22)) {
1789 if (insn & (1 << 20))
1790 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1791 else
1792 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1793 } else {
1794 if (insn & (1 << 20))
1795 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1796 else
1797 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1798 }
18c9b560
AZ
1799 gen_op_iwmmxt_movq_wRn_M0(wrd);
1800 gen_op_iwmmxt_set_mup();
1801 gen_op_iwmmxt_set_cup();
1802 break;
1803 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1804 wrd = (insn >> 12) & 0xf;
1805 rd0 = (insn >> 16) & 0xf;
1806 rd1 = (insn >> 0) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1809 gen_op_movl_T1_im(7);
1810 gen_op_andl_T0_T1();
1811 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1812 gen_op_iwmmxt_movq_wRn_M0(wrd);
1813 gen_op_iwmmxt_set_mup();
1814 break;
1815 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1816 rd = (insn >> 12) & 0xf;
1817 wrd = (insn >> 16) & 0xf;
1818 gen_movl_T0_reg(s, rd);
1819 gen_op_iwmmxt_movq_M0_wRn(wrd);
1820 switch ((insn >> 6) & 3) {
1821 case 0:
1822 gen_op_movl_T1_im(0xff);
1823 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1824 break;
1825 case 1:
1826 gen_op_movl_T1_im(0xffff);
1827 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1828 break;
1829 case 2:
1830 gen_op_movl_T1_im(0xffffffff);
1831 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1832 break;
1833 case 3:
1834 return 1;
1835 }
1836 gen_op_iwmmxt_movq_wRn_M0(wrd);
1837 gen_op_iwmmxt_set_mup();
1838 break;
1839 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1840 rd = (insn >> 12) & 0xf;
1841 wrd = (insn >> 16) & 0xf;
1842 if (rd == 15)
1843 return 1;
1844 gen_op_iwmmxt_movq_M0_wRn(wrd);
1845 switch ((insn >> 22) & 3) {
1846 case 0:
1847 if (insn & 8)
1848 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1849 else {
e677137d 1850 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1851 }
1852 break;
1853 case 1:
1854 if (insn & 8)
1855 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1856 else {
e677137d 1857 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1858 }
1859 break;
1860 case 2:
e677137d 1861 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1862 break;
1863 case 3:
1864 return 1;
1865 }
b26eefb6 1866 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1867 break;
1868 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1869 if ((insn & 0x000ff008) != 0x0003f000)
1870 return 1;
1871 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1872 switch ((insn >> 22) & 3) {
1873 case 0:
1874 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1875 break;
1876 case 1:
1877 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1878 break;
1879 case 2:
1880 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1881 break;
1882 case 3:
1883 return 1;
1884 }
1885 gen_op_shll_T1_im(28);
d9ba4830 1886 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1887 break;
1888 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1889 rd = (insn >> 12) & 0xf;
1890 wrd = (insn >> 16) & 0xf;
1891 gen_movl_T0_reg(s, rd);
1892 switch ((insn >> 6) & 3) {
1893 case 0:
e677137d 1894 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1895 break;
1896 case 1:
e677137d 1897 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1898 break;
1899 case 2:
e677137d 1900 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1901 break;
1902 case 3:
1903 return 1;
1904 }
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 break;
1908 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1909 if ((insn & 0x000ff00f) != 0x0003f000)
1910 return 1;
1911 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1912 switch ((insn >> 22) & 3) {
1913 case 0:
1914 for (i = 0; i < 7; i ++) {
1915 gen_op_shll_T1_im(4);
1916 gen_op_andl_T0_T1();
1917 }
1918 break;
1919 case 1:
1920 for (i = 0; i < 3; i ++) {
1921 gen_op_shll_T1_im(8);
1922 gen_op_andl_T0_T1();
1923 }
1924 break;
1925 case 2:
1926 gen_op_shll_T1_im(16);
1927 gen_op_andl_T0_T1();
1928 break;
1929 case 3:
1930 return 1;
1931 }
d9ba4830 1932 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1933 break;
1934 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1935 wrd = (insn >> 12) & 0xf;
1936 rd0 = (insn >> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0);
1938 switch ((insn >> 22) & 3) {
1939 case 0:
e677137d 1940 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1941 break;
1942 case 1:
e677137d 1943 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1944 break;
1945 case 2:
e677137d 1946 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1947 break;
1948 case 3:
1949 return 1;
1950 }
1951 gen_op_iwmmxt_movq_wRn_M0(wrd);
1952 gen_op_iwmmxt_set_mup();
1953 break;
1954 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1955 if ((insn & 0x000ff00f) != 0x0003f000)
1956 return 1;
1957 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1958 switch ((insn >> 22) & 3) {
1959 case 0:
1960 for (i = 0; i < 7; i ++) {
1961 gen_op_shll_T1_im(4);
1962 gen_op_orl_T0_T1();
1963 }
1964 break;
1965 case 1:
1966 for (i = 0; i < 3; i ++) {
1967 gen_op_shll_T1_im(8);
1968 gen_op_orl_T0_T1();
1969 }
1970 break;
1971 case 2:
1972 gen_op_shll_T1_im(16);
1973 gen_op_orl_T0_T1();
1974 break;
1975 case 3:
1976 return 1;
1977 }
d9ba4830 1978 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1979 break;
1980 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1981 rd = (insn >> 12) & 0xf;
1982 rd0 = (insn >> 16) & 0xf;
1983 if ((insn & 0xf) != 0)
1984 return 1;
1985 gen_op_iwmmxt_movq_M0_wRn(rd0);
1986 switch ((insn >> 22) & 3) {
1987 case 0:
e677137d 1988 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
1989 break;
1990 case 1:
e677137d 1991 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
1992 break;
1993 case 2:
e677137d 1994 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
1995 break;
1996 case 3:
1997 return 1;
1998 }
1999 gen_movl_reg_T0(s, rd);
2000 break;
2001 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2002 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 rd1 = (insn >> 0) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
2007 switch ((insn >> 22) & 3) {
2008 case 0:
2009 if (insn & (1 << 21))
2010 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2011 else
2012 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2013 break;
2014 case 1:
2015 if (insn & (1 << 21))
2016 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2017 else
2018 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2019 break;
2020 case 2:
2021 if (insn & (1 << 21))
2022 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2023 else
2024 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2025 break;
2026 case 3:
2027 return 1;
2028 }
2029 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 gen_op_iwmmxt_set_mup();
2031 gen_op_iwmmxt_set_cup();
2032 break;
2033 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2034 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2035 wrd = (insn >> 12) & 0xf;
2036 rd0 = (insn >> 16) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 switch ((insn >> 22) & 3) {
2039 case 0:
2040 if (insn & (1 << 21))
2041 gen_op_iwmmxt_unpacklsb_M0();
2042 else
2043 gen_op_iwmmxt_unpacklub_M0();
2044 break;
2045 case 1:
2046 if (insn & (1 << 21))
2047 gen_op_iwmmxt_unpacklsw_M0();
2048 else
2049 gen_op_iwmmxt_unpackluw_M0();
2050 break;
2051 case 2:
2052 if (insn & (1 << 21))
2053 gen_op_iwmmxt_unpacklsl_M0();
2054 else
2055 gen_op_iwmmxt_unpacklul_M0();
2056 break;
2057 case 3:
2058 return 1;
2059 }
2060 gen_op_iwmmxt_movq_wRn_M0(wrd);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2063 break;
2064 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2065 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2066 wrd = (insn >> 12) & 0xf;
2067 rd0 = (insn >> 16) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0);
2069 switch ((insn >> 22) & 3) {
2070 case 0:
2071 if (insn & (1 << 21))
2072 gen_op_iwmmxt_unpackhsb_M0();
2073 else
2074 gen_op_iwmmxt_unpackhub_M0();
2075 break;
2076 case 1:
2077 if (insn & (1 << 21))
2078 gen_op_iwmmxt_unpackhsw_M0();
2079 else
2080 gen_op_iwmmxt_unpackhuw_M0();
2081 break;
2082 case 2:
2083 if (insn & (1 << 21))
2084 gen_op_iwmmxt_unpackhsl_M0();
2085 else
2086 gen_op_iwmmxt_unpackhul_M0();
2087 break;
2088 case 3:
2089 return 1;
2090 }
2091 gen_op_iwmmxt_movq_wRn_M0(wrd);
2092 gen_op_iwmmxt_set_mup();
2093 gen_op_iwmmxt_set_cup();
2094 break;
2095 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2096 case 0x214: case 0x614: case 0xa14: case 0xe14:
2097 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 16) & 0xf;
2099 gen_op_iwmmxt_movq_M0_wRn(rd0);
2100 if (gen_iwmmxt_shift(insn, 0xff))
2101 return 1;
2102 switch ((insn >> 22) & 3) {
2103 case 0:
2104 return 1;
2105 case 1:
e677137d 2106 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2107 break;
2108 case 2:
e677137d 2109 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2110 break;
2111 case 3:
e677137d 2112 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2113 break;
2114 }
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2120 case 0x014: case 0x414: case 0x814: case 0xc14:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0);
2124 if (gen_iwmmxt_shift(insn, 0xff))
2125 return 1;
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 return 1;
2129 case 1:
e677137d 2130 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2131 break;
2132 case 2:
e677137d 2133 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2134 break;
2135 case 3:
e677137d 2136 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2137 break;
2138 }
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2142 break;
2143 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2144 case 0x114: case 0x514: case 0x914: case 0xd14:
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 if (gen_iwmmxt_shift(insn, 0xff))
2149 return 1;
2150 switch ((insn >> 22) & 3) {
2151 case 0:
2152 return 1;
2153 case 1:
e677137d 2154 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2155 break;
2156 case 2:
e677137d 2157 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2158 break;
2159 case 3:
e677137d 2160 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2161 break;
2162 }
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 gen_op_iwmmxt_set_cup();
2166 break;
2167 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2168 case 0x314: case 0x714: case 0xb14: case 0xf14:
2169 wrd = (insn >> 12) & 0xf;
2170 rd0 = (insn >> 16) & 0xf;
2171 gen_op_iwmmxt_movq_M0_wRn(rd0);
2172 switch ((insn >> 22) & 3) {
2173 case 0:
2174 return 1;
2175 case 1:
2176 if (gen_iwmmxt_shift(insn, 0xf))
2177 return 1;
e677137d 2178 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2179 break;
2180 case 2:
2181 if (gen_iwmmxt_shift(insn, 0x1f))
2182 return 1;
e677137d 2183 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2184 break;
2185 case 3:
2186 if (gen_iwmmxt_shift(insn, 0x3f))
2187 return 1;
e677137d 2188 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2189 break;
2190 }
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 gen_op_iwmmxt_set_cup();
2194 break;
2195 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2196 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2197 wrd = (insn >> 12) & 0xf;
2198 rd0 = (insn >> 16) & 0xf;
2199 rd1 = (insn >> 0) & 0xf;
2200 gen_op_iwmmxt_movq_M0_wRn(rd0);
2201 switch ((insn >> 22) & 3) {
2202 case 0:
2203 if (insn & (1 << 21))
2204 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2205 else
2206 gen_op_iwmmxt_minub_M0_wRn(rd1);
2207 break;
2208 case 1:
2209 if (insn & (1 << 21))
2210 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2211 else
2212 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2213 break;
2214 case 2:
2215 if (insn & (1 << 21))
2216 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2217 else
2218 gen_op_iwmmxt_minul_M0_wRn(rd1);
2219 break;
2220 case 3:
2221 return 1;
2222 }
2223 gen_op_iwmmxt_movq_wRn_M0(wrd);
2224 gen_op_iwmmxt_set_mup();
2225 break;
2226 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2227 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2228 wrd = (insn >> 12) & 0xf;
2229 rd0 = (insn >> 16) & 0xf;
2230 rd1 = (insn >> 0) & 0xf;
2231 gen_op_iwmmxt_movq_M0_wRn(rd0);
2232 switch ((insn >> 22) & 3) {
2233 case 0:
2234 if (insn & (1 << 21))
2235 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2236 else
2237 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2238 break;
2239 case 1:
2240 if (insn & (1 << 21))
2241 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2242 else
2243 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2244 break;
2245 case 2:
2246 if (insn & (1 << 21))
2247 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2248 else
2249 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2250 break;
2251 case 3:
2252 return 1;
2253 }
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 break;
2257 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2258 case 0x402: case 0x502: case 0x602: case 0x702:
2259 wrd = (insn >> 12) & 0xf;
2260 rd0 = (insn >> 16) & 0xf;
2261 rd1 = (insn >> 0) & 0xf;
2262 gen_op_iwmmxt_movq_M0_wRn(rd0);
2263 gen_op_movl_T0_im((insn >> 20) & 3);
2264 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2265 gen_op_iwmmxt_movq_wRn_M0(wrd);
2266 gen_op_iwmmxt_set_mup();
2267 break;
2268 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2269 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2270 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2271 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 20) & 0xf) {
2277 case 0x0:
2278 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2279 break;
2280 case 0x1:
2281 gen_op_iwmmxt_subub_M0_wRn(rd1);
2282 break;
2283 case 0x3:
2284 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2285 break;
2286 case 0x4:
2287 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2288 break;
2289 case 0x5:
2290 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2291 break;
2292 case 0x7:
2293 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2294 break;
2295 case 0x8:
2296 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2297 break;
2298 case 0x9:
2299 gen_op_iwmmxt_subul_M0_wRn(rd1);
2300 break;
2301 case 0xb:
2302 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2303 break;
2304 default:
2305 return 1;
2306 }
2307 gen_op_iwmmxt_movq_wRn_M0(wrd);
2308 gen_op_iwmmxt_set_mup();
2309 gen_op_iwmmxt_set_cup();
2310 break;
2311 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2312 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2313 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2314 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2315 wrd = (insn >> 12) & 0xf;
2316 rd0 = (insn >> 16) & 0xf;
2317 gen_op_iwmmxt_movq_M0_wRn(rd0);
2318 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2319 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2323 break;
2324 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2325 case 0x418: case 0x518: case 0x618: case 0x718:
2326 case 0x818: case 0x918: case 0xa18: case 0xb18:
2327 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2328 wrd = (insn >> 12) & 0xf;
2329 rd0 = (insn >> 16) & 0xf;
2330 rd1 = (insn >> 0) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0);
2332 switch ((insn >> 20) & 0xf) {
2333 case 0x0:
2334 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2335 break;
2336 case 0x1:
2337 gen_op_iwmmxt_addub_M0_wRn(rd1);
2338 break;
2339 case 0x3:
2340 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2341 break;
2342 case 0x4:
2343 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2344 break;
2345 case 0x5:
2346 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2347 break;
2348 case 0x7:
2349 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2350 break;
2351 case 0x8:
2352 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2353 break;
2354 case 0x9:
2355 gen_op_iwmmxt_addul_M0_wRn(rd1);
2356 break;
2357 case 0xb:
2358 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2359 break;
2360 default:
2361 return 1;
2362 }
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2368 case 0x408: case 0x508: case 0x608: case 0x708:
2369 case 0x808: case 0x908: case 0xa08: case 0xb08:
2370 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2371 wrd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
2373 rd1 = (insn >> 0) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 if (!(insn & (1 << 20)))
2376 return 1;
2377 switch ((insn >> 22) & 3) {
2378 case 0:
2379 return 1;
2380 case 1:
2381 if (insn & (1 << 21))
2382 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2383 else
2384 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2385 break;
2386 case 2:
2387 if (insn & (1 << 21))
2388 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2389 else
2390 gen_op_iwmmxt_packul_M0_wRn(rd1);
2391 break;
2392 case 3:
2393 if (insn & (1 << 21))
2394 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2395 else
2396 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2397 break;
2398 }
2399 gen_op_iwmmxt_movq_wRn_M0(wrd);
2400 gen_op_iwmmxt_set_mup();
2401 gen_op_iwmmxt_set_cup();
2402 break;
2403 case 0x201: case 0x203: case 0x205: case 0x207:
2404 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2405 case 0x211: case 0x213: case 0x215: case 0x217:
2406 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2407 wrd = (insn >> 5) & 0xf;
2408 rd0 = (insn >> 12) & 0xf;
2409 rd1 = (insn >> 0) & 0xf;
2410 if (rd0 == 0xf || rd1 == 0xf)
2411 return 1;
2412 gen_op_iwmmxt_movq_M0_wRn(wrd);
2413 switch ((insn >> 16) & 0xf) {
2414 case 0x0: /* TMIA */
b26eefb6
PB
2415 gen_movl_T0_reg(s, rd0);
2416 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2417 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2418 break;
2419 case 0x8: /* TMIAPH */
b26eefb6
PB
2420 gen_movl_T0_reg(s, rd0);
2421 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2422 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2423 break;
2424 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2425 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2426 if (insn & (1 << 16))
2427 gen_op_shrl_T1_im(16);
2428 gen_op_movl_T0_T1();
b26eefb6 2429 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2430 if (insn & (1 << 17))
2431 gen_op_shrl_T1_im(16);
2432 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2433 break;
2434 default:
2435 return 1;
2436 }
2437 gen_op_iwmmxt_movq_wRn_M0(wrd);
2438 gen_op_iwmmxt_set_mup();
2439 break;
2440 default:
2441 return 1;
2442 }
2443
2444 return 0;
2445}
2446
2447/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2448 (i.e. an undefined instruction). */
2449static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2450{
2451 int acc, rd0, rd1, rdhi, rdlo;
2452
2453 if ((insn & 0x0ff00f10) == 0x0e200010) {
2454 /* Multiply with Internal Accumulate Format */
2455 rd0 = (insn >> 12) & 0xf;
2456 rd1 = insn & 0xf;
2457 acc = (insn >> 5) & 7;
2458
2459 if (acc != 0)
2460 return 1;
2461
2462 switch ((insn >> 16) & 0xf) {
2463 case 0x0: /* MIA */
b26eefb6
PB
2464 gen_movl_T0_reg(s, rd0);
2465 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2466 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2467 break;
2468 case 0x8: /* MIAPH */
b26eefb6
PB
2469 gen_movl_T0_reg(s, rd0);
2470 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2471 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2472 break;
2473 case 0xc: /* MIABB */
2474 case 0xd: /* MIABT */
2475 case 0xe: /* MIATB */
2476 case 0xf: /* MIATT */
b26eefb6 2477 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2478 if (insn & (1 << 16))
2479 gen_op_shrl_T1_im(16);
2480 gen_op_movl_T0_T1();
b26eefb6 2481 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2482 if (insn & (1 << 17))
2483 gen_op_shrl_T1_im(16);
2484 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2485 break;
2486 default:
2487 return 1;
2488 }
2489
2490 gen_op_iwmmxt_movq_wRn_M0(acc);
2491 return 0;
2492 }
2493
2494 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2495 /* Internal Accumulator Access Format */
2496 rdhi = (insn >> 16) & 0xf;
2497 rdlo = (insn >> 12) & 0xf;
2498 acc = insn & 7;
2499
2500 if (acc != 0)
2501 return 1;
2502
2503 if (insn & ARM_CP_RW_BIT) { /* MRA */
e677137d 2504 gen_iwmmxt_movl_T0_T1_wRn(acc);
b26eefb6 2505 gen_movl_reg_T0(s, rdlo);
18c9b560
AZ
2506 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2507 gen_op_andl_T0_T1();
b26eefb6 2508 gen_movl_reg_T0(s, rdhi);
18c9b560 2509 } else { /* MAR */
b26eefb6
PB
2510 gen_movl_T0_reg(s, rdlo);
2511 gen_movl_T1_reg(s, rdhi);
e677137d 2512 gen_iwmmxt_movl_wRn_T0_T1(acc);
18c9b560
AZ
2513 }
2514 return 0;
2515 }
2516
2517 return 1;
2518}
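/* Summary of the two XScale DSP formats decoded above: the multiply-with-
   internal-accumulate form (MIA, MIAPH, MIABB/BT/TB/TT) accumulates
   rd0 * rd1 into acc0 using the same M0 temporary as the iwMMXt ops, and
   the accumulator access form (MRA/MAR) moves the 40-bit acc0 to/from a
   register pair, masking the high word to the accumulator's bits 39:32.
   Only acc0 is implemented; any other accumulator number is undefined.  */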
2519
c1713132
AZ
2520/* Disassemble system coprocessor instruction. Return nonzero if
2521 instruction is not defined. */
2522static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2523{
8984bd2e 2524 TCGv tmp;
c1713132
AZ
2525 uint32_t rd = (insn >> 12) & 0xf;
2526 uint32_t cp = (insn >> 8) & 0xf;
2527 if (IS_USER(s)) {
2528 return 1;
2529 }
2530
18c9b560 2531 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2532 if (!env->cp[cp].cp_read)
2533 return 1;
8984bd2e
PB
2534 gen_set_pc_im(s->pc);
2535 tmp = new_tmp();
2536 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2537 store_reg(s, rd, tmp);
c1713132
AZ
2538 } else {
2539 if (!env->cp[cp].cp_write)
2540 return 1;
8984bd2e
PB
2541 gen_set_pc_im(s->pc);
2542 tmp = load_reg(s, rd);
2543 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
a60de947 2544 dead_tmp(tmp);
c1713132
AZ
2545 }
2546 return 0;
2547}
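/* Note on the generic coprocessor path above: accesses are refused in user
   mode, and reads/writes are dispatched through the env->cp[cp].cp_read and
   cp_write hooks.  The PC is written back before the helper call and the
   raw instruction word is passed through, so the helper can decode
   CRn/CRm/opc fields itself.  */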
2548
9ee6e8bb
PB
2549static int cp15_user_ok(uint32_t insn)
2550{
2551 int cpn = (insn >> 16) & 0xf;
2552 int cpm = insn & 0xf;
2553 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2554
2555 if (cpn == 13 && cpm == 0) {
2556 /* TLS register. */
2557 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2558 return 1;
2559 }
2560 if (cpn == 7) {
2561 /* ISB, DSB, DMB. */
2562 if ((cpm == 5 && op == 4)
2563 || (cpm == 10 && (op == 4 || op == 5)))
2564 return 1;
2565 }
2566 return 0;
2567}
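/* In the check above, cpn/cpm are CRn/CRm and op packs opc2 (op bits 2:0)
   with opc1 (op bits 5:3).  User mode is therefore permitted only the c13
   TLS registers (CRm == 0: opc2 == 2 read/write, opc2 == 3 read-only) and
   the c7 barrier encodings c7,c5,4 (ISB) and c7,c10,{4,5} (DSB/DMB);
   anything else is rejected and the access is treated as undefined.  */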
2568
b5ff1b31
FB
2569/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2570 instruction is not defined. */
a90b7318 2571static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2572{
2573 uint32_t rd;
8984bd2e 2574 TCGv tmp;
b5ff1b31 2575
9ee6e8bb
PB
2576 /* M profile cores use memory mapped registers instead of cp15. */
2577 if (arm_feature(env, ARM_FEATURE_M))
2578 return 1;
2579
2580 if ((insn & (1 << 25)) == 0) {
2581 if (insn & (1 << 20)) {
2582 /* mrrc */
2583 return 1;
2584 }
2585 /* mcrr. Used for block cache operations, so implement as no-op. */
2586 return 0;
2587 }
2588 if ((insn & (1 << 4)) == 0) {
2589 /* cdp */
2590 return 1;
2591 }
2592 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2593 return 1;
2594 }
9332f9da
FB
2595 if ((insn & 0x0fff0fff) == 0x0e070f90
2596 || (insn & 0x0fff0fff) == 0x0e070f58) {
2597 /* Wait for interrupt. */
8984bd2e 2598 gen_set_pc_im(s->pc);
9ee6e8bb 2599 s->is_jmp = DISAS_WFI;
9332f9da
FB
2600 return 0;
2601 }
b5ff1b31 2602 rd = (insn >> 12) & 0xf;
18c9b560 2603 if (insn & ARM_CP_RW_BIT) {
8984bd2e
PB
2604 tmp = new_tmp();
2605 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
b5ff1b31
FB
2606 /* If the destination register is r15 then set the condition codes. */
2607 if (rd != 15)
8984bd2e
PB
2608 store_reg(s, rd, tmp);
2609 else
2610 dead_tmp(tmp);
b5ff1b31 2611 } else {
8984bd2e
PB
2612 tmp = load_reg(s, rd);
2613 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2614 dead_tmp(tmp);
a90b7318
AZ
2615 /* Normally we would always end the TB here, but Linux
2616 * arch/arm/mach-pxa/sleep.S expects two instructions following
2617 * an MMU enable to execute from cache. Imitate this behaviour. */
2618 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2619 (insn & 0x0fff0fff) != 0x0e010f10)
2620 gen_lookup_tb(s);
b5ff1b31 2621 }
b5ff1b31
FB
2622 return 0;
2623}
2624
9ee6e8bb
PB
2625#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2626#define VFP_SREG(insn, bigbit, smallbit) \
2627 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2628#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2629 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2630 reg = (((insn) >> (bigbit)) & 0x0f) \
2631 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2632 } else { \
2633 if (insn & (1 << (smallbit))) \
2634 return 1; \
2635 reg = ((insn) >> (bigbit)) & 0x0f; \
2636 }} while (0)
2637
2638#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2639#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2640#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2641#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2642#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2643#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
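/* Example of the encodings handled by the macros above: a single-precision
   register is a 4-bit field plus one extra low bit, so VFP_SREG_D gives
   Sd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1).  For double precision
   the extra bit is the high bit: with VFP3, VFP_DREG_D gives
   Dd = ((insn >> 12) & 0xf) | ((insn >> 18) & 0x10), i.e. D0-D31; without
   VFP3 the extra bit must be zero and only D0-D15 are encodable.
   VFP_REG_SHR exists so that the M-register case (bigbit == 0) can shift
   left by one instead of right by a negative amount.  */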
2644
4373f3ce
PB
2645/* Move between integer and VFP cores. */
2646static TCGv gen_vfp_mrs(void)
2647{
2648 TCGv tmp = new_tmp();
2649 tcg_gen_mov_i32(tmp, cpu_F0s);
2650 return tmp;
2651}
2652
2653static void gen_vfp_msr(TCGv tmp)
2654{
2655 tcg_gen_mov_i32(cpu_F0s, tmp);
2656 dead_tmp(tmp);
2657}
2658
9ee6e8bb
PB
2659static inline int
2660vfp_enabled(CPUState * env)
2661{
2662 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2663}
2664
ad69471c
PB
2665static void gen_neon_dup_u8(TCGv var, int shift)
2666{
2667 TCGv tmp = new_tmp();
2668 if (shift)
2669 tcg_gen_shri_i32(var, var, shift);
86831435 2670 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2671 tcg_gen_shli_i32(tmp, var, 8);
2672 tcg_gen_or_i32(var, var, tmp);
2673 tcg_gen_shli_i32(tmp, var, 16);
2674 tcg_gen_or_i32(var, var, tmp);
2675 dead_tmp(tmp);
2676}
2677
2678static void gen_neon_dup_low16(TCGv var)
2679{
2680 TCGv tmp = new_tmp();
86831435 2681 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2682 tcg_gen_shli_i32(tmp, var, 16);
2683 tcg_gen_or_i32(var, var, tmp);
2684 dead_tmp(tmp);
2685}
2686
2687static void gen_neon_dup_high16(TCGv var)
2688{
2689 TCGv tmp = new_tmp();
2690 tcg_gen_andi_i32(var, var, 0xffff0000);
2691 tcg_gen_shri_i32(tmp, var, 16);
2692 tcg_gen_or_i32(var, var, tmp);
2693 dead_tmp(tmp);
2694}
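/* The three helpers above implement VDUP-style replication on a 32-bit
   value: gen_neon_dup_u8 broadcasts the byte selected by 'shift' into all
   four byte lanes, gen_neon_dup_low16 copies the low halfword into the
   high half, and gen_neon_dup_high16 copies the high halfword into the
   low half.  */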
2695
b7bcbe95
FB
2696/* Disassemble a VFP instruction. Returns nonzero if an error occurred
2697 (i.e. an undefined instruction). */
2698static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2699{
2700 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2701 int dp, veclen;
4373f3ce 2702 TCGv tmp;
ad69471c 2703 TCGv tmp2;
b7bcbe95 2704
40f137e1
PB
2705 if (!arm_feature(env, ARM_FEATURE_VFP))
2706 return 1;
2707
9ee6e8bb
PB
2708 if (!vfp_enabled(env)) {
2709 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2710 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2711 return 1;
2712 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2713 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2714 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2715 return 1;
2716 }
b7bcbe95
FB
2717 dp = ((insn & 0xf00) == 0xb00);
2718 switch ((insn >> 24) & 0xf) {
2719 case 0xe:
2720 if (insn & (1 << 4)) {
2721 /* single register transfer */
b7bcbe95
FB
2722 rd = (insn >> 12) & 0xf;
2723 if (dp) {
9ee6e8bb
PB
2724 int size;
2725 int pass;
2726
2727 VFP_DREG_N(rn, insn);
2728 if (insn & 0xf)
b7bcbe95 2729 return 1;
9ee6e8bb
PB
2730 if (insn & 0x00c00060
2731 && !arm_feature(env, ARM_FEATURE_NEON))
2732 return 1;
2733
2734 pass = (insn >> 21) & 1;
2735 if (insn & (1 << 22)) {
2736 size = 0;
2737 offset = ((insn >> 5) & 3) * 8;
2738 } else if (insn & (1 << 5)) {
2739 size = 1;
2740 offset = (insn & (1 << 6)) ? 16 : 0;
2741 } else {
2742 size = 2;
2743 offset = 0;
2744 }
18c9b560 2745 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2746 /* vfp->arm */
ad69471c 2747 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2748 switch (size) {
2749 case 0:
9ee6e8bb 2750 if (offset)
ad69471c 2751 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2752 if (insn & (1 << 23))
ad69471c 2753 gen_uxtb(tmp);
9ee6e8bb 2754 else
ad69471c 2755 gen_sxtb(tmp);
9ee6e8bb
PB
2756 break;
2757 case 1:
9ee6e8bb
PB
2758 if (insn & (1 << 23)) {
2759 if (offset) {
ad69471c 2760 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2761 } else {
ad69471c 2762 gen_uxth(tmp);
9ee6e8bb
PB
2763 }
2764 } else {
2765 if (offset) {
ad69471c 2766 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2767 } else {
ad69471c 2768 gen_sxth(tmp);
9ee6e8bb
PB
2769 }
2770 }
2771 break;
2772 case 2:
9ee6e8bb
PB
2773 break;
2774 }
ad69471c 2775 store_reg(s, rd, tmp);
b7bcbe95
FB
2776 } else {
2777 /* arm->vfp */
ad69471c 2778 tmp = load_reg(s, rd);
9ee6e8bb
PB
2779 if (insn & (1 << 23)) {
2780 /* VDUP */
2781 if (size == 0) {
ad69471c 2782 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2783 } else if (size == 1) {
ad69471c 2784 gen_neon_dup_low16(tmp);
9ee6e8bb 2785 }
ad69471c
PB
2786 tmp2 = new_tmp();
2787 tcg_gen_mov_i32(tmp2, tmp);
2788 neon_store_reg(rn, 0, tmp2);
3018f259 2789 neon_store_reg(rn, 1, tmp);
9ee6e8bb
PB
2790 } else {
2791 /* VMOV */
2792 switch (size) {
2793 case 0:
ad69471c
PB
2794 tmp2 = neon_load_reg(rn, pass);
2795 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2796 dead_tmp(tmp2);
9ee6e8bb
PB
2797 break;
2798 case 1:
ad69471c
PB
2799 tmp2 = neon_load_reg(rn, pass);
2800 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2801 dead_tmp(tmp2);
9ee6e8bb
PB
2802 break;
2803 case 2:
9ee6e8bb
PB
2804 break;
2805 }
ad69471c 2806 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2807 }
b7bcbe95 2808 }
9ee6e8bb
PB
2809 } else { /* !dp */
2810 if ((insn & 0x6f) != 0x00)
2811 return 1;
2812 rn = VFP_SREG_N(insn);
18c9b560 2813 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2814 /* vfp->arm */
2815 if (insn & (1 << 21)) {
2816 /* system register */
40f137e1 2817 rn >>= 1;
9ee6e8bb 2818
b7bcbe95 2819 switch (rn) {
40f137e1 2820 case ARM_VFP_FPSID:
4373f3ce 2821 /* VFP2 allows access to FPSID from userspace.
9ee6e8bb
PB
2822 VFP3 restricts all id registers to privileged
2823 accesses. */
2824 if (IS_USER(s)
2825 && arm_feature(env, ARM_FEATURE_VFP3))
2826 return 1;
4373f3ce 2827 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2828 break;
40f137e1 2829 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2830 if (IS_USER(s))
2831 return 1;
4373f3ce 2832 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2833 break;
40f137e1
PB
2834 case ARM_VFP_FPINST:
2835 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2836 /* Not present in VFP3. */
2837 if (IS_USER(s)
2838 || arm_feature(env, ARM_FEATURE_VFP3))
2839 return 1;
4373f3ce 2840 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2841 break;
40f137e1 2842 case ARM_VFP_FPSCR:
601d70b9 2843 if (rd == 15) {
4373f3ce
PB
2844 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2845 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2846 } else {
2847 tmp = new_tmp();
2848 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2849 }
b7bcbe95 2850 break;
9ee6e8bb
PB
2851 case ARM_VFP_MVFR0:
2852 case ARM_VFP_MVFR1:
2853 if (IS_USER(s)
2854 || !arm_feature(env, ARM_FEATURE_VFP3))
2855 return 1;
4373f3ce 2856 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2857 break;
b7bcbe95
FB
2858 default:
2859 return 1;
2860 }
2861 } else {
2862 gen_mov_F0_vreg(0, rn);
4373f3ce 2863 tmp = gen_vfp_mrs();
b7bcbe95
FB
2864 }
2865 if (rd == 15) {
b5ff1b31 2866 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2867 gen_set_nzcv(tmp);
2868 dead_tmp(tmp);
2869 } else {
2870 store_reg(s, rd, tmp);
2871 }
b7bcbe95
FB
2872 } else {
2873 /* arm->vfp */
4373f3ce 2874 tmp = load_reg(s, rd);
b7bcbe95 2875 if (insn & (1 << 21)) {
40f137e1 2876 rn >>= 1;
b7bcbe95
FB
2877 /* system register */
2878 switch (rn) {
40f137e1 2879 case ARM_VFP_FPSID:
9ee6e8bb
PB
2880 case ARM_VFP_MVFR0:
2881 case ARM_VFP_MVFR1:
b7bcbe95
FB
2882 /* Writes are ignored. */
2883 break;
40f137e1 2884 case ARM_VFP_FPSCR:
4373f3ce
PB
2885 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2886 dead_tmp(tmp);
b5ff1b31 2887 gen_lookup_tb(s);
b7bcbe95 2888 break;
40f137e1 2889 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2890 if (IS_USER(s))
2891 return 1;
4373f3ce 2892 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2893 gen_lookup_tb(s);
2894 break;
2895 case ARM_VFP_FPINST:
2896 case ARM_VFP_FPINST2:
4373f3ce 2897 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2898 break;
b7bcbe95
FB
2899 default:
2900 return 1;
2901 }
2902 } else {
4373f3ce 2903 gen_vfp_msr(tmp);
b7bcbe95
FB
2904 gen_mov_vreg_F0(0, rn);
2905 }
2906 }
2907 }
2908 } else {
2909 /* data processing */
2910 /* The opcode is in bits 23, 21, 20 and 6. */
2911 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2912 if (dp) {
2913 if (op == 15) {
2914 /* rn is opcode */
2915 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2916 } else {
2917 /* rn is register number */
9ee6e8bb 2918 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2919 }
2920
2921 if (op == 15 && (rn == 15 || rn > 17)) {
2922 /* Integer or single precision destination. */
9ee6e8bb 2923 rd = VFP_SREG_D(insn);
b7bcbe95 2924 } else {
9ee6e8bb 2925 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2926 }
2927
2928 if (op == 15 && (rn == 16 || rn == 17)) {
2929 /* Integer source. */
2930 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2931 } else {
9ee6e8bb 2932 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2933 }
2934 } else {
9ee6e8bb 2935 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2936 if (op == 15 && rn == 15) {
2937 /* Double precision destination. */
9ee6e8bb
PB
2938 VFP_DREG_D(rd, insn);
2939 } else {
2940 rd = VFP_SREG_D(insn);
2941 }
2942 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2943 }
2944
2945 veclen = env->vfp.vec_len;
2946 if (op == 15 && rn > 3)
2947 veclen = 0;
2948
2949 /* Shut up compiler warnings. */
2950 delta_m = 0;
2951 delta_d = 0;
2952 bank_mask = 0;
3b46e624 2953
b7bcbe95
FB
2954 if (veclen > 0) {
2955 if (dp)
2956 bank_mask = 0xc;
2957 else
2958 bank_mask = 0x18;
2959
2960 /* Figure out what type of vector operation this is. */
2961 if ((rd & bank_mask) == 0) {
2962 /* scalar */
2963 veclen = 0;
2964 } else {
2965 if (dp)
2966 delta_d = (env->vfp.vec_stride >> 1) + 1;
2967 else
2968 delta_d = env->vfp.vec_stride + 1;
2969
2970 if ((rm & bank_mask) == 0) {
2971 /* mixed scalar/vector */
2972 delta_m = 0;
2973 } else {
2974 /* vector */
2975 delta_m = delta_d;
2976 }
2977 }
2978 }
2979
2980 /* Load the initial operands. */
2981 if (op == 15) {
2982 switch (rn) {
2983 case 16:
2984 case 17:
2985 /* Integer source */
2986 gen_mov_F0_vreg(0, rm);
2987 break;
2988 case 8:
2989 case 9:
2990 /* Compare */
2991 gen_mov_F0_vreg(dp, rd);
2992 gen_mov_F1_vreg(dp, rm);
2993 break;
2994 case 10:
2995 case 11:
2996 /* Compare with zero */
2997 gen_mov_F0_vreg(dp, rd);
2998 gen_vfp_F1_ld0(dp);
2999 break;
9ee6e8bb
PB
3000 case 20:
3001 case 21:
3002 case 22:
3003 case 23:
644ad806
PB
3004 case 28:
3005 case 29:
3006 case 30:
3007 case 31:
9ee6e8bb
PB
3008 /* Source and destination the same. */
3009 gen_mov_F0_vreg(dp, rd);
3010 break;
b7bcbe95
FB
3011 default:
3012 /* One source operand. */
3013 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3014 break;
b7bcbe95
FB
3015 }
3016 } else {
3017 /* Two source operands. */
3018 gen_mov_F0_vreg(dp, rn);
3019 gen_mov_F1_vreg(dp, rm);
3020 }
3021
3022 for (;;) {
3023 /* Perform the calculation. */
3024 switch (op) {
3025 case 0: /* mac: fd + (fn * fm) */
3026 gen_vfp_mul(dp);
3027 gen_mov_F1_vreg(dp, rd);
3028 gen_vfp_add(dp);
3029 break;
3030 case 1: /* nmac: fd - (fn * fm) */
3031 gen_vfp_mul(dp);
3032 gen_vfp_neg(dp);
3033 gen_mov_F1_vreg(dp, rd);
3034 gen_vfp_add(dp);
3035 break;
3036 case 2: /* msc: -fd + (fn * fm) */
3037 gen_vfp_mul(dp);
3038 gen_mov_F1_vreg(dp, rd);
3039 gen_vfp_sub(dp);
3040 break;
3041 case 3: /* nmsc: -fd - (fn * fm) */
3042 gen_vfp_mul(dp);
b7bcbe95 3043 gen_vfp_neg(dp);
c9fb531a
PB
3044 gen_mov_F1_vreg(dp, rd);
3045 gen_vfp_sub(dp);
b7bcbe95
FB
3046 break;
3047 case 4: /* mul: fn * fm */
3048 gen_vfp_mul(dp);
3049 break;
3050 case 5: /* nmul: -(fn * fm) */
3051 gen_vfp_mul(dp);
3052 gen_vfp_neg(dp);
3053 break;
3054 case 6: /* add: fn + fm */
3055 gen_vfp_add(dp);
3056 break;
3057 case 7: /* sub: fn - fm */
3058 gen_vfp_sub(dp);
3059 break;
3060 case 8: /* div: fn / fm */
3061 gen_vfp_div(dp);
3062 break;
9ee6e8bb
PB
3063 case 14: /* fconst */
3064 if (!arm_feature(env, ARM_FEATURE_VFP3))
3065 return 1;
3066
3067 n = (insn << 12) & 0x80000000;
3068 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3069 if (dp) {
3070 if (i & 0x40)
3071 i |= 0x3f80;
3072 else
3073 i |= 0x4000;
3074 n |= i << 16;
4373f3ce 3075 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3076 } else {
3077 if (i & 0x40)
3078 i |= 0x780;
3079 else
3080 i |= 0x800;
3081 n |= i << 19;
5b340b51 3082 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3083 }
9ee6e8bb 3084 break;
b7bcbe95
FB
3085 case 15: /* extension space */
3086 switch (rn) {
3087 case 0: /* cpy */
3088 /* no-op */
3089 break;
3090 case 1: /* abs */
3091 gen_vfp_abs(dp);
3092 break;
3093 case 2: /* neg */
3094 gen_vfp_neg(dp);
3095 break;
3096 case 3: /* sqrt */
3097 gen_vfp_sqrt(dp);
3098 break;
3099 case 8: /* cmp */
3100 gen_vfp_cmp(dp);
3101 break;
3102 case 9: /* cmpe */
3103 gen_vfp_cmpe(dp);
3104 break;
3105 case 10: /* cmpz */
3106 gen_vfp_cmp(dp);
3107 break;
3108 case 11: /* cmpez */
3109 gen_vfp_F1_ld0(dp);
3110 gen_vfp_cmpe(dp);
3111 break;
3112 case 15: /* single<->double conversion */
3113 if (dp)
4373f3ce 3114 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3115 else
4373f3ce 3116 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3117 break;
3118 case 16: /* fuito */
3119 gen_vfp_uito(dp);
3120 break;
3121 case 17: /* fsito */
3122 gen_vfp_sito(dp);
3123 break;
9ee6e8bb
PB
3124 case 20: /* fshto */
3125 if (!arm_feature(env, ARM_FEATURE_VFP3))
3126 return 1;
644ad806 3127 gen_vfp_shto(dp, 16 - rm);
9ee6e8bb
PB
3128 break;
3129 case 21: /* fslto */
3130 if (!arm_feature(env, ARM_FEATURE_VFP3))
3131 return 1;
644ad806 3132 gen_vfp_slto(dp, 32 - rm);
9ee6e8bb
PB
3133 break;
3134 case 22: /* fuhto */
3135 if (!arm_feature(env, ARM_FEATURE_VFP3))
3136 return 1;
644ad806 3137 gen_vfp_uhto(dp, 16 - rm);
9ee6e8bb
PB
3138 break;
3139 case 23: /* fulto */
3140 if (!arm_feature(env, ARM_FEATURE_VFP3))
3141 return 1;
644ad806 3142 gen_vfp_ulto(dp, 32 - rm);
9ee6e8bb 3143 break;
b7bcbe95
FB
3144 case 24: /* ftoui */
3145 gen_vfp_toui(dp);
3146 break;
3147 case 25: /* ftouiz */
3148 gen_vfp_touiz(dp);
3149 break;
3150 case 26: /* ftosi */
3151 gen_vfp_tosi(dp);
3152 break;
3153 case 27: /* ftosiz */
3154 gen_vfp_tosiz(dp);
3155 break;
9ee6e8bb
PB
3156 case 28: /* ftosh */
3157 if (!arm_feature(env, ARM_FEATURE_VFP3))
3158 return 1;
644ad806 3159 gen_vfp_tosh(dp, 16 - rm);
9ee6e8bb
PB
3160 break;
3161 case 29: /* ftosl */
3162 if (!arm_feature(env, ARM_FEATURE_VFP3))
3163 return 1;
644ad806 3164 gen_vfp_tosl(dp, 32 - rm);
9ee6e8bb
PB
3165 break;
3166 case 30: /* ftouh */
3167 if (!arm_feature(env, ARM_FEATURE_VFP3))
3168 return 1;
644ad806 3169 gen_vfp_touh(dp, 16 - rm);
9ee6e8bb
PB
3170 break;
3171 case 31: /* ftoul */
3172 if (!arm_feature(env, ARM_FEATURE_VFP3))
3173 return 1;
644ad806 3174 gen_vfp_toul(dp, 32 - rm);
9ee6e8bb 3175 break;
b7bcbe95
FB
3176 default: /* undefined */
3177 printf ("rn:%d\n", rn);
3178 return 1;
3179 }
3180 break;
3181 default: /* undefined */
3182 printf ("op:%d\n", op);
3183 return 1;
3184 }
3185
3186 /* Write back the result. */
3187 if (op == 15 && (rn >= 8 && rn <= 11))
3188 ; /* Comparison, do nothing. */
3189 else if (op == 15 && rn > 17)
3190 /* Integer result. */
3191 gen_mov_vreg_F0(0, rd);
3192 else if (op == 15 && rn == 15)
3193 /* conversion */
3194 gen_mov_vreg_F0(!dp, rd);
3195 else
3196 gen_mov_vreg_F0(dp, rd);
3197
3198 /* break out of the loop if we have finished */
3199 if (veclen == 0)
3200 break;
3201
3202 if (op == 15 && delta_m == 0) {
3203 /* single source one-many */
3204 while (veclen--) {
3205 rd = ((rd + delta_d) & (bank_mask - 1))
3206 | (rd & bank_mask);
3207 gen_mov_vreg_F0(dp, rd);
3208 }
3209 break;
3210 }
3211 /* Set up the next operands. */
3212 veclen--;
3213 rd = ((rd + delta_d) & (bank_mask - 1))
3214 | (rd & bank_mask);
3215
3216 if (op == 15) {
3217 /* One source operand. */
3218 rm = ((rm + delta_m) & (bank_mask - 1))
3219 | (rm & bank_mask);
3220 gen_mov_F0_vreg(dp, rm);
3221 } else {
3222 /* Two source operands. */
3223 rn = ((rn + delta_d) & (bank_mask - 1))
3224 | (rn & bank_mask);
3225 gen_mov_F0_vreg(dp, rn);
3226 if (delta_m) {
3227 rm = ((rm + delta_m) & (bank_mask - 1))
3228 | (rm & bank_mask);
3229 gen_mov_F1_vreg(dp, rm);
3230 }
3231 }
3232 }
3233 }
3234 break;
3235 case 0xc:
3236 case 0xd:
9ee6e8bb 3237 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3238 /* two-register transfer */
3239 rn = (insn >> 16) & 0xf;
3240 rd = (insn >> 12) & 0xf;
3241 if (dp) {
9ee6e8bb
PB
3242 VFP_DREG_M(rm, insn);
3243 } else {
3244 rm = VFP_SREG_M(insn);
3245 }
b7bcbe95 3246
18c9b560 3247 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3248 /* vfp->arm */
3249 if (dp) {
4373f3ce
PB
3250 gen_mov_F0_vreg(0, rm * 2);
3251 tmp = gen_vfp_mrs();
3252 store_reg(s, rd, tmp);
3253 gen_mov_F0_vreg(0, rm * 2 + 1);
3254 tmp = gen_vfp_mrs();
3255 store_reg(s, rn, tmp);
b7bcbe95
FB
3256 } else {
3257 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3258 tmp = gen_vfp_mrs();
3259 store_reg(s, rn, tmp);
b7bcbe95 3260 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3261 tmp = gen_vfp_mrs();
3262 store_reg(s, rd, tmp);
b7bcbe95
FB
3263 }
3264 } else {
3265 /* arm->vfp */
3266 if (dp) {
4373f3ce
PB
3267 tmp = load_reg(s, rd);
3268 gen_vfp_msr(tmp);
3269 gen_mov_vreg_F0(0, rm * 2);
3270 tmp = load_reg(s, rn);
3271 gen_vfp_msr(tmp);
3272 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3273 } else {
4373f3ce
PB
3274 tmp = load_reg(s, rn);
3275 gen_vfp_msr(tmp);
b7bcbe95 3276 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3277 tmp = load_reg(s, rd);
3278 gen_vfp_msr(tmp);
b7bcbe95
FB
3279 gen_mov_vreg_F0(0, rm + 1);
3280 }
3281 }
3282 } else {
3283 /* Load/store */
3284 rn = (insn >> 16) & 0xf;
3285 if (dp)
9ee6e8bb 3286 VFP_DREG_D(rd, insn);
b7bcbe95 3287 else
9ee6e8bb
PB
3288 rd = VFP_SREG_D(insn);
3289 if (s->thumb && rn == 15) {
3290 gen_op_movl_T1_im(s->pc & ~2);
3291 } else {
3292 gen_movl_T1_reg(s, rn);
3293 }
b7bcbe95
FB
3294 if ((insn & 0x01200000) == 0x01000000) {
3295 /* Single load/store */
3296 offset = (insn & 0xff) << 2;
3297 if ((insn & (1 << 23)) == 0)
3298 offset = -offset;
3299 gen_op_addl_T1_im(offset);
3300 if (insn & (1 << 20)) {
b5ff1b31 3301 gen_vfp_ld(s, dp);
b7bcbe95
FB
3302 gen_mov_vreg_F0(dp, rd);
3303 } else {
3304 gen_mov_F0_vreg(dp, rd);
b5ff1b31 3305 gen_vfp_st(s, dp);
b7bcbe95
FB
3306 }
3307 } else {
3308 /* load/store multiple */
3309 if (dp)
3310 n = (insn >> 1) & 0x7f;
3311 else
3312 n = insn & 0xff;
3313
3314 if (insn & (1 << 24)) /* pre-decrement */
3315 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3316
3317 if (dp)
3318 offset = 8;
3319 else
3320 offset = 4;
3321 for (i = 0; i < n; i++) {
18c9b560 3322 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3323 /* load */
b5ff1b31 3324 gen_vfp_ld(s, dp);
b7bcbe95
FB
3325 gen_mov_vreg_F0(dp, rd + i);
3326 } else {
3327 /* store */
3328 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3329 gen_vfp_st(s, dp);
b7bcbe95
FB
3330 }
3331 gen_op_addl_T1_im(offset);
3332 }
3333 if (insn & (1 << 21)) {
3334 /* writeback */
3335 if (insn & (1 << 24))
3336 offset = -offset * n;
3337 else if (dp && (insn & 1))
3338 offset = 4;
3339 else
3340 offset = 0;
3341
3342 if (offset != 0)
3343 gen_op_addl_T1_im(offset);
3344 gen_movl_reg_T1(s, rn);
3345 }
3346 }
3347 }
3348 break;
3349 default:
3350 /* Should never happen. */
3351 return 1;
3352 }
3353 return 0;
3354}
3355
6e256c93 3356static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3357{
6e256c93
FB
3358 TranslationBlock *tb;
3359
3360 tb = s->tb;
3361 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3362 tcg_gen_goto_tb(n);
8984bd2e 3363 gen_set_pc_im(dest);
57fec1fe 3364 tcg_gen_exit_tb((long)tb + n);
6e256c93 3365 } else {
8984bd2e 3366 gen_set_pc_im(dest);
57fec1fe 3367 tcg_gen_exit_tb(0);
6e256c93 3368 }
c53be334
FB
3369}
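/* Direct block chaining (tcg_gen_goto_tb plus an exit code of tb + n) is
   only used when the destination lies on the same guest page as the start
   of the current TB; otherwise the PC is simply updated and we exit with 0
   so that no chaining link is created across a page boundary.  */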
3370
8aaca4c0
FB
3371static inline void gen_jmp (DisasContext *s, uint32_t dest)
3372{
551bd27f 3373 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3374 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3375 if (s->thumb)
d9ba4830
PB
3376 dest |= 1;
3377 gen_bx_im(s, dest);
8aaca4c0 3378 } else {
6e256c93 3379 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3380 s->is_jmp = DISAS_TB_JUMP;
3381 }
3382}
3383
d9ba4830 3384static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3385{
ee097184 3386 if (x)
d9ba4830 3387 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3388 else
d9ba4830 3389 gen_sxth(t0);
ee097184 3390 if (y)
d9ba4830 3391 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3392 else
d9ba4830
PB
3393 gen_sxth(t1);
3394 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3395}
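/* gen_mulxy performs a signed 16x16->32 halfword multiply: x selects the
   top (arithmetic shift right by 16) or bottom (sign-extend) half of t0,
   y does the same for t1, and the 32-bit product is left in t0.  It is the
   building block used for the SMULxy/SMLAxy-style halfword multiplies.  */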
3396
3397/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3398static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3399 uint32_t mask;
3400
3401 mask = 0;
3402 if (flags & (1 << 0))
3403 mask |= 0xff;
3404 if (flags & (1 << 1))
3405 mask |= 0xff00;
3406 if (flags & (1 << 2))
3407 mask |= 0xff0000;
3408 if (flags & (1 << 3))
3409 mask |= 0xff000000;
9ee6e8bb 3410
2ae23e75 3411 /* Mask out undefined bits. */
9ee6e8bb
PB
3412 mask &= ~CPSR_RESERVED;
3413 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3414 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3415 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3416 mask &= ~CPSR_IT;
9ee6e8bb 3417 /* Mask out execution state bits. */
2ae23e75 3418 if (!spsr)
e160c51c 3419 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3420 /* Mask out privileged bits. */
3421 if (IS_USER(s))
9ee6e8bb 3422 mask &= CPSR_USER;
b5ff1b31
FB
3423 return mask;
3424}
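/* Example for msr_mask: for an MSR writing the f and c fields (flags == 9)
   the raw mask is 0xff0000ff; reserved bits are then cleared, the GE/E bits
   are removed on pre-V6 cores, the IT bits on non-Thumb-2 cores, the
   execution state bits unless the target is an SPSR, and in user mode the
   result is further restricted to CPSR_USER.  */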
3425
3426/* Returns nonzero if access to the PSR is not permitted. */
3427static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3428{
d9ba4830 3429 TCGv tmp;
b5ff1b31
FB
3430 if (spsr) {
3431 /* ??? This is also undefined in system mode. */
3432 if (IS_USER(s))
3433 return 1;
d9ba4830
PB
3434
3435 tmp = load_cpu_field(spsr);
3436 tcg_gen_andi_i32(tmp, tmp, ~mask);
3437 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3438 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3439 store_cpu_field(tmp, spsr);
b5ff1b31 3440 } else {
d9ba4830 3441 gen_set_cpsr(cpu_T[0], mask);
b5ff1b31
FB
3442 }
3443 gen_lookup_tb(s);
3444 return 0;
3445}
3446
9ee6e8bb 3447/* Generate an old-style exception return. */
b5ff1b31
FB
3448static void gen_exception_return(DisasContext *s)
3449{
d9ba4830 3450 TCGv tmp;
e22f8f39 3451 gen_movl_reg_T0(s, 15);
d9ba4830
PB
3452 tmp = load_cpu_field(spsr);
3453 gen_set_cpsr(tmp, 0xffffffff);
3454 dead_tmp(tmp);
b5ff1b31
FB
3455 s->is_jmp = DISAS_UPDATE;
3456}
3457
b0109805
PB
3458/* Generate a v6 exception return. Marks both values as dead. */
3459static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3460{
b0109805
PB
3461 gen_set_cpsr(cpsr, 0xffffffff);
3462 dead_tmp(cpsr);
3463 store_reg(s, 15, pc);
9ee6e8bb
PB
3464 s->is_jmp = DISAS_UPDATE;
3465}
3b46e624 3466
9ee6e8bb
PB
3467static inline void
3468gen_set_condexec (DisasContext *s)
3469{
3470 if (s->condexec_mask) {
8f01245e
PB
3471 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3472 TCGv tmp = new_tmp();
3473 tcg_gen_movi_i32(tmp, val);
d9ba4830 3474 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3475 }
3476}
3b46e624 3477
9ee6e8bb
PB
3478static void gen_nop_hint(DisasContext *s, int val)
3479{
3480 switch (val) {
3481 case 3: /* wfi */
8984bd2e 3482 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3483 s->is_jmp = DISAS_WFI;
3484 break;
3485 case 2: /* wfe */
3486 case 4: /* sev */
3487 /* TODO: Implement SEV and WFE. May help SMP performance. */
3488 default: /* nop */
3489 break;
3490 }
3491}
99c475ab 3492
ad69471c
PB
3493/* These macros help make the code more readable when migrating from the
3494 old dyngen helpers. They should probably be removed when
3495 T0/T1 are removed. */
3496#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3497#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
9ee6e8bb 3498
ad69471c 3499#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb
PB
3500
3501static inline int gen_neon_add(int size)
3502{
3503 switch (size) {
ad69471c
PB
3504 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3505 case 1: gen_helper_neon_add_u16(CPU_T001); break;
9ee6e8bb
PB
3506 case 2: gen_op_addl_T0_T1(); break;
3507 default: return 1;
3508 }
3509 return 0;
3510}
3511
ad69471c
PB
3512static inline void gen_neon_rsb(int size)
3513{
3514 switch (size) {
3515 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3516 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3517 case 2: gen_op_rsbl_T0_T1(); break;
3518 default: return;
3519 }
3520}
3521
3522/* 32-bit pairwise ops end up the same as the elementwise versions. */
3523#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3524#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3525#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3526#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3527
3528/* FIXME: This is wrong. They set the wrong overflow bit. */
3529#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3530#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3531#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3532#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3533
3534#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3535 switch ((size << 1) | u) { \
3536 case 0: \
3537 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3538 break; \
3539 case 1: \
3540 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3541 break; \
3542 case 2: \
3543 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3544 break; \
3545 case 3: \
3546 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3547 break; \
3548 case 4: \
3549 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3550 break; \
3551 case 5: \
3552 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3553 break; \
3554 default: return 1; \
3555 }} while (0)
9ee6e8bb
PB
3556
3557#define GEN_NEON_INTEGER_OP(name) do { \
3558 switch ((size << 1) | u) { \
ad69471c
PB
3559 case 0: \
3560 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3561 break; \
3562 case 1: \
3563 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3564 break; \
3565 case 2: \
3566 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3567 break; \
3568 case 3: \
3569 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3570 break; \
3571 case 4: \
3572 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3573 break; \
3574 case 5: \
3575 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3576 break; \
9ee6e8bb
PB
3577 default: return 1; \
3578 }} while (0)
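/* Both GEN_NEON_INTEGER_OP macros dispatch on (size << 1) | u to pick the
   s8/u8/s16/u16/s32/u32 variant of a helper; any other combination (i.e.
   64-bit elements) makes the enclosing function return 1.  The _ENV form is
   for helpers that take cpu_env as an extra argument.  */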
3579
3580static inline void
3581gen_neon_movl_scratch_T0(int scratch)
3582{
3583 uint32_t offset;
3584
3585 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3586 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3587}
3588
3589static inline void
3590gen_neon_movl_scratch_T1(int scratch)
3591{
3592 uint32_t offset;
3593
3594 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3595 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3596}
3597
3598static inline void
3599gen_neon_movl_T0_scratch(int scratch)
3600{
3601 uint32_t offset;
3602
3603 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3604 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3605}
3606
3607static inline void
3608gen_neon_movl_T1_scratch(int scratch)
3609{
3610 uint32_t offset;
3611
3612 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3613 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3614}
3615
3616static inline void gen_neon_get_scalar(int size, int reg)
3617{
3618 if (size == 1) {
3619 NEON_GET_REG(T0, reg >> 1, reg & 1);
3620 } else {
3621 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3622 if (reg & 1)
ad69471c 3623 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb 3624 else
ad69471c 3625 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb
PB
3626 }
3627}
3628
3629static void gen_neon_unzip(int reg, int q, int tmp, int size)
3630{
3631 int n;
3632
3633 for (n = 0; n < q + 1; n += 2) {
3634 NEON_GET_REG(T0, reg, n);
3635 NEON_GET_REG(T1, reg, n + 1);
3636 switch (size) {
ad69471c
PB
3637 case 0: gen_helper_neon_unzip_u8(); break;
3638 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3639 case 2: /* no-op */; break;
3640 default: abort();
3641 }
3642 gen_neon_movl_scratch_T0(tmp + n);
3643 gen_neon_movl_scratch_T1(tmp + n + 1);
3644 }
3645}
3646
3647static struct {
3648 int nregs;
3649 int interleave;
3650 int spacing;
3651} neon_ls_element_type[11] = {
3652 {4, 4, 1},
3653 {4, 4, 2},
3654 {4, 1, 1},
3655 {4, 2, 1},
3656 {3, 3, 1},
3657 {3, 3, 2},
3658 {3, 1, 1},
3659 {1, 1, 1},
3660 {2, 2, 1},
3661 {2, 2, 2},
3662 {2, 1, 1}
3663};
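/* neon_ls_element_type is indexed by the 'op' field (insn bits 11:8) of the
   "load/store all elements" form: nregs is the number of D registers
   transferred, interleave is the element interleave factor used to compute
   the memory stride, and spacing is the register-number step applied
   between structure registers.  */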
3664
3665/* Translate a NEON load/store element instruction. Return nonzero if the
3666 instruction is invalid. */
3667static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3668{
3669 int rd, rn, rm;
3670 int op;
3671 int nregs;
3672 int interleave;
3673 int stride;
3674 int size;
3675 int reg;
3676 int pass;
3677 int load;
3678 int shift;
9ee6e8bb 3679 int n;
b0109805 3680 TCGv tmp;
8f8e3aa4 3681 TCGv tmp2;
9ee6e8bb
PB
3682
3683 if (!vfp_enabled(env))
3684 return 1;
3685 VFP_DREG_D(rd, insn);
3686 rn = (insn >> 16) & 0xf;
3687 rm = insn & 0xf;
3688 load = (insn & (1 << 21)) != 0;
3689 if ((insn & (1 << 23)) == 0) {
3690 /* Load store all elements. */
3691 op = (insn >> 8) & 0xf;
3692 size = (insn >> 6) & 3;
3693 if (op > 10 || size == 3)
3694 return 1;
3695 nregs = neon_ls_element_type[op].nregs;
3696 interleave = neon_ls_element_type[op].interleave;
3697 gen_movl_T1_reg(s, rn);
3698 stride = (1 << size) * interleave;
3699 for (reg = 0; reg < nregs; reg++) {
3700 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3701 gen_movl_T1_reg(s, rn);
3702 gen_op_addl_T1_im((1 << size) * reg);
3703 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3704 gen_movl_T1_reg(s, rn);
3705 gen_op_addl_T1_im(1 << size);
3706 }
3707 for (pass = 0; pass < 2; pass++) {
3708 if (size == 2) {
3709 if (load) {
b0109805 3710 tmp = gen_ld32(cpu_T[1], IS_USER(s));
ad69471c 3711 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3712 } else {
ad69471c 3713 tmp = neon_load_reg(rd, pass);
b0109805 3714 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3715 }
3716 gen_op_addl_T1_im(stride);
3717 } else if (size == 1) {
3718 if (load) {
b0109805 3719 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3720 gen_op_addl_T1_im(stride);
8f8e3aa4 3721 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3722 gen_op_addl_T1_im(stride);
8f8e3aa4
PB
3723 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3724 dead_tmp(tmp2);
3725 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3726 } else {
8f8e3aa4
PB
3727 tmp = neon_load_reg(rd, pass);
3728 tmp2 = new_tmp();
3729 tcg_gen_shri_i32(tmp2, tmp, 16);
b0109805 3730 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3731 gen_op_addl_T1_im(stride);
8f8e3aa4 3732 gen_st16(tmp2, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3733 gen_op_addl_T1_im(stride);
3734 }
3735 } else /* size == 0 */ {
3736 if (load) {
a50f5b91 3737 TCGV_UNUSED(tmp2);
9ee6e8bb 3738 for (n = 0; n < 4; n++) {
b0109805 3739 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3740 gen_op_addl_T1_im(stride);
3741 if (n == 0) {
8f8e3aa4 3742 tmp2 = tmp;
9ee6e8bb 3743 } else {
8f8e3aa4
PB
3744 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3745 dead_tmp(tmp);
9ee6e8bb 3746 }
9ee6e8bb 3747 }
8f8e3aa4 3748 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3749 } else {
8f8e3aa4 3750 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3751 for (n = 0; n < 4; n++) {
8f8e3aa4 3752 tmp = new_tmp();
9ee6e8bb 3753 if (n == 0) {
8f8e3aa4 3754 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3755 } else {
8f8e3aa4 3756 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3757 }
b0109805 3758 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3759 gen_op_addl_T1_im(stride);
9ee6e8bb 3760 }
8f8e3aa4 3761 dead_tmp(tmp2);
9ee6e8bb
PB
3762 }
3763 }
3764 }
3765 rd += neon_ls_element_type[op].spacing;
3766 }
3767 stride = nregs * 8;
3768 } else {
3769 size = (insn >> 10) & 3;
3770 if (size == 3) {
3771 /* Load single element to all lanes. */
3772 if (!load)
3773 return 1;
3774 size = (insn >> 6) & 3;
3775 nregs = ((insn >> 8) & 3) + 1;
3776 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3777 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3778 for (reg = 0; reg < nregs; reg++) {
3779 switch (size) {
3780 case 0:
b0109805 3781 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
ad69471c 3782 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3783 break;
3784 case 1:
b0109805 3785 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
ad69471c 3786 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3787 break;
3788 case 2:
b0109805 3789 tmp = gen_ld32(cpu_T[0], IS_USER(s));
9ee6e8bb
PB
3790 break;
3791 case 3:
3792 return 1;
a50f5b91
PB
3793 default: /* Avoid compiler warnings. */
3794 abort();
99c475ab 3795 }
9ee6e8bb 3796 gen_op_addl_T1_im(1 << size);
ad69471c
PB
3797 tmp2 = new_tmp();
3798 tcg_gen_mov_i32(tmp2, tmp);
3799 neon_store_reg(rd, 0, tmp2);
3018f259 3800 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3801 rd += stride;
3802 }
3803 stride = (1 << size) * nregs;
3804 } else {
3805 /* Single element. */
3806 pass = (insn >> 7) & 1;
3807 switch (size) {
3808 case 0:
3809 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3810 stride = 1;
3811 break;
3812 case 1:
3813 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3814 stride = (insn & (1 << 5)) ? 2 : 1;
3815 break;
3816 case 2:
3817 shift = 0;
9ee6e8bb
PB
3818 stride = (insn & (1 << 6)) ? 2 : 1;
3819 break;
3820 default:
3821 abort();
3822 }
3823 nregs = ((insn >> 8) & 3) + 1;
3824 gen_movl_T1_reg(s, rn);
3825 for (reg = 0; reg < nregs; reg++) {
3826 if (load) {
9ee6e8bb
PB
3827 switch (size) {
3828 case 0:
b0109805 3829 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3830 break;
3831 case 1:
b0109805 3832 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3833 break;
3834 case 2:
b0109805 3835 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb 3836 break;
a50f5b91
PB
3837 default: /* Avoid compiler warnings. */
3838 abort();
9ee6e8bb
PB
3839 }
3840 if (size != 2) {
8f8e3aa4
PB
3841 tmp2 = neon_load_reg(rd, pass);
3842 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3843 dead_tmp(tmp2);
9ee6e8bb 3844 }
8f8e3aa4 3845 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3846 } else { /* Store */
8f8e3aa4
PB
3847 tmp = neon_load_reg(rd, pass);
3848 if (shift)
3849 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3850 switch (size) {
3851 case 0:
b0109805 3852 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3853 break;
3854 case 1:
b0109805 3855 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3856 break;
3857 case 2:
b0109805 3858 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3859 break;
99c475ab 3860 }
99c475ab 3861 }
9ee6e8bb
PB
3862 rd += stride;
3863 gen_op_addl_T1_im(1 << size);
99c475ab 3864 }
9ee6e8bb 3865 stride = nregs * (1 << size);
99c475ab 3866 }
9ee6e8bb
PB
3867 }
3868 if (rm != 15) {
b26eefb6
PB
3869 TCGv base;
3870
3871 base = load_reg(s, rn);
9ee6e8bb 3872 if (rm == 13) {
b26eefb6 3873 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3874 } else {
b26eefb6
PB
3875 TCGv index;
3876 index = load_reg(s, rm);
3877 tcg_gen_add_i32(base, base, index);
3878 dead_tmp(index);
9ee6e8bb 3879 }
b26eefb6 3880 store_reg(s, rn, base);
9ee6e8bb
PB
3881 }
3882 return 0;
3883}
3b46e624 3884
8f8e3aa4
PB
3885/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3886static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3887{
3888 tcg_gen_and_i32(t, t, c);
3889 tcg_gen_bic_i32(f, f, c);
3890 tcg_gen_or_i32(dest, t, f);
3891}
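/* Equivalently, dest = (t & c) | (f & ~c); this is the primitive used for
   the NEON bit-select operations (VBSL and its related forms).  */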
3892
a7812ae4 3893static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3894{
3895 switch (size) {
3896 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3897 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3898 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3899 default: abort();
3900 }
3901}
3902
a7812ae4 3903static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3904{
3905 switch (size) {
3906 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3907 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3908 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3909 default: abort();
3910 }
3911}
3912
a7812ae4 3913static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3914{
3915 switch (size) {
3916 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3917 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3918 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3919 default: abort();
3920 }
3921}
3922
3923static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3924 int q, int u)
3925{
3926 if (q) {
3927 if (u) {
3928 switch (size) {
3929 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3930 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3931 default: abort();
3932 }
3933 } else {
3934 switch (size) {
3935 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3936 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3937 default: abort();
3938 }
3939 }
3940 } else {
3941 if (u) {
3942 switch (size) {
3943 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3944 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3945 default: abort();
3946 }
3947 } else {
3948 switch (size) {
3949 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3950 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3951 default: abort();
3952 }
3953 }
3954 }
3955}
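/* Shift-and-narrow helper used by the VSHRN/VRSHRN/VQSHRN/VQRSHRN path
   below.  Only 16- and 32-bit elements reach it (64-bit sources are handled
   separately); q and u pick between the plain and rounding shift helpers and
   between signed and unsigned element treatment, exactly as coded above. */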
3956
a7812ae4 3957static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
3958{
3959 if (u) {
3960 switch (size) {
3961 case 0: gen_helper_neon_widen_u8(dest, src); break;
3962 case 1: gen_helper_neon_widen_u16(dest, src); break;
3963 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3964 default: abort();
3965 }
3966 } else {
3967 switch (size) {
3968 case 0: gen_helper_neon_widen_s8(dest, src); break;
3969 case 1: gen_helper_neon_widen_s16(dest, src); break;
3970 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3971 default: abort();
3972 }
3973 }
3974 dead_tmp(src);
3975}
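/* gen_neon_widen extends a 32-bit value to a 64-bit one: sizes 0 and 1 widen
   the packed 8/16-bit elements via helpers, size 2 is a plain 32->64
   extension.  u selects zero- versus sign-extension, and the 32-bit source
   temporary is released here. */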
3976
3977static inline void gen_neon_addl(int size)
3978{
3979 switch (size) {
3980 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3981 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3982 case 2: tcg_gen_add_i64(CPU_V001); break;
3983 default: abort();
3984 }
3985}
3986
3987static inline void gen_neon_subl(int size)
3988{
3989 switch (size) {
3990 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3991 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3992 case 2: tcg_gen_sub_i64(CPU_V001); break;
3993 default: abort();
3994 }
3995}
3996
a7812ae4 3997static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
3998{
3999 switch (size) {
4000 case 0: gen_helper_neon_negl_u16(var, var); break;
4001 case 1: gen_helper_neon_negl_u32(var, var); break;
4002 case 2: gen_helper_neon_negl_u64(var, var); break;
4003 default: abort();
4004 }
4005}
4006
a7812ae4 4007static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4008{
4009 switch (size) {
4010 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4011 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4012 default: abort();
4013 }
4014}
4015
a7812ae4 4016static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4017{
a7812ae4 4018 TCGv_i64 tmp;
ad69471c
PB
4019
4020 switch ((size << 1) | u) {
4021 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4022 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4023 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4024 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4025 case 4:
4026 tmp = gen_muls_i64_i32(a, b);
4027 tcg_gen_mov_i64(dest, tmp);
4028 break;
4029 case 5:
4030 tmp = gen_mulu_i64_i32(a, b);
4031 tcg_gen_mov_i64(dest, tmp);
4032 break;
4033 default: abort();
4034 }
4035 if (size < 2) {
4036 dead_tmp(b);
4037 dead_tmp(a);
4038 }
4039}
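/* gen_neon_mull dispatches on (size << 1) | u: the 8x8->16 and 16x16->32
   multiplies go through the packed helpers, while the 32x32->64 cases use
   gen_muls_i64_i32/gen_mulu_i64_i32.  The 32-bit inputs are only freed here
   for the helper cases. */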
4040
9ee6e8bb
PB
4041/* Translate a NEON data processing instruction. Return nonzero if the
4042 instruction is invalid.
ad69471c
PB
4043 We process data in a mixture of 32-bit and 64-bit chunks.
4044 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4045
9ee6e8bb
PB
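/* Rough decode structure of this function: bit 23 clear selects the "three
   registers, same length" group; otherwise bit 4 set selects the
   shift-by-immediate and one-register-and-immediate forms, and the remaining
   encodings cover three registers of different lengths, two registers and a
   scalar, VEXT, the two-register misc group, VTBL/VTBX and VDUP. */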
4046static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4047{
4048 int op;
4049 int q;
4050 int rd, rn, rm;
4051 int size;
4052 int shift;
4053 int pass;
4054 int count;
4055 int pairwise;
4056 int u;
4057 int n;
4058 uint32_t imm;
8f8e3aa4
PB
4059 TCGv tmp;
4060 TCGv tmp2;
4061 TCGv tmp3;
a7812ae4 4062 TCGv_i64 tmp64;
9ee6e8bb
PB
4063
4064 if (!vfp_enabled(env))
4065 return 1;
4066 q = (insn & (1 << 6)) != 0;
4067 u = (insn >> 24) & 1;
4068 VFP_DREG_D(rd, insn);
4069 VFP_DREG_N(rn, insn);
4070 VFP_DREG_M(rm, insn);
4071 size = (insn >> 20) & 3;
4072 if ((insn & (1 << 23)) == 0) {
4073 /* Three register same length. */
4074 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4075 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4076 || op == 10 || op == 11 || op == 16)) {
4077 /* 64-bit element instructions. */
9ee6e8bb 4078 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4079 neon_load_reg64(cpu_V0, rn + pass);
4080 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4081 switch (op) {
4082 case 1: /* VQADD */
4083 if (u) {
ad69471c 4084 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4085 } else {
ad69471c 4086 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4087 }
9ee6e8bb
PB
4088 break;
4089 case 5: /* VQSUB */
4090 if (u) {
ad69471c
PB
4091 gen_helper_neon_sub_saturate_u64(CPU_V001);
4092 } else {
4093 gen_helper_neon_sub_saturate_s64(CPU_V001);
4094 }
4095 break;
4096 case 8: /* VSHL */
4097 if (u) {
4098 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4099 } else {
4100 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4101 }
4102 break;
4103 case 9: /* VQSHL */
4104 if (u) {
4105 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4106 cpu_V0, cpu_V0);
4107 } else {
4108 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4109 cpu_V1, cpu_V0);
4110 }
4111 break;
4112 case 10: /* VRSHL */
4113 if (u) {
4114 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4115 } else {
ad69471c
PB
4116 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4117 }
4118 break;
4119 case 11: /* VQRSHL */
4120 if (u) {
4121 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4122 cpu_V1, cpu_V0);
4123 } else {
4124 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4125 cpu_V1, cpu_V0);
1e8d4eec 4126 }
9ee6e8bb
PB
4127 break;
4128 case 16:
4129 if (u) {
ad69471c 4130 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4131 } else {
ad69471c 4132 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4133 }
4134 break;
4135 default:
4136 abort();
2c0262af 4137 }
ad69471c 4138 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4139 }
9ee6e8bb 4140 return 0;
2c0262af 4141 }
9ee6e8bb
PB
4142 switch (op) {
4143 case 8: /* VSHL */
4144 case 9: /* VQSHL */
4145 case 10: /* VRSHL */
ad69471c 4146 case 11: /* VQRSHL */
9ee6e8bb 4147 {
ad69471c
PB
4148 int rtmp;
4149 /* Shift instruction operands are reversed. */
4150 rtmp = rn;
9ee6e8bb 4151 rn = rm;
ad69471c 4152 rm = rtmp;
9ee6e8bb
PB
4153 pairwise = 0;
4154 }
2c0262af 4155 break;
9ee6e8bb
PB
4156 case 20: /* VPMAX */
4157 case 21: /* VPMIN */
4158 case 23: /* VPADD */
4159 pairwise = 1;
2c0262af 4160 break;
9ee6e8bb
PB
4161 case 26: /* VPADD (float) */
4162 pairwise = (u && size < 2);
2c0262af 4163 break;
9ee6e8bb
PB
4164 case 30: /* VPMIN/VPMAX (float) */
4165 pairwise = u;
2c0262af 4166 break;
9ee6e8bb
PB
4167 default:
4168 pairwise = 0;
2c0262af 4169 break;
9ee6e8bb
PB
4170 }
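/* The main loop below works on 32-bit chunks: four passes for a Q-sized
   operation, two for a D-sized one.  For pairwise ops the first half of the
   passes takes adjacent element pairs from rn and the second half from rm,
   which is why the operand fetch differs. */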
4171 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4172
4173 if (pairwise) {
4174 /* Pairwise. */
4175 if (q)
4176 n = (pass & 1) * 2;
2c0262af 4177 else
9ee6e8bb
PB
4178 n = 0;
4179 if (pass < q + 1) {
4180 NEON_GET_REG(T0, rn, n);
4181 NEON_GET_REG(T1, rn, n + 1);
4182 } else {
4183 NEON_GET_REG(T0, rm, n);
4184 NEON_GET_REG(T1, rm, n + 1);
4185 }
4186 } else {
4187 /* Elementwise. */
4188 NEON_GET_REG(T0, rn, pass);
4189 NEON_GET_REG(T1, rm, pass);
4190 }
4191 switch (op) {
4192 case 0: /* VHADD */
4193 GEN_NEON_INTEGER_OP(hadd);
4194 break;
4195 case 1: /* VQADD */
ad69471c 4196 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4197 break;
9ee6e8bb
PB
4198 case 2: /* VRHADD */
4199 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4200 break;
9ee6e8bb
PB
4201 case 3: /* Logic ops. */
4202 switch ((u << 2) | size) {
4203 case 0: /* VAND */
2c0262af 4204 gen_op_andl_T0_T1();
9ee6e8bb
PB
4205 break;
4206 case 1: /* BIC */
4207 gen_op_bicl_T0_T1();
4208 break;
4209 case 2: /* VORR */
4210 gen_op_orl_T0_T1();
4211 break;
4212 case 3: /* VORN */
4213 gen_op_notl_T1();
4214 gen_op_orl_T0_T1();
4215 break;
4216 case 4: /* VEOR */
4217 gen_op_xorl_T0_T1();
4218 break;
4219 case 5: /* VBSL */
8f8e3aa4
PB
4220 tmp = neon_load_reg(rd, pass);
4221 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4222 dead_tmp(tmp);
9ee6e8bb
PB
4223 break;
4224 case 6: /* VBIT */
8f8e3aa4
PB
4225 tmp = neon_load_reg(rd, pass);
4226 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4227 dead_tmp(tmp);
9ee6e8bb
PB
4228 break;
4229 case 7: /* VBIF */
8f8e3aa4
PB
4230 tmp = neon_load_reg(rd, pass);
4231 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4232 dead_tmp(tmp);
9ee6e8bb 4233 break;
2c0262af
FB
4234 }
4235 break;
9ee6e8bb
PB
4236 case 4: /* VHSUB */
4237 GEN_NEON_INTEGER_OP(hsub);
4238 break;
4239 case 5: /* VQSUB */
ad69471c 4240 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4241 break;
9ee6e8bb
PB
4242 case 6: /* VCGT */
4243 GEN_NEON_INTEGER_OP(cgt);
4244 break;
4245 case 7: /* VCGE */
4246 GEN_NEON_INTEGER_OP(cge);
4247 break;
4248 case 8: /* VSHL */
ad69471c 4249 GEN_NEON_INTEGER_OP(shl);
2c0262af 4250 break;
9ee6e8bb 4251 case 9: /* VQSHL */
ad69471c 4252 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4253 break;
9ee6e8bb 4254 case 10: /* VRSHL */
ad69471c 4255 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4256 break;
9ee6e8bb 4257 case 11: /* VQRSHL */
ad69471c 4258 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4259 break;
4260 case 12: /* VMAX */
4261 GEN_NEON_INTEGER_OP(max);
4262 break;
4263 case 13: /* VMIN */
4264 GEN_NEON_INTEGER_OP(min);
4265 break;
4266 case 14: /* VABD */
4267 GEN_NEON_INTEGER_OP(abd);
4268 break;
4269 case 15: /* VABA */
4270 GEN_NEON_INTEGER_OP(abd);
4271 NEON_GET_REG(T1, rd, pass);
4272 gen_neon_add(size);
4273 break;
4274 case 16:
4275 if (!u) { /* VADD */
4276 if (gen_neon_add(size))
4277 return 1;
4278 } else { /* VSUB */
4279 switch (size) {
ad69471c
PB
4280 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4281 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4282 case 2: gen_op_subl_T0_T1(); break;
4283 default: return 1;
4284 }
4285 }
4286 break;
4287 case 17:
4288 if (!u) { /* VTST */
4289 switch (size) {
ad69471c
PB
4290 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4291 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4292 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4293 default: return 1;
4294 }
4295 } else { /* VCEQ */
4296 switch (size) {
ad69471c
PB
4297 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4298 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4299 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4300 default: return 1;
4301 }
4302 }
4303 break;
4304 case 18: /* Multiply. */
4305 switch (size) {
ad69471c
PB
4306 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4307 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4308 case 2: gen_op_mul_T0_T1(); break;
4309 default: return 1;
4310 }
4311 NEON_GET_REG(T1, rd, pass);
4312 if (u) { /* VMLS */
ad69471c 4313 gen_neon_rsb(size);
9ee6e8bb
PB
4314 } else { /* VMLA */
4315 gen_neon_add(size);
4316 }
4317 break;
4318 case 19: /* VMUL */
4319 if (u) { /* polynomial */
ad69471c 4320 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4321 } else { /* Integer */
4322 switch (size) {
ad69471c
PB
4323 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4324 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4325 case 2: gen_op_mul_T0_T1(); break;
4326 default: return 1;
4327 }
4328 }
4329 break;
4330 case 20: /* VPMAX */
4331 GEN_NEON_INTEGER_OP(pmax);
4332 break;
4333 case 21: /* VPMIN */
4334 GEN_NEON_INTEGER_OP(pmin);
4335 break;
4336 case 22: /* Multiply high. */
4337 if (!u) { /* VQDMULH */
4338 switch (size) {
ad69471c
PB
4339 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4340 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4341 default: return 1;
4342 }
4343 } else { /* VQRDMULH */
4344 switch (size) {
ad69471c
PB
4345 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4346 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4347 default: return 1;
4348 }
4349 }
4350 break;
4351 case 23: /* VPADD */
4352 if (u)
4353 return 1;
4354 switch (size) {
ad69471c
PB
4355 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4356 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4357 case 2: gen_op_addl_T0_T1(); break;
4358 default: return 1;
4359 }
4360 break;
4361 case 26: /* Floating point arithmetic. */
4362 switch ((u << 2) | size) {
4363 case 0: /* VADD */
ad69471c 4364 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4365 break;
4366 case 2: /* VSUB */
ad69471c 4367 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4368 break;
4369 case 4: /* VPADD */
ad69471c 4370 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4371 break;
4372 case 6: /* VABD */
ad69471c 4373 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4374 break;
4375 default:
4376 return 1;
4377 }
4378 break;
4379 case 27: /* Float multiply. */
ad69471c 4380 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4381 if (!u) {
4382 NEON_GET_REG(T1, rd, pass);
4383 if (size == 0) {
ad69471c 4384 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4385 } else {
ad69471c 4386 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4387 }
4388 }
4389 break;
4390 case 28: /* Float compare. */
4391 if (!u) {
ad69471c 4392 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4393 } else {
9ee6e8bb 4394 if (size == 0)
ad69471c 4395 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4396 else
ad69471c 4397 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4398 }
2c0262af 4399 break;
9ee6e8bb
PB
4400 case 29: /* Float compare absolute. */
4401 if (!u)
4402 return 1;
4403 if (size == 0)
ad69471c 4404 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4405 else
ad69471c 4406 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4407 break;
9ee6e8bb
PB
4408 case 30: /* Float min/max. */
4409 if (size == 0)
ad69471c 4410 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4411 else
ad69471c 4412 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4413 break;
4414 case 31:
4415 if (size == 0)
4373f3ce 4416 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4417 else
4373f3ce 4418 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4419 break;
9ee6e8bb
PB
4420 default:
4421 abort();
2c0262af 4422 }
9ee6e8bb
PB
4423 /* Save the result. For elementwise operations we can put it
4424 straight into the destination register. For pairwise operations
4425 we have to be careful to avoid clobbering the source operands. */
4426 if (pairwise && rd == rm) {
4427 gen_neon_movl_scratch_T0(pass);
4428 } else {
4429 NEON_SET_REG(T0, rd, pass);
4430 }
4431
4432 } /* for pass */
4433 if (pairwise && rd == rm) {
4434 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4435 gen_neon_movl_T0_scratch(pass);
4436 NEON_SET_REG(T0, rd, pass);
4437 }
4438 }
ad69471c 4439 /* End of 3 register same size operations. */
9ee6e8bb
PB
4440 } else if (insn & (1 << 4)) {
4441 if ((insn & 0x00380080) != 0) {
4442 /* Two registers and shift. */
4443 op = (insn >> 8) & 0xf;
4444 if (insn & (1 << 7)) {
4445 /* 64-bit shift. */
4446 size = 3;
4447 } else {
4448 size = 2;
4449 while ((insn & (1 << (size + 19))) == 0)
4450 size--;
4451 }
4452 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4453 /* To avoid excessive duplication of ops we implement shift
4454 by immediate using the variable shift operations. */
4455 if (op < 8) {
4456 /* Shift by immediate:
4457 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4458 /* Right shifts are encoded as N - shift, where N is the
4459 element size in bits. */
4460 if (op <= 4)
4461 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4462 if (size == 3) {
4463 count = q + 1;
4464 } else {
4465 count = q ? 4: 2;
4466 }
4467 switch (size) {
4468 case 0:
4469 imm = (uint8_t) shift;
4470 imm |= imm << 8;
4471 imm |= imm << 16;
4472 break;
4473 case 1:
4474 imm = (uint16_t) shift;
4475 imm |= imm << 16;
4476 break;
4477 case 2:
4478 case 3:
4479 imm = shift;
4480 break;
4481 default:
4482 abort();
4483 }
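/* The immediate shift count has now been replicated into every element lane
   of a 32-bit constant so the variable-shift helpers can be reused for the
   sub-64-bit cases (right shifts were converted to negative counts above). */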
4484
4485 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4486 if (size == 3) {
4487 neon_load_reg64(cpu_V0, rm + pass);
4488 tcg_gen_movi_i64(cpu_V1, imm);
4489 switch (op) {
4490 case 0: /* VSHR */
4491 case 1: /* VSRA */
4492 if (u)
4493 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4494 else
ad69471c 4495 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4496 break;
ad69471c
PB
4497 case 2: /* VRSHR */
4498 case 3: /* VRSRA */
4499 if (u)
4500 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4501 else
ad69471c 4502 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4503 break;
ad69471c
PB
4504 case 4: /* VSRI */
4505 if (!u)
4506 return 1;
4507 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4508 break;
4509 case 5: /* VSHL, VSLI */
4510 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4511 break;
4512 case 6: /* VQSHL */
4513 if (u)
4514 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4515 else
ad69471c
PB
4516 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4517 break;
4518 case 7: /* VQSHLU */
4519 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4520 break;
9ee6e8bb 4521 }
ad69471c
PB
4522 if (op == 1 || op == 3) {
4523 /* Accumulate. */
4524 neon_load_reg64(cpu_V0, rd + pass);
4525 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4526 } else if (op == 4 || (op == 5 && u)) {
4527 /* Insert */
4528 cpu_abort(env, "VS[LR]I.64 not implemented");
4529 }
4530 neon_store_reg64(cpu_V0, rd + pass);
4531 } else { /* size < 3 */
4532 /* Operands in T0 and T1. */
4533 gen_op_movl_T1_im(imm);
4534 NEON_GET_REG(T0, rm, pass);
4535 switch (op) {
4536 case 0: /* VSHR */
4537 case 1: /* VSRA */
4538 GEN_NEON_INTEGER_OP(shl);
4539 break;
4540 case 2: /* VRSHR */
4541 case 3: /* VRSRA */
4542 GEN_NEON_INTEGER_OP(rshl);
4543 break;
4544 case 4: /* VSRI */
4545 if (!u)
4546 return 1;
4547 GEN_NEON_INTEGER_OP(shl);
4548 break;
4549 case 5: /* VSHL, VSLI */
4550 switch (size) {
4551 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4552 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4553 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4554 default: return 1;
4555 }
4556 break;
4557 case 6: /* VQSHL */
4558 GEN_NEON_INTEGER_OP_ENV(qshl);
4559 break;
4560 case 7: /* VQSHLU */
4561 switch (size) {
4562 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4563 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4564 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4565 default: return 1;
4566 }
4567 break;
4568 }
4569
4570 if (op == 1 || op == 3) {
4571 /* Accumulate. */
4572 NEON_GET_REG(T1, rd, pass);
4573 gen_neon_add(size);
4574 } else if (op == 4 || (op == 5 && u)) {
4575 /* Insert */
4576 switch (size) {
4577 case 0:
4578 if (op == 4)
4579 imm = 0xff >> -shift;
4580 else
4581 imm = (uint8_t)(0xff << shift);
4582 imm |= imm << 8;
4583 imm |= imm << 16;
4584 break;
4585 case 1:
4586 if (op == 4)
4587 imm = 0xffff >> -shift;
4588 else
4589 imm = (uint16_t)(0xffff << shift);
4590 imm |= imm << 16;
4591 break;
4592 case 2:
4593 if (op == 4)
4594 imm = 0xffffffffu >> -shift;
4595 else
4596 imm = 0xffffffffu << shift;
4597 break;
4598 default:
4599 abort();
4600 }
4601 tmp = neon_load_reg(rd, pass);
4602 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4603 tcg_gen_andi_i32(tmp, tmp, ~imm);
4604 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4605 }
9ee6e8bb
PB
4606 NEON_SET_REG(T0, rd, pass);
4607 }
4608 } /* for pass */
4609 } else if (op < 10) {
ad69471c 4610 /* Shift by immediate and narrow:
9ee6e8bb
PB
4611 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4612 shift = shift - (1 << (size + 3));
4613 size++;
9ee6e8bb
PB
4614 switch (size) {
4615 case 1:
ad69471c 4616 imm = (uint16_t)shift;
9ee6e8bb 4617 imm |= imm << 16;
ad69471c 4618 tmp2 = tcg_const_i32(imm);
a7812ae4 4619 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4620 break;
4621 case 2:
ad69471c
PB
4622 imm = (uint32_t)shift;
4623 tmp2 = tcg_const_i32(imm);
a7812ae4 4624 TCGV_UNUSED_I64(tmp64);
4cc633c3 4625 break;
9ee6e8bb 4626 case 3:
a7812ae4
PB
4627 tmp64 = tcg_const_i64(shift);
4628 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4629 break;
4630 default:
4631 abort();
4632 }
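/* Depending on the (already incremented) source element size, the shift
   count is materialised either as a replicated 32-bit constant in tmp2 or,
   for 64-bit source elements, as a 64-bit constant in tmp64; the unused one
   is explicitly marked with TCGV_UNUSED. */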
4633
ad69471c
PB
4634 for (pass = 0; pass < 2; pass++) {
4635 if (size == 3) {
4636 neon_load_reg64(cpu_V0, rm + pass);
4637 if (q) {
4638 if (u)
a7812ae4 4639 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4640 else
a7812ae4 4641 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4642 } else {
4643 if (u)
a7812ae4 4644 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4645 else
a7812ae4 4646 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4647 }
2c0262af 4648 } else {
ad69471c
PB
4649 tmp = neon_load_reg(rm + pass, 0);
4650 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4651 tmp3 = neon_load_reg(rm + pass, 1);
4652 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4653 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4654 dead_tmp(tmp);
36aa55dc 4655 dead_tmp(tmp3);
9ee6e8bb 4656 }
ad69471c
PB
4657 tmp = new_tmp();
4658 if (op == 8 && !u) {
4659 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4660 } else {
ad69471c
PB
4661 if (op == 8)
4662 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4663 else
ad69471c
PB
4664 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4665 }
4666 if (pass == 0) {
4667 tmp2 = tmp;
4668 } else {
4669 neon_store_reg(rd, 0, tmp2);
4670 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4671 }
4672 } /* for pass */
4673 } else if (op == 10) {
4674 /* VSHLL */
ad69471c 4675 if (q || size == 3)
9ee6e8bb 4676 return 1;
ad69471c
PB
4677 tmp = neon_load_reg(rm, 0);
4678 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4679 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4680 if (pass == 1)
4681 tmp = tmp2;
4682
4683 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4684
9ee6e8bb
PB
4685 if (shift != 0) {
4686 /* The shift is less than the width of the source
ad69471c
PB
4687 type, so we can just shift the whole register. */
4688 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4689 if (size < 2 || !u) {
4690 uint64_t imm64;
4691 if (size == 0) {
4692 imm = (0xffu >> (8 - shift));
4693 imm |= imm << 16;
4694 } else {
4695 imm = 0xffff >> (16 - shift);
9ee6e8bb 4696 }
ad69471c
PB
4697 imm64 = imm | (((uint64_t)imm) << 32);
4698 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4699 }
4700 }
ad69471c 4701 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4702 }
4703 } else if (op == 15 || op == 16) {
4704 /* VCVT fixed-point. */
4705 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4706 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4707 if (op & 1) {
4708 if (u)
4373f3ce 4709 gen_vfp_ulto(0, shift);
9ee6e8bb 4710 else
4373f3ce 4711 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4712 } else {
4713 if (u)
4373f3ce 4714 gen_vfp_toul(0, shift);
9ee6e8bb 4715 else
4373f3ce 4716 gen_vfp_tosl(0, shift);
2c0262af 4717 }
4373f3ce 4718 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4719 }
4720 } else {
9ee6e8bb
PB
4721 return 1;
4722 }
4723 } else { /* (insn & 0x00380080) == 0 */
4724 int invert;
4725
4726 op = (insn >> 8) & 0xf;
4727 /* One register and immediate. */
4728 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4729 invert = (insn & (1 << 5)) != 0;
4730 switch (op) {
4731 case 0: case 1:
4732 /* no-op */
4733 break;
4734 case 2: case 3:
4735 imm <<= 8;
4736 break;
4737 case 4: case 5:
4738 imm <<= 16;
4739 break;
4740 case 6: case 7:
4741 imm <<= 24;
4742 break;
4743 case 8: case 9:
4744 imm |= imm << 16;
4745 break;
4746 case 10: case 11:
4747 imm = (imm << 8) | (imm << 24);
4748 break;
4749 case 12:
4750 imm = (imm << 8) | 0xff;
4751 break;
4752 case 13:
4753 imm = (imm << 16) | 0xffff;
4754 break;
4755 case 14:
4756 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4757 if (invert)
4758 imm = ~imm;
4759 break;
4760 case 15:
4761 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4762 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4763 break;
4764 }
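/* imm now holds the 8-bit encoded constant expanded according to the op
   (cmode) field: shifted byte/halfword forms, the 0x..ff / 0x..ffff "ones"
   forms, a replicated-byte form and (op 15) a float32 constant.  The
   per-byte mask form (op 14 with invert set) is expanded bit by bit in the
   per-pass loop below. */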
4765 if (invert)
4766 imm = ~imm;
4767
4768 if (op != 14 || !invert)
4769 gen_op_movl_T1_im(imm);
4770
4771 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4772 if (op & 1 && op < 12) {
ad69471c 4773 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4774 if (invert) {
4775 /* The immediate value has already been inverted, so
4776 BIC becomes AND. */
ad69471c 4777 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4778 } else {
ad69471c 4779 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4780 }
9ee6e8bb 4781 } else {
ad69471c
PB
4782 /* VMOV, VMVN. */
4783 tmp = new_tmp();
9ee6e8bb 4784 if (op == 14 && invert) {
ad69471c
PB
4785 uint32_t val;
4786 val = 0;
9ee6e8bb
PB
4787 for (n = 0; n < 4; n++) {
4788 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4789 val |= 0xff << (n * 8);
9ee6e8bb 4790 }
ad69471c
PB
4791 tcg_gen_movi_i32(tmp, val);
4792 } else {
4793 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4794 }
9ee6e8bb 4795 }
ad69471c 4796 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4797 }
4798 }
e4b3861d 4799 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4800 if (size != 3) {
4801 op = (insn >> 8) & 0xf;
4802 if ((insn & (1 << 6)) == 0) {
4803 /* Three registers of different lengths. */
4804 int src1_wide;
4805 int src2_wide;
4806 int prewiden;
4807 /* prewiden, src1_wide, src2_wide */
4808 static const int neon_3reg_wide[16][3] = {
4809 {1, 0, 0}, /* VADDL */
4810 {1, 1, 0}, /* VADDW */
4811 {1, 0, 0}, /* VSUBL */
4812 {1, 1, 0}, /* VSUBW */
4813 {0, 1, 1}, /* VADDHN */
4814 {0, 0, 0}, /* VABAL */
4815 {0, 1, 1}, /* VSUBHN */
4816 {0, 0, 0}, /* VABDL */
4817 {0, 0, 0}, /* VMLAL */
4818 {0, 0, 0}, /* VQDMLAL */
4819 {0, 0, 0}, /* VMLSL */
4820 {0, 0, 0}, /* VQDMLSL */
4821 {0, 0, 0}, /* Integer VMULL */
4822 {0, 0, 0}, /* VQDMULL */
4823 {0, 0, 0} /* Polynomial VMULL */
4824 };
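/* neon_3reg_wide flags: prewiden means the 32-bit source elements are
   widened to 64 bits before the operation; src1_wide/src2_wide mean the
   corresponding operand is already a double-width value and is loaded with
   neon_load_reg64 instead. */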
4825
4826 prewiden = neon_3reg_wide[op][0];
4827 src1_wide = neon_3reg_wide[op][1];
4828 src2_wide = neon_3reg_wide[op][2];
4829
ad69471c
PB
4830 if (size == 0 && (op == 9 || op == 11 || op == 13))
4831 return 1;
4832
9ee6e8bb
PB
4833 /* Avoid overlapping operands. Wide source operands are
4834 always aligned so will never overlap with wide
4835 destinations in problematic ways. */
8f8e3aa4
PB
4836 if (rd == rm && !src2_wide) {
4837 NEON_GET_REG(T0, rm, 1);
4838 gen_neon_movl_scratch_T0(2);
4839 } else if (rd == rn && !src1_wide) {
4840 NEON_GET_REG(T0, rn, 1);
4841 gen_neon_movl_scratch_T0(2);
9ee6e8bb 4842 }
a50f5b91 4843 TCGV_UNUSED(tmp3);
9ee6e8bb 4844 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4845 if (src1_wide) {
4846 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4847 TCGV_UNUSED(tmp);
9ee6e8bb 4848 } else {
ad69471c
PB
4849 if (pass == 1 && rd == rn) {
4850 gen_neon_movl_T0_scratch(2);
4851 tmp = new_tmp();
4852 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4853 } else {
ad69471c
PB
4854 tmp = neon_load_reg(rn, pass);
4855 }
4856 if (prewiden) {
4857 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4858 }
4859 }
ad69471c
PB
4860 if (src2_wide) {
4861 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4862 TCGV_UNUSED(tmp2);
9ee6e8bb 4863 } else {
ad69471c 4864 if (pass == 1 && rd == rm) {
8f8e3aa4 4865 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4866 tmp2 = new_tmp();
4867 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4868 } else {
ad69471c
PB
4869 tmp2 = neon_load_reg(rm, pass);
4870 }
4871 if (prewiden) {
4872 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4873 }
9ee6e8bb
PB
4874 }
4875 switch (op) {
4876 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4877 gen_neon_addl(size);
9ee6e8bb
PB
4878 break;
4879 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 4880 gen_neon_subl(size);
9ee6e8bb
PB
4881 break;
4882 case 5: case 7: /* VABAL, VABDL */
4883 switch ((size << 1) | u) {
ad69471c
PB
4884 case 0:
4885 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4886 break;
4887 case 1:
4888 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4889 break;
4890 case 2:
4891 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4892 break;
4893 case 3:
4894 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4895 break;
4896 case 4:
4897 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4898 break;
4899 case 5:
4900 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4901 break;
9ee6e8bb
PB
4902 default: abort();
4903 }
ad69471c
PB
4904 dead_tmp(tmp2);
4905 dead_tmp(tmp);
9ee6e8bb
PB
4906 break;
4907 case 8: case 9: case 10: case 11: case 12: case 13:
4908 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4909 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
4910 break;
4911 case 14: /* Polynomial VMULL */
4912 cpu_abort(env, "Polynomial VMULL not implemented");
4913
4914 default: /* 15 is RESERVED. */
4915 return 1;
4916 }
4917 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4918 /* Accumulate. */
4919 if (op == 10 || op == 11) {
ad69471c 4920 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4921 }
4922
9ee6e8bb 4923 if (op != 13) {
ad69471c 4924 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4925 }
4926
4927 switch (op) {
4928 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4929 gen_neon_addl(size);
9ee6e8bb
PB
4930 break;
4931 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4932 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4933 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4934 break;
9ee6e8bb
PB
4935 /* Fall through. */
4936 case 13: /* VQDMULL */
ad69471c 4937 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4938 break;
4939 default:
4940 abort();
4941 }
ad69471c 4942 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4943 } else if (op == 4 || op == 6) {
4944 /* Narrowing operation. */
ad69471c 4945 tmp = new_tmp();
9ee6e8bb
PB
4946 if (u) {
4947 switch (size) {
ad69471c
PB
4948 case 0:
4949 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4950 break;
4951 case 1:
4952 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4953 break;
4954 case 2:
4955 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4956 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4957 break;
9ee6e8bb
PB
4958 default: abort();
4959 }
4960 } else {
4961 switch (size) {
ad69471c
PB
4962 case 0:
4963 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4964 break;
4965 case 1:
4966 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4967 break;
4968 case 2:
4969 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4970 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4971 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4972 break;
9ee6e8bb
PB
4973 default: abort();
4974 }
4975 }
ad69471c
PB
4976 if (pass == 0) {
4977 tmp3 = tmp;
4978 } else {
4979 neon_store_reg(rd, 0, tmp3);
4980 neon_store_reg(rd, 1, tmp);
4981 }
9ee6e8bb
PB
4982 } else {
4983 /* Write back the result. */
ad69471c 4984 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4985 }
4986 }
4987 } else {
4988 /* Two registers and a scalar. */
4989 switch (op) {
4990 case 0: /* Integer VMLA scalar */
4991 case 1: /* Float VMLA scalar */
4992 case 4: /* Integer VMLS scalar */
4993 case 5: /* Floating point VMLS scalar */
4994 case 8: /* Integer VMUL scalar */
4995 case 9: /* Floating point VMUL scalar */
4996 case 12: /* VQDMULH scalar */
4997 case 13: /* VQRDMULH scalar */
4998 gen_neon_get_scalar(size, rm);
8f8e3aa4 4999 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
5000 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5001 if (pass != 0)
8f8e3aa4 5002 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
5003 NEON_GET_REG(T1, rn, pass);
5004 if (op == 12) {
5005 if (size == 1) {
ad69471c 5006 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 5007 } else {
ad69471c 5008 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5009 }
5010 } else if (op == 13) {
5011 if (size == 1) {
ad69471c 5012 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5013 } else {
ad69471c 5014 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5015 }
5016 } else if (op & 1) {
ad69471c 5017 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5018 } else {
5019 switch (size) {
ad69471c
PB
5020 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5021 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5022 case 2: gen_op_mul_T0_T1(); break;
5023 default: return 1;
5024 }
5025 }
5026 if (op < 8) {
5027 /* Accumulate. */
5028 NEON_GET_REG(T1, rd, pass);
5029 switch (op) {
5030 case 0:
5031 gen_neon_add(size);
5032 break;
5033 case 1:
ad69471c 5034 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5035 break;
5036 case 4:
ad69471c 5037 gen_neon_rsb(size);
9ee6e8bb
PB
5038 break;
5039 case 5:
ad69471c 5040 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5041 break;
5042 default:
5043 abort();
5044 }
5045 }
5046 NEON_SET_REG(T0, rd, pass);
5047 }
5048 break;
5049 case 2: /* VMLAL scalar */
5050 case 3: /* VQDMLAL scalar */
5051 case 6: /* VMLSL scalar */
5052 case 7: /* VQDMLSL scalar */
5053 case 10: /* VMULL scalar */
5054 case 11: /* VQDMULL scalar */
ad69471c
PB
5055 if (size == 0 && (op == 3 || op == 7 || op == 11))
5056 return 1;
5057
9ee6e8bb 5058 gen_neon_get_scalar(size, rm);
ad69471c
PB
5059 NEON_GET_REG(T1, rn, 1);
5060
9ee6e8bb 5061 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5062 if (pass == 0) {
5063 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5064 } else {
ad69471c
PB
5065 tmp = new_tmp();
5066 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5067 }
ad69471c
PB
5068 tmp2 = new_tmp();
5069 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5070 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5071 if (op == 6 || op == 7) {
ad69471c
PB
5072 gen_neon_negl(cpu_V0, size);
5073 }
5074 if (op != 11) {
5075 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5076 }
9ee6e8bb
PB
5077 switch (op) {
5078 case 2: case 6:
ad69471c 5079 gen_neon_addl(size);
9ee6e8bb
PB
5080 break;
5081 case 3: case 7:
ad69471c
PB
5082 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5083 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5084 break;
5085 case 10:
5086 /* no-op */
5087 break;
5088 case 11:
ad69471c 5089 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5090 break;
5091 default:
5092 abort();
5093 }
ad69471c 5094 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5095 }
5096 break;
5097 default: /* 14 and 15 are RESERVED */
5098 return 1;
5099 }
5100 }
5101 } else { /* size == 3 */
5102 if (!u) {
5103 /* Extract. */
9ee6e8bb 5104 imm = (insn >> 8) & 0xf;
ad69471c
PB
5105 count = q + 1;
5106
5107 if (imm > 7 && !q)
5108 return 1;
5109
5110 if (imm == 0) {
5111 neon_load_reg64(cpu_V0, rn);
5112 if (q) {
5113 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5114 }
ad69471c
PB
5115 } else if (imm == 8) {
5116 neon_load_reg64(cpu_V0, rn + 1);
5117 if (q) {
5118 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5119 }
ad69471c 5120 } else if (q) {
a7812ae4 5121 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5122 if (imm < 8) {
5123 neon_load_reg64(cpu_V0, rn);
a7812ae4 5124 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5125 } else {
5126 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5127 neon_load_reg64(tmp64, rm);
ad69471c
PB
5128 }
5129 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5130 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5131 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5132 if (imm < 8) {
5133 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5134 } else {
ad69471c
PB
5135 neon_load_reg64(cpu_V1, rm + 1);
5136 imm -= 8;
9ee6e8bb 5137 }
ad69471c 5138 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5139 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5140 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5141 } else {
a7812ae4 5142 /* BUGFIX */
ad69471c 5143 neon_load_reg64(cpu_V0, rn);
a7812ae4 5144 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5145 neon_load_reg64(cpu_V1, rm);
a7812ae4 5146 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5147 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5148 }
5149 neon_store_reg64(cpu_V0, rd);
5150 if (q) {
5151 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5152 }
5153 } else if ((insn & (1 << 11)) == 0) {
5154 /* Two register misc. */
5155 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5156 size = (insn >> 18) & 3;
5157 switch (op) {
5158 case 0: /* VREV64 */
5159 if (size == 3)
5160 return 1;
5161 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5162 NEON_GET_REG(T0, rm, pass * 2);
5163 NEON_GET_REG(T1, rm, pass * 2 + 1);
5164 switch (size) {
b0109805 5165 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5166 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5167 case 2: /* no-op */ break;
5168 default: abort();
5169 }
5170 NEON_SET_REG(T0, rd, pass * 2 + 1);
5171 if (size == 2) {
5172 NEON_SET_REG(T1, rd, pass * 2);
5173 } else {
5174 gen_op_movl_T0_T1();
5175 switch (size) {
b0109805 5176 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5177 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5178 default: abort();
5179 }
5180 NEON_SET_REG(T0, rd, pass * 2);
5181 }
5182 }
5183 break;
5184 case 4: case 5: /* VPADDL */
5185 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5186 if (size == 3)
5187 return 1;
ad69471c
PB
5188 for (pass = 0; pass < q + 1; pass++) {
5189 tmp = neon_load_reg(rm, pass * 2);
5190 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5191 tmp = neon_load_reg(rm, pass * 2 + 1);
5192 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5193 switch (size) {
5194 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5195 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5196 case 2: tcg_gen_add_i64(CPU_V001); break;
5197 default: abort();
5198 }
9ee6e8bb
PB
5199 if (op >= 12) {
5200 /* Accumulate. */
ad69471c
PB
5201 neon_load_reg64(cpu_V1, rd + pass);
5202 gen_neon_addl(size);
9ee6e8bb 5203 }
ad69471c 5204 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5205 }
5206 break;
5207 case 33: /* VTRN */
5208 if (size == 2) {
5209 for (n = 0; n < (q ? 4 : 2); n += 2) {
5210 NEON_GET_REG(T0, rm, n);
5211 NEON_GET_REG(T1, rd, n + 1);
5212 NEON_SET_REG(T1, rm, n);
5213 NEON_SET_REG(T0, rd, n + 1);
5214 }
5215 } else {
5216 goto elementwise;
5217 }
5218 break;
5219 case 34: /* VUZP */
5220 /* Reg Before After
5221 Rd A3 A2 A1 A0 B2 B0 A2 A0
5222 Rm B3 B2 B1 B0 B3 B1 A3 A1
5223 */
5224 if (size == 3)
5225 return 1;
5226 gen_neon_unzip(rd, q, 0, size);
5227 gen_neon_unzip(rm, q, 4, size);
5228 if (q) {
5229 static int unzip_order_q[8] =
5230 {0, 2, 4, 6, 1, 3, 5, 7};
5231 for (n = 0; n < 8; n++) {
5232 int reg = (n < 4) ? rd : rm;
5233 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5234 NEON_SET_REG(T0, reg, n % 4);
5235 }
5236 } else {
5237 static int unzip_order[4] =
5238 {0, 4, 1, 5};
5239 for (n = 0; n < 4; n++) {
5240 int reg = (n < 2) ? rd : rm;
5241 gen_neon_movl_T0_scratch(unzip_order[n]);
5242 NEON_SET_REG(T0, reg, n % 2);
5243 }
5244 }
5245 break;
5246 case 35: /* VZIP */
5247 /* Reg Before After
5248 Rd A3 A2 A1 A0 B1 A1 B0 A0
5249 Rm B3 B2 B1 B0 B3 A3 B2 A2
5250 */
5251 if (size == 3)
5252 return 1;
5253 count = (q ? 4 : 2);
5254 for (n = 0; n < count; n++) {
5255 NEON_GET_REG(T0, rd, n);
5256 NEON_GET_REG(T1, rm, n);
5257 switch (size) {
ad69471c
PB
5258 case 0: gen_helper_neon_zip_u8(); break;
5259 case 1: gen_helper_neon_zip_u16(); break;
9ee6e8bb
PB
5260 case 2: /* no-op */; break;
5261 default: abort();
5262 }
5263 gen_neon_movl_scratch_T0(n * 2);
5264 gen_neon_movl_scratch_T1(n * 2 + 1);
5265 }
5266 for (n = 0; n < count * 2; n++) {
5267 int reg = (n < count) ? rd : rm;
5268 gen_neon_movl_T0_scratch(n);
5269 NEON_SET_REG(T0, reg, n % count);
5270 }
5271 break;
5272 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5273 if (size == 3)
5274 return 1;
a50f5b91 5275 TCGV_UNUSED(tmp2);
9ee6e8bb 5276 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5277 neon_load_reg64(cpu_V0, rm + pass);
5278 tmp = new_tmp();
9ee6e8bb 5279 if (op == 36 && q == 0) {
ad69471c 5280 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5281 } else if (q) {
ad69471c 5282 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5283 } else {
ad69471c
PB
5284 gen_neon_narrow_sats(size, tmp, cpu_V0);
5285 }
5286 if (pass == 0) {
5287 tmp2 = tmp;
5288 } else {
5289 neon_store_reg(rd, 0, tmp2);
5290 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5291 }
9ee6e8bb
PB
5292 }
5293 break;
5294 case 38: /* VSHLL */
ad69471c 5295 if (q || size == 3)
9ee6e8bb 5296 return 1;
ad69471c
PB
5297 tmp = neon_load_reg(rm, 0);
5298 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5299 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5300 if (pass == 1)
5301 tmp = tmp2;
5302 gen_neon_widen(cpu_V0, tmp, size, 1);
5303 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5304 }
5305 break;
5306 default:
5307 elementwise:
5308 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5309 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5310 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5311 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5312 } else {
5313 NEON_GET_REG(T0, rm, pass);
5314 }
5315 switch (op) {
5316 case 1: /* VREV32 */
5317 switch (size) {
b0109805 5318 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5319 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5320 default: return 1;
5321 }
5322 break;
5323 case 2: /* VREV16 */
5324 if (size != 0)
5325 return 1;
3670669c 5326 gen_rev16(cpu_T[0]);
9ee6e8bb 5327 break;
9ee6e8bb
PB
5328 case 8: /* CLS */
5329 switch (size) {
ad69471c
PB
5330 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5331 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5332 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5333 default: return 1;
5334 }
5335 break;
5336 case 9: /* CLZ */
5337 switch (size) {
ad69471c
PB
5338 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5339 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5340 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5341 default: return 1;
5342 }
5343 break;
5344 case 10: /* CNT */
5345 if (size != 0)
5346 return 1;
ad69471c 5347 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5348 break;
5349 case 11: /* VNOT */
5350 if (size != 0)
5351 return 1;
5352 gen_op_notl_T0();
5353 break;
5354 case 14: /* VQABS */
5355 switch (size) {
ad69471c
PB
5356 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5357 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5358 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5359 default: return 1;
5360 }
5361 break;
5362 case 15: /* VQNEG */
5363 switch (size) {
ad69471c
PB
5364 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5365 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5366 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5367 default: return 1;
5368 }
5369 break;
5370 case 16: case 19: /* VCGT #0, VCLE #0 */
5371 gen_op_movl_T1_im(0);
5372 switch(size) {
ad69471c
PB
5373 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5374 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5375 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5376 default: return 1;
5377 }
5378 if (op == 19)
5379 gen_op_notl_T0();
5380 break;
5381 case 17: case 20: /* VCGE #0, VCLT #0 */
5382 gen_op_movl_T1_im(0);
5383 switch(size) {
ad69471c
PB
5384 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5385 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5386 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5387 default: return 1;
5388 }
5389 if (op == 20)
5390 gen_op_notl_T0();
5391 break;
5392 case 18: /* VCEQ #0 */
5393 gen_op_movl_T1_im(0);
5394 switch(size) {
ad69471c
PB
5395 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5396 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5397 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5398 default: return 1;
5399 }
5400 break;
5401 case 22: /* VABS */
5402 switch(size) {
ad69471c
PB
5403 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5404 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5405 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5406 default: return 1;
5407 }
5408 break;
5409 case 23: /* VNEG */
5410 gen_op_movl_T1_im(0);
ad69471c
PB
5411 if (size == 3)
5412 return 1;
5413 gen_neon_rsb(size);
9ee6e8bb
PB
5414 break;
5415 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5416 gen_op_movl_T1_im(0);
ad69471c 5417 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5418 if (op == 27)
5419 gen_op_notl_T0();
5420 break;
5421 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5422 gen_op_movl_T1_im(0);
ad69471c 5423 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5424 if (op == 28)
5425 gen_op_notl_T0();
5426 break;
5427 case 26: /* Float VCEQ #0 */
5428 gen_op_movl_T1_im(0);
ad69471c 5429 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5430 break;
5431 case 30: /* Float VABS */
4373f3ce 5432 gen_vfp_abs(0);
9ee6e8bb
PB
5433 break;
5434 case 31: /* Float VNEG */
4373f3ce 5435 gen_vfp_neg(0);
9ee6e8bb
PB
5436 break;
5437 case 32: /* VSWP */
5438 NEON_GET_REG(T1, rd, pass);
5439 NEON_SET_REG(T1, rm, pass);
5440 break;
5441 case 33: /* VTRN */
5442 NEON_GET_REG(T1, rd, pass);
5443 switch (size) {
ad69471c
PB
5444 case 0: gen_helper_neon_trn_u8(); break;
5445 case 1: gen_helper_neon_trn_u16(); break;
9ee6e8bb
PB
5446 case 2: abort();
5447 default: return 1;
5448 }
5449 NEON_SET_REG(T1, rm, pass);
5450 break;
5451 case 56: /* Integer VRECPE */
4373f3ce 5452 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5453 break;
5454 case 57: /* Integer VRSQRTE */
4373f3ce 5455 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5456 break;
5457 case 58: /* Float VRECPE */
4373f3ce 5458 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5459 break;
5460 case 59: /* Float VRSQRTE */
4373f3ce 5461 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5462 break;
5463 case 60: /* VCVT.F32.S32 */
4373f3ce 5464 gen_vfp_tosiz(0);
9ee6e8bb
PB
5465 break;
5466 case 61: /* VCVT.F32.U32 */
4373f3ce 5467 gen_vfp_touiz(0);
9ee6e8bb
PB
5468 break;
5469 case 62: /* VCVT.S32.F32 */
4373f3ce 5470 gen_vfp_sito(0);
9ee6e8bb
PB
5471 break;
5472 case 63: /* VCVT.U32.F32 */
4373f3ce 5473 gen_vfp_uito(0);
9ee6e8bb
PB
5474 break;
5475 default:
5476 /* Reserved: 21, 29, 39-56 */
5477 return 1;
5478 }
5479 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5480 tcg_gen_st_f32(cpu_F0s, cpu_env,
5481 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5482 } else {
5483 NEON_SET_REG(T0, rd, pass);
5484 }
5485 }
5486 break;
5487 }
5488 } else if ((insn & (1 << 10)) == 0) {
5489 /* VTBL, VTBX. */
3018f259 5490 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5491 if (insn & (1 << 6)) {
8f8e3aa4 5492 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5493 } else {
8f8e3aa4
PB
5494 tmp = new_tmp();
5495 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5496 }
8f8e3aa4
PB
5497 tmp2 = neon_load_reg(rm, 0);
5498 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5499 tcg_const_i32(n));
3018f259 5500 dead_tmp(tmp);
9ee6e8bb 5501 if (insn & (1 << 6)) {
8f8e3aa4 5502 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5503 } else {
8f8e3aa4
PB
5504 tmp = new_tmp();
5505 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5506 }
8f8e3aa4
PB
5507 tmp3 = neon_load_reg(rm, 1);
5508 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5509 tcg_const_i32(n));
5510 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5511 neon_store_reg(rd, 1, tmp3);
5512 dead_tmp(tmp);
9ee6e8bb
PB
5513 } else if ((insn & 0x380) == 0) {
5514 /* VDUP */
5515 if (insn & (1 << 19)) {
5516 NEON_GET_REG(T0, rm, 1);
5517 } else {
5518 NEON_GET_REG(T0, rm, 0);
5519 }
5520 if (insn & (1 << 16)) {
ad69471c 5521 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5522 } else if (insn & (1 << 17)) {
5523 if ((insn >> 18) & 1)
ad69471c 5524 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5525 else
ad69471c 5526 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5527 }
5528 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5529 NEON_SET_REG(T0, rd, pass);
5530 }
5531 } else {
5532 return 1;
5533 }
5534 }
5535 }
5536 return 0;
5537}
5538
fe1479c3
PB
5539static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5540{
5541 int crn = (insn >> 16) & 0xf;
5542 int crm = insn & 0xf;
5543 int op1 = (insn >> 21) & 7;
5544 int op2 = (insn >> 5) & 7;
5545 int rt = (insn >> 12) & 0xf;
5546 TCGv tmp;
5547
5548 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5549 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5550 /* TEECR */
5551 if (IS_USER(s))
5552 return 1;
5553 tmp = load_cpu_field(teecr);
5554 store_reg(s, rt, tmp);
5555 return 0;
5556 }
5557 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5558 /* TEEHBR */
5559 if (IS_USER(s) && (env->teecr & 1))
5560 return 1;
5561 tmp = load_cpu_field(teehbr);
5562 store_reg(s, rt, tmp);
5563 return 0;
5564 }
5565 }
5566 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5567 op1, crn, crm, op2);
5568 return 1;
5569}
5570
5571static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5572{
5573 int crn = (insn >> 16) & 0xf;
5574 int crm = insn & 0xf;
5575 int op1 = (insn >> 21) & 7;
5576 int op2 = (insn >> 5) & 7;
5577 int rt = (insn >> 12) & 0xf;
5578 TCGv tmp;
5579
5580 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5581 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5582 /* TEECR */
5583 if (IS_USER(s))
5584 return 1;
5585 tmp = load_reg(s, rt);
5586 gen_helper_set_teecr(cpu_env, tmp);
5587 dead_tmp(tmp);
5588 return 0;
5589 }
5590 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5591 /* TEEHBR */
5592 if (IS_USER(s) && (env->teecr & 1))
5593 return 1;
5594 tmp = load_reg(s, rt);
5595 store_cpu_field(tmp, teehbr);
5596 return 0;
5597 }
5598 }
5599 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5600 op1, crn, crm, op2);
5601 return 1;
5602}
5603
9ee6e8bb
PB
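/* Coprocessor dispatch: cp0/cp1 go to iwMMXt or XScale DSP handling, cp10
   and cp11 to VFP, cp14 to the read/write handlers above (which currently
   only know about the ThumbEE TEECR/TEEHBR registers), cp15 to the system
   control coprocessor, and anything else to the board hook. */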
5604static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5605{
5606 int cpnum;
5607
5608 cpnum = (insn >> 8) & 0xf;
5609 if (arm_feature(env, ARM_FEATURE_XSCALE)
5610 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5611 return 1;
5612
5613 switch (cpnum) {
5614 case 0:
5615 case 1:
5616 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5617 return disas_iwmmxt_insn(env, s, insn);
5618 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5619 return disas_dsp_insn(env, s, insn);
5620 }
5621 return 1;
5622 case 10:
5623 case 11:
5624 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5625 case 14:
5626 /* Coprocessors 7-15 are architecturally reserved by ARM.
5627 Unfortunately Intel decided to ignore this. */
5628 if (arm_feature(env, ARM_FEATURE_XSCALE))
5629 goto board;
5630 if (insn & (1 << 20))
5631 return disas_cp14_read(env, s, insn);
5632 else
5633 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5634 case 15:
5635 return disas_cp15_insn (env, s, insn);
5636 default:
fe1479c3 5637 board:
9ee6e8bb
PB
5638 /* Unknown coprocessor. See if the board has hooked it. */
5639 return disas_cp_insn (env, s, insn);
5640 }
5641}
5642
5e3f878a
PB
5643
5644/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5645static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5646{
5647 TCGv tmp;
5648 tmp = new_tmp();
5649 tcg_gen_trunc_i64_i32(tmp, val);
5650 store_reg(s, rlow, tmp);
5651 tmp = new_tmp();
5652 tcg_gen_shri_i64(val, val, 32);
5653 tcg_gen_trunc_i64_i32(tmp, val);
5654 store_reg(s, rhigh, tmp);
5655}
5656
5657/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5658static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5659{
a7812ae4 5660 TCGv_i64 tmp;
5e3f878a
PB
5661 TCGv tmp2;
5662
36aa55dc 5663 /* Load value and extend to 64 bits. */
a7812ae4 5664 tmp = tcg_temp_new_i64();
5e3f878a
PB
5665 tmp2 = load_reg(s, rlow);
5666 tcg_gen_extu_i32_i64(tmp, tmp2);
5667 dead_tmp(tmp2);
5668 tcg_gen_add_i64(val, val, tmp);
5669}
5670
5671/* load and add a 64-bit value from a register pair. */
a7812ae4 5672static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5673{
a7812ae4 5674 TCGv_i64 tmp;
36aa55dc
PB
5675 TCGv tmpl;
5676 TCGv tmph;
5e3f878a
PB
5677
5678 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5679 tmpl = load_reg(s, rlow);
5680 tmph = load_reg(s, rhigh);
a7812ae4 5681 tmp = tcg_temp_new_i64();
36aa55dc
PB
5682 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5683 dead_tmp(tmpl);
5684 dead_tmp(tmph);
5e3f878a
PB
5685 tcg_gen_add_i64(val, val, tmp);
5686}
5687
5688/* Set N and Z flags from a 64-bit value. */
a7812ae4 5689static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5690{
5691 TCGv tmp = new_tmp();
5692 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5693 gen_logic_CC(tmp);
5694 dead_tmp(tmp);
5e3f878a
PB
5695}
5696
9ee6e8bb
PB
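/* Decode a single 32-bit ARM instruction.  cond == 0xf selects the
   unconditional space (NEON data processing and load/store, PLD, SETEND,
   CLREX/barriers, SRS, RFE, immediate BLX, coprocessor transfers, CPS);
   for any other condition a conditional branch to s->condlabel is emitted
   first and the instruction body is generated as the fall-through path. */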
5697static void disas_arm_insn(CPUState * env, DisasContext *s)
5698{
5699 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5700 TCGv tmp;
3670669c 5701 TCGv tmp2;
6ddbc6e4 5702 TCGv tmp3;
b0109805 5703 TCGv addr;
a7812ae4 5704 TCGv_i64 tmp64;
9ee6e8bb
PB
5705
5706 insn = ldl_code(s->pc);
5707 s->pc += 4;
5708
5709 /* M variants do not implement ARM mode. */
5710 if (IS_M(env))
5711 goto illegal_op;
5712 cond = insn >> 28;
5713 if (cond == 0xf){
5714 /* Unconditional instructions. */
5715 if (((insn >> 25) & 7) == 1) {
5716 /* NEON Data processing. */
5717 if (!arm_feature(env, ARM_FEATURE_NEON))
5718 goto illegal_op;
5719
5720 if (disas_neon_data_insn(env, s, insn))
5721 goto illegal_op;
5722 return;
5723 }
5724 if ((insn & 0x0f100000) == 0x04000000) {
5725 /* NEON load/store. */
5726 if (!arm_feature(env, ARM_FEATURE_NEON))
5727 goto illegal_op;
5728
5729 if (disas_neon_ls_insn(env, s, insn))
5730 goto illegal_op;
5731 return;
5732 }
5733 if ((insn & 0x0d70f000) == 0x0550f000)
5734 return; /* PLD */
5735 else if ((insn & 0x0ffffdff) == 0x01010000) {
5736 ARCH(6);
5737 /* setend */
5738 if (insn & (1 << 9)) {
5739 /* BE8 mode not implemented. */
5740 goto illegal_op;
5741 }
5742 return;
5743 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5744 switch ((insn >> 4) & 0xf) {
5745 case 1: /* clrex */
5746 ARCH(6K);
8f8e3aa4 5747 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5748 return;
5749 case 4: /* dsb */
5750 case 5: /* dmb */
5751 case 6: /* isb */
5752 ARCH(7);
5753 /* We don't emulate caches so these are a no-op. */
5754 return;
5755 default:
5756 goto illegal_op;
5757 }
5758 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5759 /* srs */
5760 uint32_t offset;
5761 if (IS_USER(s))
5762 goto illegal_op;
5763 ARCH(6);
5764 op1 = (insn & 0x1f);
5765 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5766 addr = load_reg(s, 13);
9ee6e8bb 5767 } else {
b0109805
PB
5768 addr = new_tmp();
5769 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5770 }
5771 i = (insn >> 23) & 3;
5772 switch (i) {
5773 case 0: offset = -4; break; /* DA */
5774 case 1: offset = -8; break; /* DB */
5775 case 2: offset = 0; break; /* IA */
5776 case 3: offset = 4; break; /* IB */
5777 default: abort();
5778 }
5779 if (offset)
b0109805
PB
5780 tcg_gen_addi_i32(addr, addr, offset);
5781 tmp = load_reg(s, 14);
5782 gen_st32(tmp, addr, 0);
5783 tmp = new_tmp();
5784 gen_helper_cpsr_read(tmp);
5785 tcg_gen_addi_i32(addr, addr, 4);
5786 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5787 if (insn & (1 << 21)) {
5788 /* Base writeback. */
5789 switch (i) {
5790 case 0: offset = -8; break;
5791 case 1: offset = -4; break;
5792 case 2: offset = 4; break;
5793 case 3: offset = 0; break;
5794 default: abort();
5795 }
5796 if (offset)
b0109805 5797 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5798 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5799 gen_movl_reg_T1(s, 13);
5800 } else {
b0109805 5801 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5802 }
b0109805
PB
5803 } else {
5804 dead_tmp(addr);
9ee6e8bb
PB
5805 }
5806 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5807 /* rfe */
5808 uint32_t offset;
5809 if (IS_USER(s))
5810 goto illegal_op;
5811 ARCH(6);
5812 rn = (insn >> 16) & 0xf;
b0109805 5813 addr = load_reg(s, rn);
9ee6e8bb
PB
5814 i = (insn >> 23) & 3;
5815 switch (i) {
b0109805
PB
5816 case 0: offset = -4; break; /* DA */
5817 case 1: offset = -8; break; /* DB */
5818 case 2: offset = 0; break; /* IA */
5819 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5820 default: abort();
5821 }
5822 if (offset)
b0109805
PB
5823 tcg_gen_addi_i32(addr, addr, offset);
5824 /* Load PC into tmp and CPSR into tmp2. */
5825 tmp = gen_ld32(addr, 0);
5826 tcg_gen_addi_i32(addr, addr, 4);
5827 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5828 if (insn & (1 << 21)) {
5829 /* Base writeback. */
5830 switch (i) {
b0109805
PB
5831 case 0: offset = -8; break;
5832 case 1: offset = -4; break;
5833 case 2: offset = 4; break;
5834 case 3: offset = 0; break;
9ee6e8bb
PB
5835 default: abort();
5836 }
5837 if (offset)
b0109805
PB
5838 tcg_gen_addi_i32(addr, addr, offset);
5839 store_reg(s, rn, addr);
5840 } else {
5841 dead_tmp(addr);
9ee6e8bb 5842 }
b0109805 5843 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5844 } else if ((insn & 0x0e000000) == 0x0a000000) {
5845 /* branch link and change to thumb (blx <offset>) */
5846 int32_t offset;
5847
5848 val = (uint32_t)s->pc;
d9ba4830
PB
5849 tmp = new_tmp();
5850 tcg_gen_movi_i32(tmp, val);
5851 store_reg(s, 14, tmp);
9ee6e8bb
PB
5852 /* Sign-extend the 24-bit offset */
5853 offset = (((int32_t)insn) << 8) >> 8;
5854 /* offset * 4 + bit24 * 2 + (thumb bit) */
5855 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5856 /* pipeline offset */
5857 val += 4;
d9ba4830 5858 gen_bx_im(s, val);
9ee6e8bb
PB
5859 return;
5860 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5861 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5862 /* iWMMXt register transfer. */
5863 if (env->cp15.c15_cpar & (1 << 1))
5864 if (!disas_iwmmxt_insn(env, s, insn))
5865 return;
5866 }
5867 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5868 /* Coprocessor double register transfer. */
5869 } else if ((insn & 0x0f000010) == 0x0e000010) {
5870 /* Additional coprocessor register transfer. */
7997d92f 5871 } else if ((insn & 0x0ff10020) == 0x01000000) {
5872 uint32_t mask;
5873 uint32_t val;
5874 /* cps (privileged) */
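/* Decoder note: bit 19 enables changing the A/I/F masks selected by
   bits 8..6, bit 18 decides whether they are set or cleared, and
   bit 17 additionally requests a mode change to the low five bits. */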
5875 if (IS_USER(s))
5876 return;
5877 mask = val = 0;
5878 if (insn & (1 << 19)) {
5879 if (insn & (1 << 8))
5880 mask |= CPSR_A;
5881 if (insn & (1 << 7))
5882 mask |= CPSR_I;
5883 if (insn & (1 << 6))
5884 mask |= CPSR_F;
5885 if (insn & (1 << 18))
5886 val |= mask;
5887 }
7997d92f 5888 if (insn & (1 << 17)) {
5889 mask |= CPSR_M;
5890 val |= (insn & 0x1f);
5891 }
5892 if (mask) {
5893 gen_op_movl_T0_im(val);
5894 gen_set_psr_T0(s, mask, 0);
5895 }
5896 return;
5897 }
5898 goto illegal_op;
5899 }
5900 if (cond != 0xe) {
/* If the condition is not "always", generate a conditional jump to
   the next instruction. */
5903 s->condlabel = gen_new_label();
d9ba4830 5904 gen_test_cc(cond ^ 1, s->condlabel);
5905 s->condjmp = 1;
5906 }
5907 if ((insn & 0x0f900000) == 0x03000000) {
5908 if ((insn & (1 << 21)) == 0) {
5909 ARCH(6T2);
5910 rd = (insn >> 12) & 0xf;
5911 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
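/* val is the 16-bit immediate assembled from imm4:imm12. MOVW writes
   it zero-extended to Rd; MOVT keeps the low halfword of Rd and
   replaces the top halfword, e.g. MOVW r0, #0xabcd followed by
   MOVT r0, #0x1234 builds 0x1234abcd (illustrative example). */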
5912 if ((insn & (1 << 22)) == 0) {
5913 /* MOVW */
5914 tmp = new_tmp();
5915 tcg_gen_movi_i32(tmp, val);
5916 } else {
5917 /* MOVT */
5e3f878a 5918 tmp = load_reg(s, rd);
86831435 5919 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5920 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5921 }
5e3f878a 5922 store_reg(s, rd, tmp);
5923 } else {
5924 if (((insn >> 12) & 0xf) != 0xf)
5925 goto illegal_op;
5926 if (((insn >> 16) & 0xf) == 0) {
5927 gen_nop_hint(s, insn & 0xff);
5928 } else {
5929 /* CPSR = immediate */
5930 val = insn & 0xff;
5931 shift = ((insn >> 8) & 0xf) * 2;
5932 if (shift)
5933 val = (val >> shift) | (val << (32 - shift));
5934 gen_op_movl_T0_im(val);
5935 i = ((insn & (1 << 22)) != 0);
5936 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5937 goto illegal_op;
5938 }
5939 }
5940 } else if ((insn & 0x0f900000) == 0x01000000
5941 && (insn & 0x00000090) != 0x00000090) {
5942 /* miscellaneous instructions */
5943 op1 = (insn >> 21) & 3;
5944 sh = (insn >> 4) & 0xf;
5945 rm = insn & 0xf;
5946 switch (sh) {
5947 case 0x0: /* move program status register */
5948 if (op1 & 1) {
5949 /* PSR = reg */
5950 gen_movl_T0_reg(s, rm);
5951 i = ((op1 & 2) != 0);
5952 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5953 goto illegal_op;
5954 } else {
5955 /* reg = PSR */
5956 rd = (insn >> 12) & 0xf;
5957 if (op1 & 2) {
5958 if (IS_USER(s))
5959 goto illegal_op;
d9ba4830 5960 tmp = load_cpu_field(spsr);
9ee6e8bb 5961 } else {
5962 tmp = new_tmp();
5963 gen_helper_cpsr_read(tmp);
9ee6e8bb 5964 }
d9ba4830 5965 store_reg(s, rd, tmp);
5966 }
5967 break;
5968 case 0x1:
5969 if (op1 == 1) {
5970 /* branch/exchange thumb (bx). */
5971 tmp = load_reg(s, rm);
5972 gen_bx(s, tmp);
5973 } else if (op1 == 3) {
5974 /* clz */
5975 rd = (insn >> 12) & 0xf;
5976 tmp = load_reg(s, rm);
5977 gen_helper_clz(tmp, tmp);
5978 store_reg(s, rd, tmp);
5979 } else {
5980 goto illegal_op;
5981 }
5982 break;
5983 case 0x2:
5984 if (op1 == 1) {
5985 ARCH(5J); /* bxj */
5986 /* Trivial implementation equivalent to bx. */
5987 tmp = load_reg(s, rm);
5988 gen_bx(s, tmp);
5989 } else {
5990 goto illegal_op;
5991 }
5992 break;
5993 case 0x3:
5994 if (op1 != 1)
5995 goto illegal_op;
5996
5997 /* branch link/exchange thumb (blx) */
5998 tmp = load_reg(s, rm);
5999 tmp2 = new_tmp();
6000 tcg_gen_movi_i32(tmp2, s->pc);
6001 store_reg(s, 14, tmp2);
6002 gen_bx(s, tmp);
6003 break;
6004 case 0x5: /* saturating add/subtract */
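/* QADD/QSUB family: bit 22 (op1 & 2) selects the doubling forms
   (QDADD/QDSUB) and bit 21 (op1 & 1) selects subtract; the saturate
   helpers also set the Q flag when the result saturates. */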
6005 rd = (insn >> 12) & 0xf;
6006 rn = (insn >> 16) & 0xf;
b40d0353 6007 tmp = load_reg(s, rm);
5e3f878a 6008 tmp2 = load_reg(s, rn);
9ee6e8bb 6009 if (op1 & 2)
5e3f878a 6010 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6011 if (op1 & 1)
5e3f878a 6012 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6013 else
6014 gen_helper_add_saturate(tmp, tmp, tmp2);
6015 dead_tmp(tmp2);
6016 store_reg(s, rd, tmp);
6017 break;
6018 case 7: /* bkpt */
6019 gen_set_condexec(s);
5e3f878a 6020 gen_set_pc_im(s->pc - 4);
d9ba4830 6021 gen_exception(EXCP_BKPT);
6022 s->is_jmp = DISAS_JUMP;
6023 break;
6024 case 0x8: /* signed multiply */
6025 case 0xa:
6026 case 0xc:
6027 case 0xe:
6028 rs = (insn >> 8) & 0xf;
6029 rn = (insn >> 12) & 0xf;
6030 rd = (insn >> 16) & 0xf;
6031 if (op1 == 1) {
6032 /* (32 * 16) >> 16 */
6033 tmp = load_reg(s, rm);
6034 tmp2 = load_reg(s, rs);
9ee6e8bb 6035 if (sh & 4)
5e3f878a 6036 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6037 else
5e3f878a 6038 gen_sxth(tmp2);
6039 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6040 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6041 tmp = new_tmp();
a7812ae4 6042 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6043 if ((sh & 2) == 0) {
6044 tmp2 = load_reg(s, rn);
6045 gen_helper_add_setq(tmp, tmp, tmp2);
6046 dead_tmp(tmp2);
9ee6e8bb 6047 }
5e3f878a 6048 store_reg(s, rd, tmp);
6049 } else {
6050 /* 16 * 16 */
6051 tmp = load_reg(s, rm);
6052 tmp2 = load_reg(s, rs);
6053 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6054 dead_tmp(tmp2);
9ee6e8bb 6055 if (op1 == 2) {
6056 tmp64 = tcg_temp_new_i64();
6057 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6058 dead_tmp(tmp);
6059 gen_addq(s, tmp64, rn, rd);
6060 gen_storeq_reg(s, rn, rd, tmp64);
6061 } else {
6062 if (op1 == 0) {
6063 tmp2 = load_reg(s, rn);
6064 gen_helper_add_setq(tmp, tmp, tmp2);
6065 dead_tmp(tmp2);
9ee6e8bb 6066 }
5e3f878a 6067 store_reg(s, rd, tmp);
6068 }
6069 }
6070 break;
6071 default:
6072 goto illegal_op;
6073 }
6074 } else if (((insn & 0x0e000000) == 0 &&
6075 (insn & 0x00000090) != 0x90) ||
6076 ((insn & 0x0e000000) == (1 << 25))) {
6077 int set_cc, logic_cc, shiftop;
6078
6079 op1 = (insn >> 21) & 0xf;
6080 set_cc = (insn >> 20) & 1;
6081 logic_cc = table_logic_cc[op1] & set_cc;
6082
6083 /* data processing instruction */
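/* The second operand is either an 8-bit immediate rotated right by
   twice the rotate field, or a register shifted by an immediate or by
   another register. For logical ops that set flags, the shifter
   carry-out becomes the C flag (gen_set_CF_bit31 / shift helpers). */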
6084 if (insn & (1 << 25)) {
6085 /* immediate operand */
6086 val = insn & 0xff;
6087 shift = ((insn >> 8) & 0xf) * 2;
6088 if (shift)
6089 val = (val >> shift) | (val << (32 - shift));
6090 gen_op_movl_T1_im(val);
6091 if (logic_cc && shift)
b26eefb6 6092 gen_set_CF_bit31(cpu_T[1]);
6093 } else {
6094 /* register */
6095 rm = (insn) & 0xf;
6096 gen_movl_T1_reg(s, rm);
6097 shiftop = (insn >> 5) & 3;
6098 if (!(insn & (1 << 4))) {
6099 shift = (insn >> 7) & 0x1f;
9a119ff6 6100 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
6101 } else {
6102 rs = (insn >> 8) & 0xf;
6103 tmp = load_reg(s, rs);
6104 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
6105 }
6106 }
6107 if (op1 != 0x0f && op1 != 0x0d) {
6108 rn = (insn >> 16) & 0xf;
6109 gen_movl_T0_reg(s, rn);
6110 }
6111 rd = (insn >> 12) & 0xf;
6112 switch(op1) {
6113 case 0x00:
6114 gen_op_andl_T0_T1();
6115 gen_movl_reg_T0(s, rd);
6116 if (logic_cc)
6117 gen_op_logic_T0_cc();
6118 break;
6119 case 0x01:
6120 gen_op_xorl_T0_T1();
6121 gen_movl_reg_T0(s, rd);
6122 if (logic_cc)
6123 gen_op_logic_T0_cc();
6124 break;
6125 case 0x02:
6126 if (set_cc && rd == 15) {
6127 /* SUBS r15, ... is used for exception return. */
6128 if (IS_USER(s))
6129 goto illegal_op;
6130 gen_op_subl_T0_T1_cc();
6131 gen_exception_return(s);
6132 } else {
6133 if (set_cc)
6134 gen_op_subl_T0_T1_cc();
6135 else
6136 gen_op_subl_T0_T1();
6137 gen_movl_reg_T0(s, rd);
6138 }
6139 break;
6140 case 0x03:
6141 if (set_cc)
6142 gen_op_rsbl_T0_T1_cc();
6143 else
6144 gen_op_rsbl_T0_T1();
6145 gen_movl_reg_T0(s, rd);
6146 break;
6147 case 0x04:
6148 if (set_cc)
6149 gen_op_addl_T0_T1_cc();
6150 else
6151 gen_op_addl_T0_T1();
6152 gen_movl_reg_T0(s, rd);
6153 break;
6154 case 0x05:
6155 if (set_cc)
6156 gen_op_adcl_T0_T1_cc();
6157 else
b26eefb6 6158 gen_adc_T0_T1();
6159 gen_movl_reg_T0(s, rd);
6160 break;
6161 case 0x06:
6162 if (set_cc)
6163 gen_op_sbcl_T0_T1_cc();
6164 else
3670669c 6165 gen_sbc_T0_T1();
6166 gen_movl_reg_T0(s, rd);
6167 break;
6168 case 0x07:
6169 if (set_cc)
6170 gen_op_rscl_T0_T1_cc();
6171 else
3670669c 6172 gen_rsc_T0_T1();
6173 gen_movl_reg_T0(s, rd);
6174 break;
6175 case 0x08:
6176 if (set_cc) {
6177 gen_op_andl_T0_T1();
6178 gen_op_logic_T0_cc();
6179 }
6180 break;
6181 case 0x09:
6182 if (set_cc) {
6183 gen_op_xorl_T0_T1();
6184 gen_op_logic_T0_cc();
6185 }
6186 break;
6187 case 0x0a:
6188 if (set_cc) {
6189 gen_op_subl_T0_T1_cc();
6190 }
6191 break;
6192 case 0x0b:
6193 if (set_cc) {
6194 gen_op_addl_T0_T1_cc();
6195 }
6196 break;
6197 case 0x0c:
6198 gen_op_orl_T0_T1();
6199 gen_movl_reg_T0(s, rd);
6200 if (logic_cc)
6201 gen_op_logic_T0_cc();
6202 break;
6203 case 0x0d:
6204 if (logic_cc && rd == 15) {
6205 /* MOVS r15, ... is used for exception return. */
6206 if (IS_USER(s))
6207 goto illegal_op;
6208 gen_op_movl_T0_T1();
6209 gen_exception_return(s);
6210 } else {
6211 gen_movl_reg_T1(s, rd);
6212 if (logic_cc)
6213 gen_op_logic_T1_cc();
6214 }
6215 break;
6216 case 0x0e:
6217 gen_op_bicl_T0_T1();
6218 gen_movl_reg_T0(s, rd);
6219 if (logic_cc)
6220 gen_op_logic_T0_cc();
6221 break;
6222 default:
6223 case 0x0f:
6224 gen_op_notl_T1();
6225 gen_movl_reg_T1(s, rd);
6226 if (logic_cc)
6227 gen_op_logic_T1_cc();
6228 break;
6229 }
6230 } else {
6231 /* other instructions */
6232 op1 = (insn >> 24) & 0xf;
6233 switch(op1) {
6234 case 0x0:
6235 case 0x1:
6236 /* multiplies, extra load/stores */
6237 sh = (insn >> 5) & 3;
6238 if (sh == 0) {
6239 if (op1 == 0x0) {
6240 rd = (insn >> 16) & 0xf;
6241 rn = (insn >> 12) & 0xf;
6242 rs = (insn >> 8) & 0xf;
6243 rm = (insn) & 0xf;
6244 op1 = (insn >> 20) & 0xf;
6245 switch (op1) {
6246 case 0: case 1: case 2: case 3: case 6:
6247 /* 32 bit mul */
6248 tmp = load_reg(s, rs);
6249 tmp2 = load_reg(s, rm);
6250 tcg_gen_mul_i32(tmp, tmp, tmp2);
6251 dead_tmp(tmp2);
6252 if (insn & (1 << 22)) {
6253 /* Subtract (mls) */
6254 ARCH(6T2);
6255 tmp2 = load_reg(s, rn);
6256 tcg_gen_sub_i32(tmp, tmp2, tmp);
6257 dead_tmp(tmp2);
6258 } else if (insn & (1 << 21)) {
6259 /* Add */
6260 tmp2 = load_reg(s, rn);
6261 tcg_gen_add_i32(tmp, tmp, tmp2);
6262 dead_tmp(tmp2);
6263 }
6264 if (insn & (1 << 20))
6265 gen_logic_CC(tmp);
6266 store_reg(s, rd, tmp);
6267 break;
6268 default:
6269 /* 64 bit mul */
6270 tmp = load_reg(s, rs);
6271 tmp2 = load_reg(s, rm);
9ee6e8bb 6272 if (insn & (1 << 22))
a7812ae4 6273 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6274 else
a7812ae4 6275 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6276 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6277 gen_addq(s, tmp64, rn, rd);
6278 if (!(insn & (1 << 23))) { /* double accumulate */
6279 ARCH(6);
6280 gen_addq_lo(s, tmp64, rn);
6281 gen_addq_lo(s, tmp64, rd);
6282 }
6283 if (insn & (1 << 20))
6284 gen_logicq_cc(tmp64);
6285 gen_storeq_reg(s, rn, rd, tmp64);
6286 break;
6287 }
6288 } else {
6289 rn = (insn >> 16) & 0xf;
6290 rd = (insn >> 12) & 0xf;
6291 if (insn & (1 << 23)) {
6292 /* load/store exclusive */
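/* ldrex/strex are modelled with the mark_exclusive/test_exclusive
   helpers rather than a real monitor: the load marks the address
   exclusive, and the store is branched over when the test fails,
   with the status left in T0 for the destination register. */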
6293 op1 = (insn >> 21) & 0x3;
6294 if (op1)
a47f43d2 6295 ARCH(6K);
6296 else
6297 ARCH(6);
9ee6e8bb 6298 gen_movl_T1_reg(s, rn);
72f1c62f 6299 addr = cpu_T[1];
9ee6e8bb 6300 if (insn & (1 << 20)) {
8f8e3aa4 6301 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6302 switch (op1) {
6303 case 0: /* ldrex */
6304 tmp = gen_ld32(addr, IS_USER(s));
6305 break;
6306 case 1: /* ldrexd */
6307 tmp = gen_ld32(addr, IS_USER(s));
6308 store_reg(s, rd, tmp);
6309 tcg_gen_addi_i32(addr, addr, 4);
6310 tmp = gen_ld32(addr, IS_USER(s));
6311 rd++;
6312 break;
6313 case 2: /* ldrexb */
6314 tmp = gen_ld8u(addr, IS_USER(s));
6315 break;
6316 case 3: /* ldrexh */
6317 tmp = gen_ld16u(addr, IS_USER(s));
6318 break;
6319 default:
6320 abort();
6321 }
8f8e3aa4 6322 store_reg(s, rd, tmp);
9ee6e8bb 6323 } else {
8f8e3aa4 6324 int label = gen_new_label();
9ee6e8bb 6325 rm = insn & 0xf;
8f8e3aa4 6326 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6327 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6328 0, label);
8f8e3aa4 6329 tmp = load_reg(s,rm);
6330 switch (op1) {
6331 case 0: /* strex */
6332 gen_st32(tmp, addr, IS_USER(s));
6333 break;
6334 case 1: /* strexd */
6335 gen_st32(tmp, addr, IS_USER(s));
6336 tcg_gen_addi_i32(addr, addr, 4);
6337 tmp = load_reg(s, rm + 1);
6338 gen_st32(tmp, addr, IS_USER(s));
6339 break;
6340 case 2: /* strexb */
6341 gen_st8(tmp, addr, IS_USER(s));
6342 break;
6343 case 3: /* strexh */
6344 gen_st16(tmp, addr, IS_USER(s));
6345 break;
6346 default:
6347 abort();
6348 }
2637a3be 6349 gen_set_label(label);
8f8e3aa4 6350 gen_movl_reg_T0(s, rd);
9ee6e8bb 6351 }
6352 } else {
6353 /* SWP instruction */
6354 rm = (insn) & 0xf;
6355
6356 /* ??? This is not really atomic. However we know
6357 we never have multiple CPUs running in parallel,
6358 so it is good enough. */
6359 addr = load_reg(s, rn);
6360 tmp = load_reg(s, rm);
9ee6e8bb 6361 if (insn & (1 << 22)) {
6362 tmp2 = gen_ld8u(addr, IS_USER(s));
6363 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6364 } else {
6365 tmp2 = gen_ld32(addr, IS_USER(s));
6366 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6367 }
6368 dead_tmp(addr);
6369 store_reg(s, rd, tmp2);
6370 }
6371 }
6372 } else {
6373 int address_offset;
6374 int load;
6375 /* Misc load/store */
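/* Halfword, signed byte and doubleword transfers: sh (bits 6:5)
   selects the form, bit 20 distinguishes load from store, and
   gen_add_datah_offset() applies the immediate/register offset. */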
6376 rn = (insn >> 16) & 0xf;
6377 rd = (insn >> 12) & 0xf;
b0109805 6378 addr = load_reg(s, rn);
9ee6e8bb 6379 if (insn & (1 << 24))
b0109805 6380 gen_add_datah_offset(s, insn, 0, addr);
6381 address_offset = 0;
6382 if (insn & (1 << 20)) {
6383 /* load */
6384 switch(sh) {
6385 case 1:
b0109805 6386 tmp = gen_ld16u(addr, IS_USER(s));
6387 break;
6388 case 2:
b0109805 6389 tmp = gen_ld8s(addr, IS_USER(s));
6390 break;
6391 default:
6392 case 3:
b0109805 6393 tmp = gen_ld16s(addr, IS_USER(s));
6394 break;
6395 }
6396 load = 1;
6397 } else if (sh & 2) {
6398 /* doubleword */
6399 if (sh & 1) {
6400 /* store */
6401 tmp = load_reg(s, rd);
6402 gen_st32(tmp, addr, IS_USER(s));
6403 tcg_gen_addi_i32(addr, addr, 4);
6404 tmp = load_reg(s, rd + 1);
6405 gen_st32(tmp, addr, IS_USER(s));
6406 load = 0;
6407 } else {
6408 /* load */
6409 tmp = gen_ld32(addr, IS_USER(s));
6410 store_reg(s, rd, tmp);
6411 tcg_gen_addi_i32(addr, addr, 4);
6412 tmp = gen_ld32(addr, IS_USER(s));
6413 rd++;
6414 load = 1;
6415 }
6416 address_offset = -4;
6417 } else {
6418 /* store */
6419 tmp = load_reg(s, rd);
6420 gen_st16(tmp, addr, IS_USER(s));
6421 load = 0;
6422 }
/* Perform base writeback before the loaded value to
   ensure correct behavior with overlapping index registers.
   ldrd with base writeback is undefined if the
   destination and index registers overlap. */
6427 if (!(insn & (1 << 24))) {
6428 gen_add_datah_offset(s, insn, address_offset, addr);
6429 store_reg(s, rn, addr);
6430 } else if (insn & (1 << 21)) {
6431 if (address_offset)
6432 tcg_gen_addi_i32(addr, addr, address_offset);
6433 store_reg(s, rn, addr);
6434 } else {
6435 dead_tmp(addr);
6436 }
6437 if (load) {
6438 /* Complete the load. */
b0109805 6439 store_reg(s, rd, tmp);
6440 }
6441 }
6442 break;
6443 case 0x4:
6444 case 0x5:
6445 goto do_ldst;
6446 case 0x6:
6447 case 0x7:
6448 if (insn & (1 << 4)) {
6449 ARCH(6);
6450 /* Armv6 Media instructions. */
6451 rm = insn & 0xf;
6452 rn = (insn >> 16) & 0xf;
2c0262af 6453 rd = (insn >> 12) & 0xf;
6454 rs = (insn >> 8) & 0xf;
6455 switch ((insn >> 23) & 3) {
6456 case 0: /* Parallel add/subtract. */
6457 op1 = (insn >> 20) & 7;
6458 tmp = load_reg(s, rn);
6459 tmp2 = load_reg(s, rm);
6460 sh = (insn >> 5) & 7;
6461 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6462 goto illegal_op;
6463 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6464 dead_tmp(tmp2);
6465 store_reg(s, rd, tmp);
6466 break;
6467 case 1:
6468 if ((insn & 0x00700020) == 0) {
6c95676b 6469 /* Halfword pack. */
6470 tmp = load_reg(s, rn);
6471 tmp2 = load_reg(s, rm);
9ee6e8bb 6472 shift = (insn >> 7) & 0x1f;
6473 if (insn & (1 << 6)) {
6474 /* pkhtb */
6475 if (shift == 0)
6476 shift = 31;
6477 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6478 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6479 tcg_gen_ext16u_i32(tmp2, tmp2);
6480 } else {
6481 /* pkhbt */
6482 if (shift)
6483 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6484 tcg_gen_ext16u_i32(tmp, tmp);
6485 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6486 }
6487 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6488 dead_tmp(tmp2);
3670669c 6489 store_reg(s, rd, tmp);
6490 } else if ((insn & 0x00200020) == 0x00200000) {
6491 /* [us]sat */
6ddbc6e4 6492 tmp = load_reg(s, rm);
6493 shift = (insn >> 7) & 0x1f;
6494 if (insn & (1 << 6)) {
6495 if (shift == 0)
6496 shift = 31;
6ddbc6e4 6497 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6498 } else {
6ddbc6e4 6499 tcg_gen_shli_i32(tmp, tmp, shift);
6500 }
6501 sh = (insn >> 16) & 0x1f;
6502 if (sh != 0) {
6503 if (insn & (1 << 22))
6ddbc6e4 6504 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6505 else
6ddbc6e4 6506 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6507 }
6ddbc6e4 6508 store_reg(s, rd, tmp);
6509 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6510 /* [us]sat16 */
6ddbc6e4 6511 tmp = load_reg(s, rm);
6512 sh = (insn >> 16) & 0x1f;
6513 if (sh != 0) {
6514 if (insn & (1 << 22))
6ddbc6e4 6515 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6516 else
6ddbc6e4 6517 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6518 }
6ddbc6e4 6519 store_reg(s, rd, tmp);
6520 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6521 /* Select bytes. */
6522 tmp = load_reg(s, rn);
6523 tmp2 = load_reg(s, rm);
6524 tmp3 = new_tmp();
6525 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6526 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6527 dead_tmp(tmp3);
6528 dead_tmp(tmp2);
6529 store_reg(s, rd, tmp);
9ee6e8bb 6530 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6531 tmp = load_reg(s, rm);
6532 shift = (insn >> 10) & 3;
/* ??? In many cases it's not necessary to do a
   rotate; a shift is sufficient. */
6535 if (shift != 0)
5e3f878a 6536 tcg_gen_rori_i32(tmp, tmp, shift * 8);
6537 op1 = (insn >> 20) & 7;
6538 switch (op1) {
6539 case 0: gen_sxtb16(tmp); break;
6540 case 2: gen_sxtb(tmp); break;
6541 case 3: gen_sxth(tmp); break;
6542 case 4: gen_uxtb16(tmp); break;
6543 case 6: gen_uxtb(tmp); break;
6544 case 7: gen_uxth(tmp); break;
6545 default: goto illegal_op;
6546 }
6547 if (rn != 15) {
5e3f878a 6548 tmp2 = load_reg(s, rn);
9ee6e8bb 6549 if ((op1 & 3) == 0) {
5e3f878a 6550 gen_add16(tmp, tmp2);
9ee6e8bb 6551 } else {
6552 tcg_gen_add_i32(tmp, tmp, tmp2);
6553 dead_tmp(tmp2);
6554 }
6555 }
6c95676b 6556 store_reg(s, rd, tmp);
6557 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6558 /* rev */
b0109805 6559 tmp = load_reg(s, rm);
6560 if (insn & (1 << 22)) {
6561 if (insn & (1 << 7)) {
b0109805 6562 gen_revsh(tmp);
6563 } else {
6564 ARCH(6T2);
b0109805 6565 gen_helper_rbit(tmp, tmp);
6566 }
6567 } else {
6568 if (insn & (1 << 7))
b0109805 6569 gen_rev16(tmp);
9ee6e8bb 6570 else
b0109805 6571 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb 6572 }
b0109805 6573 store_reg(s, rd, tmp);
6574 } else {
6575 goto illegal_op;
6576 }
6577 break;
6578 case 2: /* Multiplies (Type 3). */
6579 tmp = load_reg(s, rm);
6580 tmp2 = load_reg(s, rs);
6581 if (insn & (1 << 20)) {
6582 /* Signed multiply most significant [accumulate]. */
a7812ae4 6583 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6584 if (insn & (1 << 5))
6585 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6586 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6587 tmp = new_tmp();
a7812ae4 6588 tcg_gen_trunc_i64_i32(tmp, tmp64);
6589 if (rd != 15) {
6590 tmp2 = load_reg(s, rd);
9ee6e8bb 6591 if (insn & (1 << 6)) {
5e3f878a 6592 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6593 } else {
5e3f878a 6594 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6595 }
5e3f878a 6596 dead_tmp(tmp2);
9ee6e8bb 6597 }
955a7dd5 6598 store_reg(s, rn, tmp);
6599 } else {
6600 if (insn & (1 << 5))
6601 gen_swap_half(tmp2);
6602 gen_smul_dual(tmp, tmp2);
6603 /* This addition cannot overflow. */
6604 if (insn & (1 << 6)) {
6605 tcg_gen_sub_i32(tmp, tmp, tmp2);
6606 } else {
6607 tcg_gen_add_i32(tmp, tmp, tmp2);
6608 }
6609 dead_tmp(tmp2);
9ee6e8bb 6610 if (insn & (1 << 22)) {
5e3f878a 6611 /* smlald, smlsld */
6612 tmp64 = tcg_temp_new_i64();
6613 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6614 dead_tmp(tmp);
6615 gen_addq(s, tmp64, rd, rn);
6616 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6617 } else {
5e3f878a 6618 /* smuad, smusd, smlad, smlsd */
22478e79 6619 if (rd != 15)
9ee6e8bb 6620 {
22478e79 6621 tmp2 = load_reg(s, rd);
6622 gen_helper_add_setq(tmp, tmp, tmp2);
6623 dead_tmp(tmp2);
9ee6e8bb 6624 }
22478e79 6625 store_reg(s, rn, tmp);
6626 }
6627 }
6628 break;
6629 case 3:
6630 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6631 switch (op1) {
6632 case 0: /* Unsigned sum of absolute differences. */
6633 ARCH(6);
6634 tmp = load_reg(s, rm);
6635 tmp2 = load_reg(s, rs);
6636 gen_helper_usad8(tmp, tmp, tmp2);
6637 dead_tmp(tmp2);
6638 if (rd != 15) {
6639 tmp2 = load_reg(s, rd);
6640 tcg_gen_add_i32(tmp, tmp, tmp2);
6641 dead_tmp(tmp2);
9ee6e8bb 6642 }
ded9d295 6643 store_reg(s, rn, tmp);
6644 break;
6645 case 0x20: case 0x24: case 0x28: case 0x2c:
6646 /* Bitfield insert/clear. */
6647 ARCH(6T2);
6648 shift = (insn >> 7) & 0x1f;
6649 i = (insn >> 16) & 0x1f;
6650 i = i + 1 - shift;
6651 if (rm == 15) {
6652 tmp = new_tmp();
6653 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6654 } else {
5e3f878a 6655 tmp = load_reg(s, rm);
6656 }
6657 if (i != 32) {
5e3f878a 6658 tmp2 = load_reg(s, rd);
8f8e3aa4 6659 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6660 dead_tmp(tmp2);
9ee6e8bb 6661 }
5e3f878a 6662 store_reg(s, rd, tmp);
6663 break;
6664 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6665 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6666 ARCH(6T2);
5e3f878a 6667 tmp = load_reg(s, rm);
6668 shift = (insn >> 7) & 0x1f;
6669 i = ((insn >> 16) & 0x1f) + 1;
6670 if (shift + i > 32)
6671 goto illegal_op;
6672 if (i < 32) {
6673 if (op1 & 0x20) {
5e3f878a 6674 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6675 } else {
5e3f878a 6676 gen_sbfx(tmp, shift, i);
6677 }
6678 }
5e3f878a 6679 store_reg(s, rd, tmp);
6680 break;
6681 default:
6682 goto illegal_op;
6683 }
6684 break;
6685 }
6686 break;
6687 }
6688 do_ldst:
/* Check for undefined extension instructions
 * per the ARM Bible, i.e.:
 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 */
6693 sh = (0xf << 20) | (0xf << 4);
6694 if (op1 == 0x7 && ((insn & sh) == sh))
6695 {
6696 goto illegal_op;
6697 }
6698 /* load/store byte/word */
6699 rn = (insn >> 16) & 0xf;
6700 rd = (insn >> 12) & 0xf;
b0109805 6701 tmp2 = load_reg(s, rn);
6702 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6703 if (insn & (1 << 24))
b0109805 6704 gen_add_data_offset(s, insn, tmp2);
6705 if (insn & (1 << 20)) {
6706 /* load */
9ee6e8bb 6707 if (insn & (1 << 22)) {
b0109805 6708 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6709 } else {
b0109805 6710 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6711 }
6712 } else {
6713 /* store */
b0109805 6714 tmp = load_reg(s, rd);
9ee6e8bb 6715 if (insn & (1 << 22))
b0109805 6716 gen_st8(tmp, tmp2, i);
9ee6e8bb 6717 else
b0109805 6718 gen_st32(tmp, tmp2, i);
6719 }
6720 if (!(insn & (1 << 24))) {
6721 gen_add_data_offset(s, insn, tmp2);
6722 store_reg(s, rn, tmp2);
6723 } else if (insn & (1 << 21)) {
6724 store_reg(s, rn, tmp2);
6725 } else {
6726 dead_tmp(tmp2);
6727 }
6728 if (insn & (1 << 20)) {
6729 /* Complete the load. */
6730 if (rd == 15)
b0109805 6731 gen_bx(s, tmp);
9ee6e8bb 6732 else
b0109805 6733 store_reg(s, rd, tmp);
6734 }
6735 break;
6736 case 0x08:
6737 case 0x09:
6738 {
6739 int j, n, user, loaded_base;
b0109805 6740 TCGv loaded_var;
6741 /* load/store multiple words */
6742 /* XXX: store correct base if write back */
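/* The register count n is computed first so that the decrement forms
   can adjust the base before the transfers; bit 22 (S bit) selects the
   user-bank registers via the get/set_user_reg helpers, and an LDM
   that includes the PC branches through gen_bx(). */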
6743 user = 0;
6744 if (insn & (1 << 22)) {
6745 if (IS_USER(s))
6746 goto illegal_op; /* only usable in supervisor mode */
6747
6748 if ((insn & (1 << 15)) == 0)
6749 user = 1;
6750 }
6751 rn = (insn >> 16) & 0xf;
b0109805 6752 addr = load_reg(s, rn);
6753
6754 /* compute total size */
6755 loaded_base = 0;
a50f5b91 6756 TCGV_UNUSED(loaded_var);
6757 n = 0;
6758 for(i=0;i<16;i++) {
6759 if (insn & (1 << i))
6760 n++;
6761 }
6762 /* XXX: test invalid n == 0 case ? */
6763 if (insn & (1 << 23)) {
6764 if (insn & (1 << 24)) {
6765 /* pre increment */
b0109805 6766 tcg_gen_addi_i32(addr, addr, 4);
6767 } else {
6768 /* post increment */
6769 }
6770 } else {
6771 if (insn & (1 << 24)) {
6772 /* pre decrement */
b0109805 6773 tcg_gen_addi_i32(addr, addr, -(n * 4));
6774 } else {
6775 /* post decrement */
6776 if (n != 1)
b0109805 6777 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6778 }
6779 }
6780 j = 0;
6781 for(i=0;i<16;i++) {
6782 if (insn & (1 << i)) {
6783 if (insn & (1 << 20)) {
6784 /* load */
b0109805 6785 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6786 if (i == 15) {
b0109805 6787 gen_bx(s, tmp);
9ee6e8bb 6788 } else if (user) {
6789 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6790 dead_tmp(tmp);
9ee6e8bb 6791 } else if (i == rn) {
b0109805 6792 loaded_var = tmp;
6793 loaded_base = 1;
6794 } else {
b0109805 6795 store_reg(s, i, tmp);
6796 }
6797 } else {
6798 /* store */
6799 if (i == 15) {
6800 /* special case: r15 = PC + 8 */
6801 val = (long)s->pc + 4;
6802 tmp = new_tmp();
6803 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6804 } else if (user) {
6805 tmp = new_tmp();
6806 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6807 } else {
b0109805 6808 tmp = load_reg(s, i);
9ee6e8bb 6809 }
b0109805 6810 gen_st32(tmp, addr, IS_USER(s));
6811 }
6812 j++;
6813 /* no need to add after the last transfer */
6814 if (j != n)
b0109805 6815 tcg_gen_addi_i32(addr, addr, 4);
6816 }
6817 }
6818 if (insn & (1 << 21)) {
6819 /* write back */
6820 if (insn & (1 << 23)) {
6821 if (insn & (1 << 24)) {
6822 /* pre increment */
6823 } else {
6824 /* post increment */
b0109805 6825 tcg_gen_addi_i32(addr, addr, 4);
6826 }
6827 } else {
6828 if (insn & (1 << 24)) {
6829 /* pre decrement */
6830 if (n != 1)
b0109805 6831 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6832 } else {
6833 /* post decrement */
b0109805 6834 tcg_gen_addi_i32(addr, addr, -(n * 4));
6835 }
6836 }
6837 store_reg(s, rn, addr);
6838 } else {
6839 dead_tmp(addr);
6840 }
6841 if (loaded_base) {
b0109805 6842 store_reg(s, rn, loaded_var);
6843 }
6844 if ((insn & (1 << 22)) && !user) {
6845 /* Restore CPSR from SPSR. */
6846 tmp = load_cpu_field(spsr);
6847 gen_set_cpsr(tmp, 0xffffffff);
6848 dead_tmp(tmp);
6849 s->is_jmp = DISAS_UPDATE;
6850 }
6851 }
6852 break;
6853 case 0xa:
6854 case 0xb:
6855 {
6856 int32_t offset;
6857
6858 /* branch (and link) */
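/* B/BL: the 24-bit immediate is sign-extended, scaled by 4 and
   adjusted by +4 for the pipeline; bit 24 selects BL, which first
   writes the return address to LR. */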
6859 val = (int32_t)s->pc;
6860 if (insn & (1 << 24)) {
6861 tmp = new_tmp();
6862 tcg_gen_movi_i32(tmp, val);
6863 store_reg(s, 14, tmp);
6864 }
6865 offset = (((int32_t)insn << 8) >> 8);
6866 val += (offset << 2) + 4;
6867 gen_jmp(s, val);
6868 }
6869 break;
6870 case 0xc:
6871 case 0xd:
6872 case 0xe:
6873 /* Coprocessor. */
6874 if (disas_coproc_insn(env, s, insn))
6875 goto illegal_op;
6876 break;
6877 case 0xf:
6878 /* swi */
5e3f878a 6879 gen_set_pc_im(s->pc);
6880 s->is_jmp = DISAS_SWI;
6881 break;
6882 default:
6883 illegal_op:
6884 gen_set_condexec(s);
5e3f878a 6885 gen_set_pc_im(s->pc - 4);
d9ba4830 6886 gen_exception(EXCP_UDEF);
6887 s->is_jmp = DISAS_JUMP;
6888 break;
6889 }
6890 }
6891}
6892
6893/* Return true if this is a Thumb-2 logical op. */
6894static int
6895thumb2_logic_op(int op)
6896{
6897 return (op < 8);
6898}
6899
6900/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6901 then set condition code flags based on the result of the operation.
6902 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6903 to the high bit of T1.
6904 Returns zero if the opcode is valid. */
6905
6906static int
6907gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6908{
6909 int logic_cc;
6910
6911 logic_cc = 0;
6912 switch (op) {
6913 case 0: /* and */
6914 gen_op_andl_T0_T1();
6915 logic_cc = conds;
6916 break;
6917 case 1: /* bic */
6918 gen_op_bicl_T0_T1();
6919 logic_cc = conds;
6920 break;
6921 case 2: /* orr */
6922 gen_op_orl_T0_T1();
6923 logic_cc = conds;
6924 break;
6925 case 3: /* orn */
6926 gen_op_notl_T1();
6927 gen_op_orl_T0_T1();
6928 logic_cc = conds;
6929 break;
6930 case 4: /* eor */
6931 gen_op_xorl_T0_T1();
6932 logic_cc = conds;
6933 break;
6934 case 8: /* add */
6935 if (conds)
6936 gen_op_addl_T0_T1_cc();
6937 else
6938 gen_op_addl_T0_T1();
6939 break;
6940 case 10: /* adc */
6941 if (conds)
6942 gen_op_adcl_T0_T1_cc();
6943 else
b26eefb6 6944 gen_adc_T0_T1();
6945 break;
6946 case 11: /* sbc */
6947 if (conds)
6948 gen_op_sbcl_T0_T1_cc();
6949 else
3670669c 6950 gen_sbc_T0_T1();
6951 break;
6952 case 13: /* sub */
6953 if (conds)
6954 gen_op_subl_T0_T1_cc();
6955 else
6956 gen_op_subl_T0_T1();
6957 break;
6958 case 14: /* rsb */
6959 if (conds)
6960 gen_op_rsbl_T0_T1_cc();
6961 else
6962 gen_op_rsbl_T0_T1();
6963 break;
6964 default: /* 5, 6, 7, 9, 12, 15. */
6965 return 1;
6966 }
6967 if (logic_cc) {
6968 gen_op_logic_T0_cc();
6969 if (shifter_out)
b26eefb6 6970 gen_set_CF_bit31(cpu_T[1]);
6971 }
6972 return 0;
6973}
6974
6975/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6976 is not legal. */
6977static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6978{
b0109805 6979 uint32_t insn, imm, shift, offset;
9ee6e8bb 6980 uint32_t rd, rn, rm, rs;
b26eefb6 6981 TCGv tmp;
6982 TCGv tmp2;
6983 TCGv tmp3;
b0109805 6984 TCGv addr;
a7812ae4 6985 TCGv_i64 tmp64;
6986 int op;
6987 int shiftop;
6988 int conds;
6989 int logic_cc;
6990
6991 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6992 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 6993 /* Thumb-1 cores may need to treat bl and blx as a pair of
6994 16-bit instructions to get correct prefetch abort behavior. */
6995 insn = insn_hw1;
6996 if ((insn & (1 << 12)) == 0) {
6997 /* Second half of blx. */
6998 offset = ((insn & 0x7ff) << 1);
6999 tmp = load_reg(s, 14);
7000 tcg_gen_addi_i32(tmp, tmp, offset);
7001 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7002
d9ba4830 7003 tmp2 = new_tmp();
b0109805 7004 tcg_gen_movi_i32(tmp2, s->pc | 1);
7005 store_reg(s, 14, tmp2);
7006 gen_bx(s, tmp);
7007 return 0;
7008 }
7009 if (insn & (1 << 11)) {
7010 /* Second half of bl. */
7011 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7012 tmp = load_reg(s, 14);
6a0d8a1d 7013 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7014
d9ba4830 7015 tmp2 = new_tmp();
b0109805 7016 tcg_gen_movi_i32(tmp2, s->pc | 1);
7017 store_reg(s, 14, tmp2);
7018 gen_bx(s, tmp);
7019 return 0;
7020 }
7021 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
/* Instruction spans a page boundary. Implement it as two
   16-bit instructions in case the second half causes a
   prefetch abort. */
7025 offset = ((int32_t)insn << 21) >> 9;
b0109805 7026 gen_op_movl_T0_im(s->pc + 2 + offset);
7027 gen_movl_reg_T0(s, 14);
7028 return 0;
7029 }
7030 /* Fall through to 32-bit decode. */
7031 }
7032
7033 insn = lduw_code(s->pc);
7034 s->pc += 2;
7035 insn |= (uint32_t)insn_hw1 << 16;
7036
7037 if ((insn & 0xf800e800) != 0xf000e800) {
7038 ARCH(6T2);
7039 }
7040
7041 rn = (insn >> 16) & 0xf;
7042 rs = (insn >> 12) & 0xf;
7043 rd = (insn >> 8) & 0xf;
7044 rm = insn & 0xf;
7045 switch ((insn >> 25) & 0xf) {
7046 case 0: case 1: case 2: case 3:
7047 /* 16-bit instructions. Should never happen. */
7048 abort();
7049 case 4:
7050 if (insn & (1 << 22)) {
7051 /* Other load/store, table branch. */
7052 if (insn & 0x01200000) {
7053 /* Load/store doubleword. */
7054 if (rn == 15) {
7055 addr = new_tmp();
7056 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7057 } else {
b0109805 7058 addr = load_reg(s, rn);
7059 }
7060 offset = (insn & 0xff) * 4;
7061 if ((insn & (1 << 23)) == 0)
7062 offset = -offset;
7063 if (insn & (1 << 24)) {
b0109805 7064 tcg_gen_addi_i32(addr, addr, offset);
7065 offset = 0;
7066 }
7067 if (insn & (1 << 20)) {
7068 /* ldrd */
7069 tmp = gen_ld32(addr, IS_USER(s));
7070 store_reg(s, rs, tmp);
7071 tcg_gen_addi_i32(addr, addr, 4);
7072 tmp = gen_ld32(addr, IS_USER(s));
7073 store_reg(s, rd, tmp);
7074 } else {
7075 /* strd */
7076 tmp = load_reg(s, rs);
7077 gen_st32(tmp, addr, IS_USER(s));
7078 tcg_gen_addi_i32(addr, addr, 4);
7079 tmp = load_reg(s, rd);
7080 gen_st32(tmp, addr, IS_USER(s));
7081 }
7082 if (insn & (1 << 21)) {
7083 /* Base writeback. */
7084 if (rn == 15)
7085 goto illegal_op;
7086 tcg_gen_addi_i32(addr, addr, offset - 4);
7087 store_reg(s, rn, addr);
7088 } else {
7089 dead_tmp(addr);
7090 }
7091 } else if ((insn & (1 << 23)) == 0) {
7092 /* Load/store exclusive word. */
2c0262af 7093 gen_movl_T1_reg(s, rn);
72f1c62f 7094 addr = cpu_T[1];
2c0262af 7095 if (insn & (1 << 20)) {
7096 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7097 tmp = gen_ld32(addr, IS_USER(s));
7098 store_reg(s, rd, tmp);
9ee6e8bb 7099 } else {
7100 int label = gen_new_label();
7101 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7102 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7103 0, label);
7104 tmp = load_reg(s, rs);
7105 gen_st32(tmp, cpu_T[1], IS_USER(s));
7106 gen_set_label(label);
7107 gen_movl_reg_T0(s, rd);
9ee6e8bb 7108 }
7109 } else if ((insn & (1 << 6)) == 0) {
7110 /* Table Branch. */
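/* TBB/TBH: load a byte or halfword branch offset from a table at
   Rn + Rm (Rn + 2*Rm for TBH), double it and add it to the PC,
   e.g. TBB [r0, r1] branches forward by twice the byte at r0+r1. */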
7111 if (rn == 15) {
7112 addr = new_tmp();
7113 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7114 } else {
b0109805 7115 addr = load_reg(s, rn);
9ee6e8bb 7116 }
b26eefb6 7117 tmp = load_reg(s, rm);
b0109805 7118 tcg_gen_add_i32(addr, addr, tmp);
7119 if (insn & (1 << 4)) {
7120 /* tbh */
b0109805 7121 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7122 dead_tmp(tmp);
b0109805 7123 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7124 } else { /* tbb */
b26eefb6 7125 dead_tmp(tmp);
b0109805 7126 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7127 }
7128 dead_tmp(addr);
7129 tcg_gen_shli_i32(tmp, tmp, 1);
7130 tcg_gen_addi_i32(tmp, tmp, s->pc);
7131 store_reg(s, 15, tmp);
7132 } else {
7133 /* Load/store exclusive byte/halfword/doubleword. */
7134 /* ??? These are not really atomic. However we know
7135 we never have multiple CPUs running in parallel,
7136 so it is good enough. */
9ee6e8bb 7137 op = (insn >> 4) & 0x3;
7138 /* Must use a global reg for the address because we have
7139 a conditional branch in the store instruction. */
9ee6e8bb 7140 gen_movl_T1_reg(s, rn);
8f8e3aa4 7141 addr = cpu_T[1];
9ee6e8bb 7142 if (insn & (1 << 20)) {
8f8e3aa4 7143 gen_helper_mark_exclusive(cpu_env, addr);
7144 switch (op) {
7145 case 0:
8f8e3aa4 7146 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7147 break;
2c0262af 7148 case 1:
8f8e3aa4 7149 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7150 break;
9ee6e8bb 7151 case 3:
7152 tmp = gen_ld32(addr, IS_USER(s));
7153 tcg_gen_addi_i32(addr, addr, 4);
7154 tmp2 = gen_ld32(addr, IS_USER(s));
7155 store_reg(s, rd, tmp2);
7156 break;
7157 default:
7158 goto illegal_op;
7159 }
8f8e3aa4 7160 store_reg(s, rs, tmp);
9ee6e8bb 7161 } else {
7162 int label = gen_new_label();
7163 /* Must use a global that is not killed by the branch. */
7164 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a 7165 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
8f8e3aa4 7166 tmp = load_reg(s, rs);
7167 switch (op) {
7168 case 0:
8f8e3aa4 7169 gen_st8(tmp, addr, IS_USER(s));
7170 break;
7171 case 1:
8f8e3aa4 7172 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7173 break;
2c0262af 7174 case 3:
7175 gen_st32(tmp, addr, IS_USER(s));
7176 tcg_gen_addi_i32(addr, addr, 4);
7177 tmp = load_reg(s, rd);
7178 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7179 break;
7180 default:
7181 goto illegal_op;
2c0262af 7182 }
8f8e3aa4 7183 gen_set_label(label);
7184 gen_movl_reg_T0(s, rm);
7185 }
7186 }
7187 } else {
7188 /* Load/store multiple, RFE, SRS. */
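/* When bits [24:23] are equal this is the privileged RFE/SRS form
   (bit 20 picks RFE); otherwise it is an ordinary load/store multiple. */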
7189 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7190 /* Not available in user mode. */
b0109805 7191 if (IS_USER(s))
7192 goto illegal_op;
7193 if (insn & (1 << 20)) {
7194 /* rfe */
7195 addr = load_reg(s, rn);
7196 if ((insn & (1 << 24)) == 0)
7197 tcg_gen_addi_i32(addr, addr, -8);
7198 /* Load PC into tmp and CPSR into tmp2. */
7199 tmp = gen_ld32(addr, 0);
7200 tcg_gen_addi_i32(addr, addr, 4);
7201 tmp2 = gen_ld32(addr, 0);
7202 if (insn & (1 << 21)) {
7203 /* Base writeback. */
7204 if (insn & (1 << 24)) {
7205 tcg_gen_addi_i32(addr, addr, 4);
7206 } else {
7207 tcg_gen_addi_i32(addr, addr, -4);
7208 }
7209 store_reg(s, rn, addr);
7210 } else {
7211 dead_tmp(addr);
9ee6e8bb 7212 }
b0109805 7213 gen_rfe(s, tmp, tmp2);
7214 } else {
7215 /* srs */
7216 op = (insn & 0x1f);
7217 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7218 addr = load_reg(s, 13);
9ee6e8bb 7219 } else {
7220 addr = new_tmp();
7221 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
7222 }
7223 if ((insn & (1 << 24)) == 0) {
b0109805 7224 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7225 }
7226 tmp = load_reg(s, 14);
7227 gen_st32(tmp, addr, 0);
7228 tcg_gen_addi_i32(addr, addr, 4);
7229 tmp = new_tmp();
7230 gen_helper_cpsr_read(tmp);
7231 gen_st32(tmp, addr, 0);
7232 if (insn & (1 << 21)) {
7233 if ((insn & (1 << 24)) == 0) {
b0109805 7234 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7235 } else {
b0109805 7236 tcg_gen_addi_i32(addr, addr, 4);
7237 }
7238 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7239 store_reg(s, 13, addr);
9ee6e8bb 7240 } else {
7241 gen_helper_set_r13_banked(cpu_env,
7242 tcg_const_i32(op), addr);
9ee6e8bb 7243 }
7244 } else {
7245 dead_tmp(addr);
7246 }
7247 }
7248 } else {
7249 int i;
7250 /* Load/store multiple. */
b0109805 7251 addr = load_reg(s, rn);
7252 offset = 0;
7253 for (i = 0; i < 16; i++) {
7254 if (insn & (1 << i))
7255 offset += 4;
7256 }
7257 if (insn & (1 << 24)) {
b0109805 7258 tcg_gen_addi_i32(addr, addr, -offset);
7259 }
7260
7261 for (i = 0; i < 16; i++) {
7262 if ((insn & (1 << i)) == 0)
7263 continue;
7264 if (insn & (1 << 20)) {
7265 /* Load. */
b0109805 7266 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7267 if (i == 15) {
b0109805 7268 gen_bx(s, tmp);
9ee6e8bb 7269 } else {
b0109805 7270 store_reg(s, i, tmp);
7271 }
7272 } else {
7273 /* Store. */
7274 tmp = load_reg(s, i);
7275 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7276 }
b0109805 7277 tcg_gen_addi_i32(addr, addr, 4);
7278 }
7279 if (insn & (1 << 21)) {
7280 /* Base register writeback. */
7281 if (insn & (1 << 24)) {
b0109805 7282 tcg_gen_addi_i32(addr, addr, -offset);
7283 }
7284 /* Fault if writeback register is in register list. */
7285 if (insn & (1 << rn))
7286 goto illegal_op;
7287 store_reg(s, rn, addr);
7288 } else {
7289 dead_tmp(addr);
7290 }
7291 }
7292 }
7293 break;
7294 case 5: /* Data processing register constant shift. */
7295 if (rn == 15)
7296 gen_op_movl_T0_im(0);
7297 else
7298 gen_movl_T0_reg(s, rn);
7299 gen_movl_T1_reg(s, rm);
7300 op = (insn >> 21) & 0xf;
7301 shiftop = (insn >> 4) & 3;
7302 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7303 conds = (insn & (1 << 20)) != 0;
7304 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7305 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
7306 if (gen_thumb2_data_op(s, op, conds, 0))
7307 goto illegal_op;
7308 if (rd != 15)
7309 gen_movl_reg_T0(s, rd);
7310 break;
7311 case 13: /* Misc data processing. */
7312 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7313 if (op < 4 && (insn & 0xf000) != 0xf000)
7314 goto illegal_op;
7315 switch (op) {
7316 case 0: /* Register controlled shift. */
7317 tmp = load_reg(s, rn);
7318 tmp2 = load_reg(s, rm);
7319 if ((insn & 0x70) != 0)
7320 goto illegal_op;
7321 op = (insn >> 21) & 3;
7322 logic_cc = (insn & (1 << 20)) != 0;
7323 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7324 if (logic_cc)
7325 gen_logic_CC(tmp);
7326 store_reg(s, rd, tmp);
7327 break;
7328 case 1: /* Sign/zero extend. */
5e3f878a 7329 tmp = load_reg(s, rm);
7330 shift = (insn >> 4) & 3;
/* ??? In many cases it's not necessary to do a
   rotate; a shift is sufficient. */
7333 if (shift != 0)
5e3f878a 7334 tcg_gen_rori_i32(tmp, tmp, shift * 8);
7335 op = (insn >> 20) & 7;
7336 switch (op) {
7337 case 0: gen_sxth(tmp); break;
7338 case 1: gen_uxth(tmp); break;
7339 case 2: gen_sxtb16(tmp); break;
7340 case 3: gen_uxtb16(tmp); break;
7341 case 4: gen_sxtb(tmp); break;
7342 case 5: gen_uxtb(tmp); break;
7343 default: goto illegal_op;
7344 }
7345 if (rn != 15) {
5e3f878a 7346 tmp2 = load_reg(s, rn);
9ee6e8bb 7347 if ((op >> 1) == 1) {
5e3f878a 7348 gen_add16(tmp, tmp2);
9ee6e8bb 7349 } else {
7350 tcg_gen_add_i32(tmp, tmp, tmp2);
7351 dead_tmp(tmp2);
7352 }
7353 }
5e3f878a 7354 store_reg(s, rd, tmp);
7355 break;
7356 case 2: /* SIMD add/subtract. */
7357 op = (insn >> 20) & 7;
7358 shift = (insn >> 4) & 7;
7359 if ((op & 3) == 3 || (shift & 3) == 3)
7360 goto illegal_op;
7361 tmp = load_reg(s, rn);
7362 tmp2 = load_reg(s, rm);
7363 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7364 dead_tmp(tmp2);
7365 store_reg(s, rd, tmp);
7366 break;
7367 case 3: /* Other data processing. */
7368 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7369 if (op < 4) {
7370 /* Saturating add/subtract. */
7371 tmp = load_reg(s, rn);
7372 tmp2 = load_reg(s, rm);
9ee6e8bb 7373 if (op & 2)
d9ba4830 7374 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7375 if (op & 1)
d9ba4830 7376 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7377 else
7378 gen_helper_add_saturate(tmp, tmp, tmp2);
7379 dead_tmp(tmp2);
9ee6e8bb 7380 } else {
d9ba4830 7381 tmp = load_reg(s, rn);
7382 switch (op) {
7383 case 0x0a: /* rbit */
d9ba4830 7384 gen_helper_rbit(tmp, tmp);
7385 break;
7386 case 0x08: /* rev */
d9ba4830 7387 tcg_gen_bswap_i32(tmp, tmp);
7388 break;
7389 case 0x09: /* rev16 */
d9ba4830 7390 gen_rev16(tmp);
7391 break;
7392 case 0x0b: /* revsh */
d9ba4830 7393 gen_revsh(tmp);
7394 break;
7395 case 0x10: /* sel */
d9ba4830 7396 tmp2 = load_reg(s, rm);
7397 tmp3 = new_tmp();
7398 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7399 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7400 dead_tmp(tmp3);
d9ba4830 7401 dead_tmp(tmp2);
7402 break;
7403 case 0x18: /* clz */
d9ba4830 7404 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7405 break;
7406 default:
7407 goto illegal_op;
7408 }
7409 }
d9ba4830 7410 store_reg(s, rd, tmp);
7411 break;
7412 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7413 op = (insn >> 4) & 0xf;
7414 tmp = load_reg(s, rn);
7415 tmp2 = load_reg(s, rm);
7416 switch ((insn >> 20) & 7) {
7417 case 0: /* 32 x 32 -> 32 */
7418 tcg_gen_mul_i32(tmp, tmp, tmp2);
7419 dead_tmp(tmp2);
9ee6e8bb 7420 if (rs != 15) {
d9ba4830 7421 tmp2 = load_reg(s, rs);
9ee6e8bb 7422 if (op)
d9ba4830 7423 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7424 else
7425 tcg_gen_add_i32(tmp, tmp, tmp2);
7426 dead_tmp(tmp2);
9ee6e8bb 7427 }
7428 break;
7429 case 1: /* 16 x 16 -> 32 */
7430 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7431 dead_tmp(tmp2);
9ee6e8bb 7432 if (rs != 15) {
7433 tmp2 = load_reg(s, rs);
7434 gen_helper_add_setq(tmp, tmp, tmp2);
7435 dead_tmp(tmp2);
9ee6e8bb 7436 }
7437 break;
7438 case 2: /* Dual multiply add. */
7439 case 4: /* Dual multiply subtract. */
7440 if (op)
7441 gen_swap_half(tmp2);
7442 gen_smul_dual(tmp, tmp2);
7443 /* This addition cannot overflow. */
7444 if (insn & (1 << 22)) {
d9ba4830 7445 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7446 } else {
d9ba4830 7447 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7448 }
d9ba4830 7449 dead_tmp(tmp2);
7450 if (rs != 15)
7451 {
7452 tmp2 = load_reg(s, rs);
7453 gen_helper_add_setq(tmp, tmp, tmp2);
7454 dead_tmp(tmp2);
9ee6e8bb 7455 }
7456 break;
7457 case 3: /* 32 * 16 -> 32msb */
7458 if (op)
d9ba4830 7459 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7460 else
d9ba4830 7461 gen_sxth(tmp2);
7462 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7463 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7464 tmp = new_tmp();
a7812ae4 7465 tcg_gen_trunc_i64_i32(tmp, tmp64);
7466 if (rs != 15)
7467 {
7468 tmp2 = load_reg(s, rs);
7469 gen_helper_add_setq(tmp, tmp, tmp2);
7470 dead_tmp(tmp2);
9ee6e8bb 7471 }
7472 break;
7473 case 5: case 6: /* 32 * 32 -> 32msb */
7474 gen_imull(tmp, tmp2);
7475 if (insn & (1 << 5)) {
7476 gen_roundqd(tmp, tmp2);
7477 dead_tmp(tmp2);
7478 } else {
7479 dead_tmp(tmp);
7480 tmp = tmp2;
7481 }
9ee6e8bb 7482 if (rs != 15) {
d9ba4830 7483 tmp2 = load_reg(s, rs);
9ee6e8bb 7484 if (insn & (1 << 21)) {
d9ba4830 7485 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7486 } else {
d9ba4830 7487 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7488 }
d9ba4830 7489 dead_tmp(tmp2);
2c0262af 7490 }
7491 break;
7492 case 7: /* Unsigned sum of absolute differences. */
7493 gen_helper_usad8(tmp, tmp, tmp2);
7494 dead_tmp(tmp2);
9ee6e8bb 7495 if (rs != 15) {
7496 tmp2 = load_reg(s, rs);
7497 tcg_gen_add_i32(tmp, tmp, tmp2);
7498 dead_tmp(tmp2);
5fd46862 7499 }
9ee6e8bb 7500 break;
2c0262af 7501 }
d9ba4830 7502 store_reg(s, rd, tmp);
2c0262af 7503 break;
7504 case 6: case 7: /* 64-bit multiply, Divide. */
7505 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7506 tmp = load_reg(s, rn);
7507 tmp2 = load_reg(s, rm);
7508 if ((op & 0x50) == 0x10) {
7509 /* sdiv, udiv */
7510 if (!arm_feature(env, ARM_FEATURE_DIV))
7511 goto illegal_op;
7512 if (op & 0x20)
5e3f878a 7513 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7514 else
7515 gen_helper_sdiv(tmp, tmp, tmp2);
7516 dead_tmp(tmp2);
7517 store_reg(s, rd, tmp);
7518 } else if ((op & 0xe) == 0xc) {
7519 /* Dual multiply accumulate long. */
7520 if (op & 1)
7521 gen_swap_half(tmp2);
7522 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7523 if (op & 0x10) {
5e3f878a 7524 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7525 } else {
5e3f878a 7526 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7527 }
5e3f878a 7528 dead_tmp(tmp2);
7529 /* BUGFIX */
7530 tmp64 = tcg_temp_new_i64();
7531 tcg_gen_ext_i32_i64(tmp64, tmp);
7532 dead_tmp(tmp);
7533 gen_addq(s, tmp64, rs, rd);
7534 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7535 } else {
7536 if (op & 0x20) {
7537 /* Unsigned 64-bit multiply */
a7812ae4 7538 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7539 } else {
7540 if (op & 8) {
7541 /* smlalxy */
7542 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7543 dead_tmp(tmp2);
7544 tmp64 = tcg_temp_new_i64();
7545 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7546 dead_tmp(tmp);
7547 } else {
7548 /* Signed 64-bit multiply */
a7812ae4 7549 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7550 }
b5ff1b31 7551 }
7552 if (op & 4) {
7553 /* umaal */
7554 gen_addq_lo(s, tmp64, rs);
7555 gen_addq_lo(s, tmp64, rd);
7556 } else if (op & 0x40) {
7557 /* 64-bit accumulate. */
a7812ae4 7558 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7559 }
a7812ae4 7560 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7561 }
2c0262af 7562 break;
7563 }
7564 break;
7565 case 6: case 7: case 14: case 15:
7566 /* Coprocessor. */
7567 if (((insn >> 24) & 3) == 3) {
7568 /* Translate into the equivalent ARM encoding. */
7569 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7570 if (disas_neon_data_insn(env, s, insn))
7571 goto illegal_op;
7572 } else {
7573 if (insn & (1 << 28))
7574 goto illegal_op;
7575 if (disas_coproc_insn (env, s, insn))
7576 goto illegal_op;
7577 }
7578 break;
7579 case 8: case 9: case 10: case 11:
7580 if (insn & (1 << 15)) {
7581 /* Branches, misc control. */
7582 if (insn & 0x5000) {
7583 /* Unconditional branch. */
7584 /* signextend(hw1[10:0]) -> offset[:12]. */
7585 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7586 /* hw1[10:0] -> offset[11:1]. */
7587 offset |= (insn & 0x7ff) << 1;
7588 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7589 offset[24:22] already have the same value because of the
7590 sign extension above. */
7591 offset ^= ((~insn) & (1 << 13)) << 10;
7592 offset ^= ((~insn) & (1 << 11)) << 11;
7593
7594 if (insn & (1 << 14)) {
7595 /* Branch and link. */
b0109805 7596 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7597 gen_movl_reg_T1(s, 14);
b5ff1b31 7598 }
3b46e624 7599
b0109805 7600 offset += s->pc;
7601 if (insn & (1 << 12)) {
7602 /* b/bl */
b0109805 7603 gen_jmp(s, offset);
7604 } else {
7605 /* blx */
7606 offset &= ~(uint32_t)2;
7607 gen_bx_im(s, offset);
2c0262af 7608 }
7609 } else if (((insn >> 23) & 7) == 7) {
7610 /* Misc control */
7611 if (insn & (1 << 13))
7612 goto illegal_op;
7613
7614 if (insn & (1 << 26)) {
7615 /* Secure monitor call (v6Z) */
7616 goto illegal_op; /* not implemented. */
2c0262af 7617 } else {
7618 op = (insn >> 20) & 7;
7619 switch (op) {
7620 case 0: /* msr cpsr. */
7621 if (IS_M(env)) {
7622 tmp = load_reg(s, rn);
7623 addr = tcg_const_i32(insn & 0xff);
7624 gen_helper_v7m_msr(cpu_env, addr, tmp);
7625 gen_lookup_tb(s);
7626 break;
7627 }
7628 /* fall through */
7629 case 1: /* msr spsr. */
7630 if (IS_M(env))
7631 goto illegal_op;
7632 gen_movl_T0_reg(s, rn);
7633 if (gen_set_psr_T0(s,
7634 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7635 op == 1))
7636 goto illegal_op;
7637 break;
7638 case 2: /* cps, nop-hint. */
7639 if (((insn >> 8) & 7) == 0) {
7640 gen_nop_hint(s, insn & 0xff);
7641 }
7642 /* Implemented as NOP in user mode. */
7643 if (IS_USER(s))
7644 break;
7645 offset = 0;
7646 imm = 0;
7647 if (insn & (1 << 10)) {
7648 if (insn & (1 << 7))
7649 offset |= CPSR_A;
7650 if (insn & (1 << 6))
7651 offset |= CPSR_I;
7652 if (insn & (1 << 5))
7653 offset |= CPSR_F;
7654 if (insn & (1 << 9))
7655 imm = CPSR_A | CPSR_I | CPSR_F;
7656 }
7657 if (insn & (1 << 8)) {
7658 offset |= 0x1f;
7659 imm |= (insn & 0x1f);
7660 }
7661 if (offset) {
7662 gen_op_movl_T0_im(imm);
7663 gen_set_psr_T0(s, offset, 0);
7664 }
7665 break;
7666 case 3: /* Special control operations. */
7667 op = (insn >> 4) & 0xf;
7668 switch (op) {
7669 case 2: /* clrex */
8f8e3aa4 7670 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7671 break;
7672 case 4: /* dsb */
7673 case 5: /* dmb */
7674 case 6: /* isb */
7675 /* These execute as NOPs. */
7676 ARCH(7);
7677 break;
7678 default:
7679 goto illegal_op;
7680 }
7681 break;
7682 case 4: /* bxj */
7683 /* Trivial implementation equivalent to bx. */
7684 tmp = load_reg(s, rn);
7685 gen_bx(s, tmp);
7686 break;
7687 case 5: /* Exception return. */
7688 /* Unpredictable in user mode. */
7689 goto illegal_op;
7690 case 6: /* mrs cpsr. */
8984bd2e 7691 tmp = new_tmp();
9ee6e8bb 7692 if (IS_M(env)) {
7693 addr = tcg_const_i32(insn & 0xff);
7694 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7695 } else {
8984bd2e 7696 gen_helper_cpsr_read(tmp);
9ee6e8bb 7697 }
8984bd2e 7698 store_reg(s, rd, tmp);
7699 break;
7700 case 7: /* mrs spsr. */
7701 /* Not accessible in user mode. */
7702 if (IS_USER(s) || IS_M(env))
7703 goto illegal_op;
7704 tmp = load_cpu_field(spsr);
7705 store_reg(s, rd, tmp);
9ee6e8bb 7706 break;
7707 }
7708 }
7709 } else {
7710 /* Conditional branch. */
7711 op = (insn >> 22) & 0xf;
7712 /* Generate a conditional jump to next instruction. */
7713 s->condlabel = gen_new_label();
d9ba4830 7714 gen_test_cc(op ^ 1, s->condlabel);
7715 s->condjmp = 1;
7716
7717 /* offset[11:1] = insn[10:0] */
7718 offset = (insn & 0x7ff) << 1;
7719 /* offset[17:12] = insn[21:16]. */
7720 offset |= (insn & 0x003f0000) >> 4;
7721 /* offset[31:20] = insn[26]. */
7722 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7723 /* offset[18] = insn[13]. */
7724 offset |= (insn & (1 << 13)) << 5;
7725 /* offset[19] = insn[11]. */
7726 offset |= (insn & (1 << 11)) << 8;
7727
7728 /* jump to the offset */
b0109805 7729 gen_jmp(s, s->pc + offset);
7730 }
7731 } else {
7732 /* Data processing immediate. */
7733 if (insn & (1 << 25)) {
7734 if (insn & (1 << 24)) {
7735 if (insn & (1 << 20))
7736 goto illegal_op;
7737 /* Bitfield/Saturate. */
7738 op = (insn >> 21) & 7;
7739 imm = insn & 0x1f;
7740 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7741 if (rn == 15) {
7742 tmp = new_tmp();
7743 tcg_gen_movi_i32(tmp, 0);
7744 } else {
7745 tmp = load_reg(s, rn);
7746 }
7747 switch (op) {
7748 case 2: /* Signed bitfield extract. */
7749 imm++;
7750 if (shift + imm > 32)
7751 goto illegal_op;
7752 if (imm < 32)
6ddbc6e4 7753 gen_sbfx(tmp, shift, imm);
7754 break;
7755 case 6: /* Unsigned bitfield extract. */
7756 imm++;
7757 if (shift + imm > 32)
7758 goto illegal_op;
7759 if (imm < 32)
6ddbc6e4 7760 gen_ubfx(tmp, shift, (1u << imm) - 1);
7761 break;
7762 case 3: /* Bitfield insert/clear. */
7763 if (imm < shift)
7764 goto illegal_op;
7765 imm = imm + 1 - shift;
7766 if (imm != 32) {
6ddbc6e4 7767 tmp2 = load_reg(s, rd);
8f8e3aa4 7768 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7769 dead_tmp(tmp2);
7770 }
7771 break;
7772 case 7:
7773 goto illegal_op;
7774 default: /* Saturate. */
7775 if (shift) {
7776 if (op & 1)
6ddbc6e4 7777 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7778 else
6ddbc6e4 7779 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7780 }
6ddbc6e4 7781 tmp2 = tcg_const_i32(imm);
7782 if (op & 4) {
7783 /* Unsigned. */
9ee6e8bb 7784 if ((op & 1) && shift == 0)
6ddbc6e4 7785 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7786 else
6ddbc6e4 7787 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7788 } else {
9ee6e8bb 7789 /* Signed. */
9ee6e8bb 7790 if ((op & 1) && shift == 0)
6ddbc6e4 7791 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7792 else
6ddbc6e4 7793 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7794 }
9ee6e8bb 7795 break;
2c0262af 7796 }
6ddbc6e4 7797 store_reg(s, rd, tmp);
7798 } else {
7799 imm = ((insn & 0x04000000) >> 15)
7800 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7801 if (insn & (1 << 22)) {
7802 /* 16-bit immediate. */
7803 imm |= (insn >> 4) & 0xf000;
7804 if (insn & (1 << 23)) {
7805 /* movt */
5e3f878a 7806 tmp = load_reg(s, rd);
86831435 7807 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7808 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7809 } else {
9ee6e8bb 7810 /* movw */
7811 tmp = new_tmp();
7812 tcg_gen_movi_i32(tmp, imm);
7813 }
7814 } else {
7815 /* Add/sub 12-bit immediate. */
7816 if (rn == 15) {
b0109805 7817 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7818 if (insn & (1 << 23))
b0109805 7819 offset -= imm;
9ee6e8bb 7820 else
b0109805 7821 offset += imm;
7822 tmp = new_tmp();
7823 tcg_gen_movi_i32(tmp, offset);
2c0262af 7824 } else {
5e3f878a 7825 tmp = load_reg(s, rn);
9ee6e8bb 7826 if (insn & (1 << 23))
5e3f878a 7827 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7828 else
5e3f878a 7829 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7830 }
9ee6e8bb 7831 }
5e3f878a 7832 store_reg(s, rd, tmp);
191abaa2 7833 }
7834 } else {
7835 int shifter_out = 0;
7836 /* modified 12-bit immediate. */
7837 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7838 imm = (insn & 0xff);
7839 switch (shift) {
7840 case 0: /* XY */
7841 /* Nothing to do. */
7842 break;
7843 case 1: /* 00XY00XY */
7844 imm |= imm << 16;
7845 break;
7846 case 2: /* XY00XY00 */
7847 imm |= imm << 16;
7848 imm <<= 8;
7849 break;
7850 case 3: /* XYXYXYXY */
7851 imm |= imm << 16;
7852 imm |= imm << 8;
7853 break;
7854 default: /* Rotated constant. */
7855 shift = (shift << 1) | (imm >> 7);
7856 imm |= 0x80;
7857 imm = imm << (32 - shift);
7858 shifter_out = 1;
7859 break;
b5ff1b31 7860 }
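                    /* This implements the Thumb-2 modified-immediate
                       expansion: the 8-bit value is either replicated into
                       bytes/halfwords (codes 0-3 above) or rotated with
                       bit 7 forced to 1.  shifter_out marks the rotated
                       forms so the data processing op below can produce
                       the shifter carry-out. */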
7861 gen_op_movl_T1_im(imm);
7862 rn = (insn >> 16) & 0xf;
7863 if (rn == 15)
7864 gen_op_movl_T0_im(0);
7865 else
7866 gen_movl_T0_reg(s, rn);
7867 op = (insn >> 21) & 0xf;
7868 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7869 shifter_out))
7870 goto illegal_op;
7871 rd = (insn >> 8) & 0xf;
7872 if (rd != 15) {
7873 gen_movl_reg_T0(s, rd);
2c0262af 7874 }
2c0262af 7875 }
7876 }
7877 break;
7878 case 12: /* Load/store single data item. */
7879 {
7880 int postinc = 0;
7881 int writeback = 0;
b0109805 7882 int user;
7883 if ((insn & 0x01100000) == 0x01000000) {
7884 if (disas_neon_ls_insn(env, s, insn))
c1713132 7885 goto illegal_op;
7886 break;
7887 }
b0109805 7888 user = IS_USER(s);
9ee6e8bb 7889 if (rn == 15) {
b0109805 7890 addr = new_tmp();
7891 /* PC relative. */
7892 /* s->pc has already been incremented by 4. */
7893 imm = s->pc & 0xfffffffc;
7894 if (insn & (1 << 23))
7895 imm += insn & 0xfff;
7896 else
7897 imm -= insn & 0xfff;
b0109805 7898 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7899 } else {
b0109805 7900 addr = load_reg(s, rn);
7901 if (insn & (1 << 23)) {
7902 /* Positive offset. */
7903 imm = insn & 0xfff;
b0109805 7904 tcg_gen_addi_i32(addr, addr, imm);
7905 } else {
7906 op = (insn >> 8) & 7;
7907 imm = insn & 0xff;
7908 switch (op) {
7909 case 0: case 8: /* Shifted Register. */
7910 shift = (insn >> 4) & 0xf;
7911 if (shift > 3)
18c9b560 7912 goto illegal_op;
b26eefb6 7913 tmp = load_reg(s, rm);
9ee6e8bb 7914 if (shift)
b26eefb6 7915 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7916 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7917 dead_tmp(tmp);
7918 break;
7919 case 4: /* Negative offset. */
b0109805 7920 tcg_gen_addi_i32(addr, addr, -imm);
7921 break;
7922 case 6: /* User privilege. */
7923 tcg_gen_addi_i32(addr, addr, imm);
7924 user = 1;
7925 break;
7926 case 1: /* Post-decrement. */
7927 imm = -imm;
7928 /* Fall through. */
7929 case 3: /* Post-increment. */
7930 postinc = 1;
7931 writeback = 1;
7932 break;
7933 case 5: /* Pre-decrement. */
7934 imm = -imm;
7935 /* Fall through. */
7936 case 7: /* Pre-increment. */
b0109805 7937 tcg_gen_addi_i32(addr, addr, imm);
7938 writeback = 1;
7939 break;
7940 default:
b7bcbe95 7941 goto illegal_op;
7942 }
7943 }
7944 }
7945 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
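            /* op[1:0] = access size (0 byte, 1 halfword, 2 word) from
               insn[22:21]; op[2] = sign-extend from insn[24].  This value
               indexes the load/store cases below. */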
7946 if (insn & (1 << 20)) {
7947 /* Load. */
7948 if (rs == 15 && op != 2) {
7949 if (op & 2)
b5ff1b31 7950 goto illegal_op;
7951 /* Memory hint. Implemented as NOP. */
7952 } else {
7953 switch (op) {
7954 case 0: tmp = gen_ld8u(addr, user); break;
7955 case 4: tmp = gen_ld8s(addr, user); break;
7956 case 1: tmp = gen_ld16u(addr, user); break;
7957 case 5: tmp = gen_ld16s(addr, user); break;
7958 case 2: tmp = gen_ld32(addr, user); break;
7959 default: goto illegal_op;
7960 }
7961 if (rs == 15) {
b0109805 7962 gen_bx(s, tmp);
9ee6e8bb 7963 } else {
b0109805 7964 store_reg(s, rs, tmp);
7965 }
7966 }
7967 } else {
7968 /* Store. */
7969 if (rs == 15)
b7bcbe95 7970 goto illegal_op;
b0109805 7971 tmp = load_reg(s, rs);
9ee6e8bb 7972 switch (op) {
7973 case 0: gen_st8(tmp, addr, user); break;
7974 case 1: gen_st16(tmp, addr, user); break;
7975 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 7976 default: goto illegal_op;
b7bcbe95 7977 }
2c0262af 7978 }
9ee6e8bb 7979 if (postinc)
7980 tcg_gen_addi_i32(addr, addr, imm);
7981 if (writeback) {
7982 store_reg(s, rn, addr);
7983 } else {
7984 dead_tmp(addr);
7985 }
7986 }
7987 break;
7988 default:
7989 goto illegal_op;
2c0262af 7990 }
7991 return 0;
7992illegal_op:
7993 return 1;
7994}
7995
9ee6e8bb 7996static void disas_thumb_insn(CPUState *env, DisasContext *s)
7997{
7998 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7999 int32_t offset;
8000 int i;
b26eefb6 8001 TCGv tmp;
d9ba4830 8002 TCGv tmp2;
b0109805 8003 TCGv addr;
99c475ab 8004
8005 if (s->condexec_mask) {
8006 cond = s->condexec_cond;
8007 s->condlabel = gen_new_label();
d9ba4830 8008 gen_test_cc(cond ^ 1, s->condlabel);
8009 s->condjmp = 1;
8010 }
8011
b5ff1b31 8012 insn = lduw_code(s->pc);
99c475ab 8013 s->pc += 2;
b5ff1b31 8014
8015 switch (insn >> 12) {
8016 case 0: case 1:
8017 rd = insn & 7;
8018 op = (insn >> 11) & 3;
8019 if (op == 3) {
8020 /* add/subtract */
8021 rn = (insn >> 3) & 7;
8022 gen_movl_T0_reg(s, rn);
8023 if (insn & (1 << 10)) {
8024 /* immediate */
8025 gen_op_movl_T1_im((insn >> 6) & 7);
8026 } else {
8027 /* reg */
8028 rm = (insn >> 6) & 7;
8029 gen_movl_T1_reg(s, rm);
8030 }
8031 if (insn & (1 << 9)) {
8032 if (s->condexec_mask)
8033 gen_op_subl_T0_T1();
8034 else
8035 gen_op_subl_T0_T1_cc();
8036 } else {
8037 if (s->condexec_mask)
8038 gen_op_addl_T0_T1();
8039 else
8040 gen_op_addl_T0_T1_cc();
8041 }
8042 gen_movl_reg_T0(s, rd);
8043 } else {
8044 /* shift immediate */
8045 rm = (insn >> 3) & 7;
8046 shift = (insn >> 6) & 0x1f;
8047 tmp = load_reg(s, rm);
8048 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8049 if (!s->condexec_mask)
8050 gen_logic_CC(tmp);
8051 store_reg(s, rd, tmp);
8052 }
8053 break;
8054 case 2: case 3:
8055 /* arithmetic large immediate */
8056 op = (insn >> 11) & 3;
8057 rd = (insn >> 8) & 0x7;
8058 if (op == 0) {
8059 gen_op_movl_T0_im(insn & 0xff);
8060 } else {
8061 gen_movl_T0_reg(s, rd);
8062 gen_op_movl_T1_im(insn & 0xff);
8063 }
8064 switch (op) {
8065 case 0: /* mov */
8066 if (!s->condexec_mask)
8067 gen_op_logic_T0_cc();
8068 break;
8069 case 1: /* cmp */
8070 gen_op_subl_T0_T1_cc();
8071 break;
8072 case 2: /* add */
8073 if (s->condexec_mask)
8074 gen_op_addl_T0_T1();
8075 else
8076 gen_op_addl_T0_T1_cc();
8077 break;
8078 case 3: /* sub */
8079 if (s->condexec_mask)
8080 gen_op_subl_T0_T1();
8081 else
8082 gen_op_subl_T0_T1_cc();
8083 break;
8084 }
8085 if (op != 1)
8086 gen_movl_reg_T0(s, rd);
8087 break;
8088 case 4:
8089 if (insn & (1 << 11)) {
8090 rd = (insn >> 8) & 7;
8091 /* load pc-relative. Bit 1 of PC is ignored. */
8092 val = s->pc + 2 + ((insn & 0xff) * 4);
8093 val &= ~(uint32_t)2;
8094 addr = new_tmp();
8095 tcg_gen_movi_i32(addr, val);
8096 tmp = gen_ld32(addr, IS_USER(s));
8097 dead_tmp(addr);
8098 store_reg(s, rd, tmp);
8099 break;
8100 }
8101 if (insn & (1 << 10)) {
8102 /* data processing extended or blx */
8103 rd = (insn & 7) | ((insn >> 4) & 8);
8104 rm = (insn >> 3) & 0xf;
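            /* High-register forms: rd takes its top bit from insn[7] (H1)
               while rm is the full 4-bit field insn[6:3]. */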
8105 op = (insn >> 8) & 3;
8106 switch (op) {
8107 case 0: /* add */
8108 gen_movl_T0_reg(s, rd);
8109 gen_movl_T1_reg(s, rm);
8110 gen_op_addl_T0_T1();
8111 gen_movl_reg_T0(s, rd);
8112 break;
8113 case 1: /* cmp */
8114 gen_movl_T0_reg(s, rd);
8115 gen_movl_T1_reg(s, rm);
8116 gen_op_subl_T0_T1_cc();
8117 break;
8118 case 2: /* mov/cpy */
8119 gen_movl_T0_reg(s, rm);
8120 gen_movl_reg_T0(s, rd);
8121 break;
8122 case 3:/* branch [and link] exchange thumb register */
b0109805 8123 tmp = load_reg(s, rm);
8124 if (insn & (1 << 7)) {
8125 val = (uint32_t)s->pc | 1;
8126 tmp2 = new_tmp();
8127 tcg_gen_movi_i32(tmp2, val);
8128 store_reg(s, 14, tmp2);
99c475ab 8129 }
d9ba4830 8130 gen_bx(s, tmp);
8131 break;
8132 }
8133 break;
8134 }
8135
8136 /* data processing register */
8137 rd = insn & 7;
8138 rm = (insn >> 3) & 7;
8139 op = (insn >> 6) & 0xf;
8140 if (op == 2 || op == 3 || op == 4 || op == 7) {
8141 /* the shift/rotate ops want the operands backwards */
8142 val = rm;
8143 rm = rd;
8144 rd = val;
8145 val = 1;
8146 } else {
8147 val = 0;
8148 }
8149
8150 if (op == 9) /* neg */
8151 gen_op_movl_T0_im(0);
8152 else if (op != 0xf) /* mvn doesn't read its first operand */
8153 gen_movl_T0_reg(s, rd);
8154
8155 gen_movl_T1_reg(s, rm);
5899f386 8156 switch (op) {
8157 case 0x0: /* and */
8158 gen_op_andl_T0_T1();
8159 if (!s->condexec_mask)
8160 gen_op_logic_T0_cc();
8161 break;
8162 case 0x1: /* eor */
8163 gen_op_xorl_T0_T1();
8164 if (!s->condexec_mask)
8165 gen_op_logic_T0_cc();
8166 break;
8167 case 0x2: /* lsl */
9ee6e8bb 8168 if (s->condexec_mask) {
8984bd2e 8169 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8170 } else {
8984bd2e 8171 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8172 gen_op_logic_T1_cc();
8173 }
8174 break;
8175 case 0x3: /* lsr */
9ee6e8bb 8176 if (s->condexec_mask) {
8984bd2e 8177 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8178 } else {
8984bd2e 8179 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8180 gen_op_logic_T1_cc();
8181 }
8182 break;
8183 case 0x4: /* asr */
9ee6e8bb 8184 if (s->condexec_mask) {
8984bd2e 8185 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8186 } else {
8984bd2e 8187 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8188 gen_op_logic_T1_cc();
8189 }
8190 break;
8191 case 0x5: /* adc */
9ee6e8bb 8192 if (s->condexec_mask)
b26eefb6 8193 gen_adc_T0_T1();
8194 else
8195 gen_op_adcl_T0_T1_cc();
8196 break;
8197 case 0x6: /* sbc */
9ee6e8bb 8198 if (s->condexec_mask)
3670669c 8199 gen_sbc_T0_T1();
8200 else
8201 gen_op_sbcl_T0_T1_cc();
8202 break;
8203 case 0x7: /* ror */
9ee6e8bb 8204 if (s->condexec_mask) {
8984bd2e 8205 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8206 } else {
8984bd2e 8207 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8208 gen_op_logic_T1_cc();
8209 }
8210 break;
8211 case 0x8: /* tst */
8212 gen_op_andl_T0_T1();
8213 gen_op_logic_T0_cc();
8214 rd = 16;
5899f386 8215 break;
99c475ab 8216 case 0x9: /* neg */
9ee6e8bb 8217 if (s->condexec_mask)
390efc54 8218 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
8219 else
8220 gen_op_subl_T0_T1_cc();
8221 break;
8222 case 0xa: /* cmp */
8223 gen_op_subl_T0_T1_cc();
8224 rd = 16;
8225 break;
8226 case 0xb: /* cmn */
8227 gen_op_addl_T0_T1_cc();
8228 rd = 16;
8229 break;
8230 case 0xc: /* orr */
8231 gen_op_orl_T0_T1();
8232 if (!s->condexec_mask)
8233 gen_op_logic_T0_cc();
8234 break;
8235 case 0xd: /* mul */
8236 gen_op_mull_T0_T1();
8237 if (!s->condexec_mask)
8238 gen_op_logic_T0_cc();
8239 break;
8240 case 0xe: /* bic */
8241 gen_op_bicl_T0_T1();
8242 if (!s->condexec_mask)
8243 gen_op_logic_T0_cc();
8244 break;
8245 case 0xf: /* mvn */
8246 gen_op_notl_T1();
8247 if (!s->condexec_mask)
8248 gen_op_logic_T1_cc();
99c475ab 8249 val = 1;
5899f386 8250 rm = rd;
8251 break;
8252 }
8253 if (rd != 16) {
8254 if (val)
5899f386 8255 gen_movl_reg_T1(s, rm);
8256 else
8257 gen_movl_reg_T0(s, rd);
8258 }
8259 break;
8260
8261 case 5:
8262 /* load/store register offset. */
8263 rd = insn & 7;
8264 rn = (insn >> 3) & 7;
8265 rm = (insn >> 6) & 7;
8266 op = (insn >> 9) & 7;
b0109805 8267 addr = load_reg(s, rn);
b26eefb6 8268 tmp = load_reg(s, rm);
b0109805 8269 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8270 dead_tmp(tmp);
8271
8272 if (op < 3) /* store */
b0109805 8273 tmp = load_reg(s, rd);
8274
8275 switch (op) {
8276 case 0: /* str */
b0109805 8277 gen_st32(tmp, addr, IS_USER(s));
8278 break;
8279 case 1: /* strh */
b0109805 8280 gen_st16(tmp, addr, IS_USER(s));
8281 break;
8282 case 2: /* strb */
b0109805 8283 gen_st8(tmp, addr, IS_USER(s));
8284 break;
8285 case 3: /* ldrsb */
b0109805 8286 tmp = gen_ld8s(addr, IS_USER(s));
8287 break;
8288 case 4: /* ldr */
b0109805 8289 tmp = gen_ld32(addr, IS_USER(s));
8290 break;
8291 case 5: /* ldrh */
b0109805 8292 tmp = gen_ld16u(addr, IS_USER(s));
8293 break;
8294 case 6: /* ldrb */
b0109805 8295 tmp = gen_ld8u(addr, IS_USER(s));
8296 break;
8297 case 7: /* ldrsh */
b0109805 8298 tmp = gen_ld16s(addr, IS_USER(s));
8299 break;
8300 }
8301 if (op >= 3) /* load */
8302 store_reg(s, rd, tmp);
8303 dead_tmp(addr);
8304 break;
8305
8306 case 6:
8307 /* load/store word immediate offset */
8308 rd = insn & 7;
8309 rn = (insn >> 3) & 7;
b0109805 8310 addr = load_reg(s, rn);
99c475ab 8311 val = (insn >> 4) & 0x7c;
b0109805 8312 tcg_gen_addi_i32(addr, addr, val);
8313
8314 if (insn & (1 << 11)) {
8315 /* load */
8316 tmp = gen_ld32(addr, IS_USER(s));
8317 store_reg(s, rd, tmp);
8318 } else {
8319 /* store */
8320 tmp = load_reg(s, rd);
8321 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8322 }
b0109805 8323 dead_tmp(addr);
8324 break;
8325
8326 case 7:
8327 /* load/store byte immediate offset */
8328 rd = insn & 7;
8329 rn = (insn >> 3) & 7;
b0109805 8330 addr = load_reg(s, rn);
99c475ab 8331 val = (insn >> 6) & 0x1f;
b0109805 8332 tcg_gen_addi_i32(addr, addr, val);
8333
8334 if (insn & (1 << 11)) {
8335 /* load */
8336 tmp = gen_ld8u(addr, IS_USER(s));
8337 store_reg(s, rd, tmp);
8338 } else {
8339 /* store */
8340 tmp = load_reg(s, rd);
8341 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8342 }
b0109805 8343 dead_tmp(addr);
8344 break;
8345
8346 case 8:
8347 /* load/store halfword immediate offset */
8348 rd = insn & 7;
8349 rn = (insn >> 3) & 7;
b0109805 8350 addr = load_reg(s, rn);
99c475ab 8351 val = (insn >> 5) & 0x3e;
b0109805 8352 tcg_gen_addi_i32(addr, addr, val);
8353
8354 if (insn & (1 << 11)) {
8355 /* load */
8356 tmp = gen_ld16u(addr, IS_USER(s));
8357 store_reg(s, rd, tmp);
8358 } else {
8359 /* store */
8360 tmp = load_reg(s, rd);
8361 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8362 }
b0109805 8363 dead_tmp(addr);
8364 break;
8365
8366 case 9:
8367 /* load/store from stack */
8368 rd = (insn >> 8) & 7;
b0109805 8369 addr = load_reg(s, 13);
99c475ab 8370 val = (insn & 0xff) * 4;
b0109805 8371 tcg_gen_addi_i32(addr, addr, val);
8372
8373 if (insn & (1 << 11)) {
8374 /* load */
8375 tmp = gen_ld32(addr, IS_USER(s));
8376 store_reg(s, rd, tmp);
8377 } else {
8378 /* store */
8379 tmp = load_reg(s, rd);
8380 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8381 }
b0109805 8382 dead_tmp(addr);
8383 break;
8384
8385 case 10:
8386 /* add to high reg */
8387 rd = (insn >> 8) & 7;
8388 if (insn & (1 << 11)) {
8389 /* SP */
5e3f878a 8390 tmp = load_reg(s, 13);
8391 } else {
8392 /* PC. bit 1 is ignored. */
8393 tmp = new_tmp();
8394 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8395 }
99c475ab 8396 val = (insn & 0xff) * 4;
8397 tcg_gen_addi_i32(tmp, tmp, val);
8398 store_reg(s, rd, tmp);
8399 break;
8400
8401 case 11:
8402 /* misc */
8403 op = (insn >> 8) & 0xf;
8404 switch (op) {
8405 case 0:
8406 /* adjust stack pointer */
b26eefb6 8407 tmp = load_reg(s, 13);
8408 val = (insn & 0x7f) * 4;
8409 if (insn & (1 << 7))
6a0d8a1d 8410 val = -(int32_t)val;
8411 tcg_gen_addi_i32(tmp, tmp, val);
8412 store_reg(s, 13, tmp);
8413 break;
8414
8415 case 2: /* sign/zero extend. */
8416 ARCH(6);
8417 rd = insn & 7;
8418 rm = (insn >> 3) & 7;
b0109805 8419 tmp = load_reg(s, rm);
9ee6e8bb 8420 switch ((insn >> 6) & 3) {
8421 case 0: gen_sxth(tmp); break;
8422 case 1: gen_sxtb(tmp); break;
8423 case 2: gen_uxth(tmp); break;
8424 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8425 }
b0109805 8426 store_reg(s, rd, tmp);
9ee6e8bb 8427 break;
8428 case 4: case 5: case 0xc: case 0xd:
8429 /* push/pop */
b0109805 8430 addr = load_reg(s, 13);
8431 if (insn & (1 << 8))
8432 offset = 4;
99c475ab 8433 else
8434 offset = 0;
8435 for (i = 0; i < 8; i++) {
8436 if (insn & (1 << i))
8437 offset += 4;
8438 }
8439 if ((insn & (1 << 11)) == 0) {
b0109805 8440 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8441 }
8442 for (i = 0; i < 8; i++) {
8443 if (insn & (1 << i)) {
8444 if (insn & (1 << 11)) {
8445 /* pop */
8446 tmp = gen_ld32(addr, IS_USER(s));
8447 store_reg(s, i, tmp);
8448 } else {
8449 /* push */
8450 tmp = load_reg(s, i);
8451 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8452 }
5899f386 8453 /* advance to the next address. */
b0109805 8454 tcg_gen_addi_i32(addr, addr, 4);
8455 }
8456 }
a50f5b91 8457 TCGV_UNUSED(tmp);
8458 if (insn & (1 << 8)) {
8459 if (insn & (1 << 11)) {
8460 /* pop pc */
b0109805 8461 tmp = gen_ld32(addr, IS_USER(s));
8462 /* don't set the pc until the rest of the instruction
8463 has completed */
8464 } else {
8465 /* push lr */
8466 tmp = load_reg(s, 14);
8467 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8468 }
b0109805 8469 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8470 }
5899f386 8471 if ((insn & (1 << 11)) == 0) {
b0109805 8472 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8473 }
99c475ab 8474 /* write back the new stack pointer */
b0109805 8475 store_reg(s, 13, addr);
8476 /* set the new PC value */
8477 if ((insn & 0x0900) == 0x0900)
b0109805 8478 gen_bx(s, tmp);
8479 break;
8480
8481 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8482 rm = insn & 7;
d9ba4830 8483 tmp = load_reg(s, rm);
8484 s->condlabel = gen_new_label();
8485 s->condjmp = 1;
8486 if (insn & (1 << 11))
cb63669a 8487 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8488 else
cb63669a 8489 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8490 dead_tmp(tmp);
8491 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
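            /* CBZ/CBNZ offset is i:imm5:'0' (insn[9] and insn[7:3]),
               zero-extended and taken relative to the architectural PC,
               i.e. this instruction's address + 4 (= s->pc + 2 here). */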
8492 val = (uint32_t)s->pc + 2;
8493 val += offset;
8494 gen_jmp(s, val);
8495 break;
8496
8497 case 15: /* IT, nop-hint. */
8498 if ((insn & 0xf) == 0) {
8499 gen_nop_hint(s, (insn >> 4) & 0xf);
8500 break;
8501 }
8502 /* If Then. */
8503 s->condexec_cond = (insn >> 4) & 0xe;
8504 s->condexec_mask = insn & 0x1f;
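            /* condexec_cond keeps firstcond[3:1]; firstcond[0] sits at the
               top of the 5-bit condexec_mask and is folded back into the
               condition as the IT block advances in the translator loop. */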
8505 /* No actual code generated for this insn, just setup state. */
8506 break;
8507
06c949e6 8508 case 0xe: /* bkpt */
9ee6e8bb 8509 gen_set_condexec(s);
5e3f878a 8510 gen_set_pc_im(s->pc - 2);
d9ba4830 8511 gen_exception(EXCP_BKPT);
8512 s->is_jmp = DISAS_JUMP;
8513 break;
8514
8515 case 0xa: /* rev */
8516 ARCH(6);
8517 rn = (insn >> 3) & 0x7;
8518 rd = insn & 0x7;
b0109805 8519 tmp = load_reg(s, rn);
9ee6e8bb 8520 switch ((insn >> 6) & 3) {
8521 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8522 case 1: gen_rev16(tmp); break;
8523 case 3: gen_revsh(tmp); break;
8524 default: goto illegal_op;
8525 }
b0109805 8526 store_reg(s, rd, tmp);
8527 break;
8528
8529 case 6: /* cps */
8530 ARCH(6);
8531 if (IS_USER(s))
8532 break;
8533 if (IS_M(env)) {
8984bd2e 8534 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8535 /* PRIMASK */
8536 if (insn & 1) {
8537 addr = tcg_const_i32(16);
8538 gen_helper_v7m_msr(cpu_env, addr, tmp);
8539 }
9ee6e8bb 8540 /* FAULTMASK */
8541 if (insn & 2) {
8542 addr = tcg_const_i32(17);
8543 gen_helper_v7m_msr(cpu_env, addr, tmp);
8544 }
8545 gen_lookup_tb(s);
8546 } else {
8547 if (insn & (1 << 4))
8548 shift = CPSR_A | CPSR_I | CPSR_F;
8549 else
8550 shift = 0;
8551
8552 val = ((insn & 7) << 6) & shift;
8553 gen_op_movl_T0_im(val);
8554 gen_set_psr_T0(s, shift, 0);
8555 }
8556 break;
8557
8558 default:
8559 goto undef;
8560 }
8561 break;
8562
8563 case 12:
8564 /* load/store multiple */
8565 rn = (insn >> 8) & 0x7;
b0109805 8566 addr = load_reg(s, rn);
8567 for (i = 0; i < 8; i++) {
8568 if (insn & (1 << i)) {
8569 if (insn & (1 << 11)) {
8570 /* load */
8571 tmp = gen_ld32(addr, IS_USER(s));
8572 store_reg(s, i, tmp);
8573 } else {
8574 /* store */
8575 tmp = load_reg(s, i);
8576 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8577 }
5899f386 8578 /* advance to the next address */
b0109805 8579 tcg_gen_addi_i32(addr, addr, 4);
8580 }
8581 }
5899f386 8582 /* Base register writeback. */
8583 if ((insn & (1 << rn)) == 0) {
8584 store_reg(s, rn, addr);
8585 } else {
8586 dead_tmp(addr);
8587 }
8588 break;
8589
8590 case 13:
8591 /* conditional branch or swi */
8592 cond = (insn >> 8) & 0xf;
8593 if (cond == 0xe)
8594 goto undef;
8595
8596 if (cond == 0xf) {
8597 /* swi */
9ee6e8bb 8598 gen_set_condexec(s);
422ebf69 8599 gen_set_pc_im(s->pc);
9ee6e8bb 8600 s->is_jmp = DISAS_SWI;
8601 break;
8602 }
8603 /* generate a conditional jump to next instruction */
e50e6a20 8604 s->condlabel = gen_new_label();
d9ba4830 8605 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8606 s->condjmp = 1;
8607 gen_movl_T1_reg(s, 15);
8608
8609 /* jump to the offset */
5899f386 8610 val = (uint32_t)s->pc + 2;
99c475ab 8611 offset = ((int32_t)insn << 24) >> 24;
5899f386 8612 val += offset << 1;
8aaca4c0 8613 gen_jmp(s, val);
8614 break;
8615
8616 case 14:
358bf29e 8617 if (insn & (1 << 11)) {
8618 if (disas_thumb2_insn(env, s, insn))
8619 goto undef32;
8620 break;
8621 }
9ee6e8bb 8622 /* unconditional branch */
8623 val = (uint32_t)s->pc;
8624 offset = ((int32_t)insn << 21) >> 21;
8625 val += (offset << 1) + 2;
8aaca4c0 8626 gen_jmp(s, val);
8627 break;
8628
8629 case 15:
9ee6e8bb 8630 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8631 goto undef32;
9ee6e8bb 8632 break;
8633 }
8634 return;
8635undef32:
8636 gen_set_condexec(s);
5e3f878a 8637 gen_set_pc_im(s->pc - 4);
d9ba4830 8638 gen_exception(EXCP_UDEF);
8639 s->is_jmp = DISAS_JUMP;
8640 return;
8641illegal_op:
99c475ab 8642undef:
9ee6e8bb 8643 gen_set_condexec(s);
5e3f878a 8644 gen_set_pc_im(s->pc - 2);
d9ba4830 8645 gen_exception(EXCP_UDEF);
8646 s->is_jmp = DISAS_JUMP;
8647}
8648
8649/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8650 basic block 'tb'. If search_pc is TRUE, also generate PC
8651 information for each intermediate instruction. */
8652static inline void gen_intermediate_code_internal(CPUState *env,
8653 TranslationBlock *tb,
8654 int search_pc)
8655{
8656 DisasContext dc1, *dc = &dc1;
a1d1bb31 8657 CPUBreakpoint *bp;
8658 uint16_t *gen_opc_end;
8659 int j, lj;
0fa85d43 8660 target_ulong pc_start;
b5ff1b31 8661 uint32_t next_page_start;
8662 int num_insns;
8663 int max_insns;
3b46e624 8664
2c0262af 8665 /* generate intermediate code */
8666 num_temps = 0;
8667 memset(temps, 0, sizeof(temps));
8668
0fa85d43 8669 pc_start = tb->pc;
3b46e624 8670
8671 dc->tb = tb;
8672
2c0262af 8673 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8674
8675 dc->is_jmp = DISAS_NEXT;
8676 dc->pc = pc_start;
8aaca4c0 8677 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8678 dc->condjmp = 0;
5899f386 8679 dc->thumb = env->thumb;
8680 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8681 dc->condexec_cond = env->condexec_bits >> 4;
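 /* condexec_bits packs the Thumb IT state: the high bits hold the
    condition and the low nibble the mask, which is widened by one bit
    here so that a zero mask means "not inside an IT block". */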
b5ff1b31 8682#if !defined(CONFIG_USER_ONLY)
8683 if (IS_M(env)) {
8684 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8685 } else {
8686 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8687 }
b5ff1b31 8688#endif
8689 cpu_F0s = tcg_temp_new_i32();
8690 cpu_F1s = tcg_temp_new_i32();
8691 cpu_F0d = tcg_temp_new_i64();
8692 cpu_F1d = tcg_temp_new_i64();
8693 cpu_V0 = cpu_F0d;
8694 cpu_V1 = cpu_F1d;
e677137d 8695 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8696 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8697 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8698 lj = -1;
8699 num_insns = 0;
8700 max_insns = tb->cflags & CF_COUNT_MASK;
8701 if (max_insns == 0)
8702 max_insns = CF_COUNT_MASK;
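 /* A zero CF_COUNT field means the TB carries no instruction budget, so
    fall back to the maximum; the loop below is then bounded only by the
    opcode buffer, the page boundary and the jump/singlestep checks. */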
8703
8704 gen_icount_start();
8705 /* Reset the conditional execution bits immediately. This avoids
8706 complications trying to do it at the end of the block. */
8707 if (env->condexec_bits)
8708 {
8709 TCGv tmp = new_tmp();
8710 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8711 store_cpu_field(tmp, condexec_bits);
8f01245e 8712 }
2c0262af 8713 do {
8714#ifdef CONFIG_USER_ONLY
8715 /* Intercept jump to the magic kernel page. */
8716 if (dc->pc >= 0xffff0000) {
8717 /* We always get here via a jump, so we know we are not in a
8718 conditional execution block. */
8719 gen_exception(EXCP_KERNEL_TRAP);
8720 dc->is_jmp = DISAS_UPDATE;
8721 break;
8722 }
8723#else
8724 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8725 /* We always get here via a jump, so we know we are not in a
8726 conditional execution block. */
d9ba4830 8727 gen_exception(EXCP_EXCEPTION_EXIT);
8728 dc->is_jmp = DISAS_UPDATE;
8729 break;
8730 }
8731#endif
8732
8733 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
8734 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8735 if (bp->pc == dc->pc) {
9ee6e8bb 8736 gen_set_condexec(dc);
5e3f878a 8737 gen_set_pc_im(dc->pc);
d9ba4830 8738 gen_exception(EXCP_DEBUG);
1fddef4b 8739 dc->is_jmp = DISAS_JUMP;
8740 /* Advance PC so that clearing the breakpoint will
8741 invalidate this TB. */
8742 dc->pc += 2;
8743 goto done_generating;
8744 break;
8745 }
8746 }
8747 }
8748 if (search_pc) {
8749 j = gen_opc_ptr - gen_opc_buf;
8750 if (lj < j) {
8751 lj++;
8752 while (lj < j)
8753 gen_opc_instr_start[lj++] = 0;
8754 }
0fa85d43 8755 gen_opc_pc[lj] = dc->pc;
2c0262af 8756 gen_opc_instr_start[lj] = 1;
2e70f6ef 8757 gen_opc_icount[lj] = num_insns;
2c0262af 8758 }
e50e6a20 8759
8760 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8761 gen_io_start();
8762
8763 if (env->thumb) {
8764 disas_thumb_insn(env, dc);
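        /* Advance the IT-block state after each Thumb insn: shift the
           mask left and fold its old top bit into the low bit of the
           condition, clearing everything once the block is finished. */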
8765 if (dc->condexec_mask) {
8766 dc->condexec_cond = (dc->condexec_cond & 0xe)
8767 | ((dc->condexec_mask >> 4) & 1);
8768 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8769 if (dc->condexec_mask == 0) {
8770 dc->condexec_cond = 0;
8771 }
8772 }
8773 } else {
8774 disas_arm_insn(env, dc);
8775 }
8776 if (num_temps) {
8777 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8778 num_temps = 0;
8779 }
8780
8781 if (dc->condjmp && !dc->is_jmp) {
8782 gen_set_label(dc->condlabel);
8783 dc->condjmp = 0;
8784 }
aaf2d97d 8785 /* Translation stops when a conditional branch is encountered.
e50e6a20 8786 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8787 * Also stop translation when a page boundary is reached. This
bf20dc07 8788 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8789 num_insns ++;
8790 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8791 !env->singlestep_enabled &&
8792 dc->pc < next_page_start &&
8793 num_insns < max_insns);
8794
8795 if (tb->cflags & CF_LAST_IO) {
8796 if (dc->condjmp) {
8797 /* FIXME: This can theoretically happen with self-modifying
8798 code. */
8799 cpu_abort(env, "IO on conditional branch instruction");
8800 }
8801 gen_io_end();
8802 }
9ee6e8bb 8803
b5ff1b31 8804 /* At this stage dc->condjmp will only be set when the skipped
8805 instruction was a conditional branch or trap, and the PC has
8806 already been written. */
551bd27f 8807 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8808 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8809 if (dc->condjmp) {
8810 gen_set_condexec(dc);
8811 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8812 gen_exception(EXCP_SWI);
9ee6e8bb 8813 } else {
d9ba4830 8814 gen_exception(EXCP_DEBUG);
9ee6e8bb 8815 }
8816 gen_set_label(dc->condlabel);
8817 }
8818 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8819 gen_set_pc_im(dc->pc);
e50e6a20 8820 dc->condjmp = 0;
8aaca4c0 8821 }
8822 gen_set_condexec(dc);
8823 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8824 gen_exception(EXCP_SWI);
8825 } else {
8826 /* FIXME: Single stepping a WFI insn will not halt
8827 the CPU. */
d9ba4830 8828 gen_exception(EXCP_DEBUG);
9ee6e8bb 8829 }
8aaca4c0 8830 } else {
8831 /* While branches must always occur at the end of an IT block,
8832 there are a few other things that can cause us to terminate
8833 the TB in the middle of an IT block:
8834 - Exception generating instructions (bkpt, swi, undefined).
8835 - Page boundaries.
8836 - Hardware watchpoints.
8837 Hardware breakpoints have already been handled and skip this code.
8838 */
8839 gen_set_condexec(dc);
8aaca4c0 8840 switch(dc->is_jmp) {
8aaca4c0 8841 case DISAS_NEXT:
6e256c93 8842 gen_goto_tb(dc, 1, dc->pc);
8843 break;
8844 default:
8845 case DISAS_JUMP:
8846 case DISAS_UPDATE:
8847 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8848 tcg_gen_exit_tb(0);
8849 break;
8850 case DISAS_TB_JUMP:
8851 /* nothing more to generate */
8852 break;
9ee6e8bb 8853 case DISAS_WFI:
d9ba4830 8854 gen_helper_wfi();
8855 break;
8856 case DISAS_SWI:
d9ba4830 8857 gen_exception(EXCP_SWI);
9ee6e8bb 8858 break;
8aaca4c0 8859 }
8860 if (dc->condjmp) {
8861 gen_set_label(dc->condlabel);
9ee6e8bb 8862 gen_set_condexec(dc);
6e256c93 8863 gen_goto_tb(dc, 1, dc->pc);
8864 dc->condjmp = 0;
8865 }
2c0262af 8866 }
2e70f6ef 8867
9ee6e8bb 8868done_generating:
2e70f6ef 8869 gen_icount_end(tb, num_insns);
8870 *gen_opc_ptr = INDEX_op_end;
8871
8872#ifdef DEBUG_DISAS
e19e89a5 8873 if (loglevel & CPU_LOG_TB_IN_ASM) {
8874 fprintf(logfile, "----------------\n");
8875 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 8876 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
8877 fprintf(logfile, "\n");
8878 }
8879#endif
8880 if (search_pc) {
8881 j = gen_opc_ptr - gen_opc_buf;
8882 lj++;
8883 while (lj <= j)
8884 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8885 } else {
2c0262af 8886 tb->size = dc->pc - pc_start;
2e70f6ef 8887 tb->icount = num_insns;
b5ff1b31 8888 }
8889}
8890
2cfc5f17 8891void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 8892{
2cfc5f17 8893 gen_intermediate_code_internal(env, tb, 0);
8894}
8895
2cfc5f17 8896void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 8897{
2cfc5f17 8898 gen_intermediate_code_internal(env, tb, 1);
8899}
8900
8901static const char *cpu_mode_names[16] = {
8902 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8903 "???", "???", "???", "und", "???", "???", "???", "sys"
8904};
9ee6e8bb 8905
5fafdf24 8906void cpu_dump_state(CPUState *env, FILE *f,
8907 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8908 int flags)
8909{
8910 int i;
06e80fc9 8911#if 0
bc380d17 8912 union {
8913 uint32_t i;
8914 float s;
8915 } s0, s1;
8916 CPU_DoubleU d;
8917 /* ??? This assumes float64 and double have the same layout.
8918 Oh well, it's only debug dumps. */
8919 union {
8920 float64 f64;
8921 double d;
8922 } d0;
06e80fc9 8923#endif
b5ff1b31 8924 uint32_t psr;
8925
8926 for(i=0;i<16;i++) {
7fe48483 8927 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8928 if ((i % 4) == 3)
7fe48483 8929 cpu_fprintf(f, "\n");
2c0262af 8930 else
7fe48483 8931 cpu_fprintf(f, " ");
2c0262af 8932 }
b5ff1b31 8933 psr = cpsr_read(env);
8934 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8935 psr,
8936 psr & (1 << 31) ? 'N' : '-',
8937 psr & (1 << 30) ? 'Z' : '-',
8938 psr & (1 << 29) ? 'C' : '-',
8939 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8940 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8941 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 8942
5e3f878a 8943#if 0
b7bcbe95 8944 for (i = 0; i < 16; i++) {
8945 d.d = env->vfp.regs[i];
8946 s0.i = d.l.lower;
8947 s1.i = d.l.upper;
8948 d0.f64 = d.d;
8949 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8950 i * 2, (int)s0.i, s0.s,
a94a6abf 8951 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8952 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8953 d0.d);
b7bcbe95 8954 }
40f137e1 8955 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 8956#endif
2c0262af 8957}
a6b025d3 8958
8959void gen_pc_load(CPUState *env, TranslationBlock *tb,
8960 unsigned long searched_pc, int pc_pos, void *puc)
8961{
8962 env->regs[15] = gen_opc_pc[pc_pos];
8963}