]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
ARM: cosmetics (Laurent Desnogues).
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
79383c9c 32#include "qemu-log.h"
1497c961 33
a7812ae4 34#include "helpers.h"
1497c961 35#define GEN_HELPER 1
b26eefb6 36#include "helpers.h"
2c0262af 37
9ee6e8bb
PB
/* Architecture feature gates.  5J is not implemented, so always 0. */
#define ENABLE_ARCH_5J  0
#define ENABLE_ARCH_6   arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K  arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7   arm_feature(env, ARM_FEATURE_V7)

/* Bail out to the illegal-instruction path if the required feature is
   absent.  Only usable inside the decoder where illegal_op is in scope. */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 45
2c0262af
FB
46/* internal defines */
47typedef struct DisasContext {
0fa85d43 48 target_ulong pc;
2c0262af 49 int is_jmp;
e50e6a20
FB
50 /* Nonzero if this instruction has been conditionally skipped. */
51 int condjmp;
52 /* The label that will be jumped to when the instruction is skipped. */
53 int condlabel;
9ee6e8bb
PB
54 /* Thumb-2 condtional execution bits. */
55 int condexec_mask;
56 int condexec_cond;
2c0262af 57 struct TranslationBlock *tb;
8aaca4c0 58 int singlestep_enabled;
5899f386 59 int thumb;
b5ff1b31
FB
60#if !defined(CONFIG_USER_ONLY)
61 int user;
62#endif
2c0262af
FB
63} DisasContext;
64
b5ff1b31
FB
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
2c0262af 75
a7812ae4 76static TCGv_ptr cpu_env;
ad69471c 77/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 78static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
ad69471c 79
b26eefb6 80/* FIXME: These should be removed. */
8f8e3aa4 81static TCGv cpu_T[2];
a7812ae4
PB
82static TCGv cpu_F0s, cpu_F1s;
83static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 84
2e70f6ef
PB
85#define ICOUNT_TEMP cpu_T[0]
86#include "gen-icount.h"
87
b26eefb6
PB
88/* initialize TCG globals. */
89void arm_translate_init(void)
90{
a7812ae4
PB
91 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
92
93 cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
94 cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");
b26eefb6 95
a7812ae4
PB
96#define GEN_HELPER 2
97#include "helpers.h"
b26eefb6
PB
98}
99
100/* The code generator doesn't like lots of temporaries, so maintain our own
101 cache for reuse within a function. */
102#define MAX_TEMPS 8
103static int num_temps;
104static TCGv temps[MAX_TEMPS];
105
106/* Allocate a temporary variable. */
a7812ae4 107static TCGv_i32 new_tmp(void)
b26eefb6
PB
108{
109 TCGv tmp;
110 if (num_temps == MAX_TEMPS)
111 abort();
112
a7812ae4 113 if (GET_TCGV_I32(temps[num_temps]))
b26eefb6
PB
114 return temps[num_temps++];
115
a7812ae4 116 tmp = tcg_temp_new_i32();
b26eefb6
PB
117 temps[num_temps++] = tmp;
118 return tmp;
119}
120
121/* Release a temporary variable. */
122static void dead_tmp(TCGv tmp)
123{
124 int i;
125 num_temps--;
126 i = num_temps;
a7812ae4 127 if (TCGV_EQUAL(temps[i], tmp))
b26eefb6
PB
128 return;
129
130 /* Shuffle this temp to the last slot. */
a7812ae4 131 while (!TCGV_EQUAL(temps[i], tmp))
b26eefb6
PB
132 i--;
133 while (i < num_temps) {
134 temps[i] = temps[i + 1];
135 i++;
136 }
137 temps[i] = tmp;
138}
139
d9ba4830
PB
140static inline TCGv load_cpu_offset(int offset)
141{
142 TCGv tmp = new_tmp();
143 tcg_gen_ld_i32(tmp, cpu_env, offset);
144 return tmp;
145}
146
147#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
148
149static inline void store_cpu_offset(TCGv var, int offset)
150{
151 tcg_gen_st_i32(var, cpu_env, offset);
152 dead_tmp(var);
153}
154
155#define store_cpu_field(var, name) \
156 store_cpu_offset(var, offsetof(CPUState, name))
157
b26eefb6
PB
158/* Set a variable to the value of a CPU register. */
159static void load_reg_var(DisasContext *s, TCGv var, int reg)
160{
161 if (reg == 15) {
162 uint32_t addr;
163 /* normaly, since we updated PC, we need only to add one insn */
164 if (s->thumb)
165 addr = (long)s->pc + 2;
166 else
167 addr = (long)s->pc + 4;
168 tcg_gen_movi_i32(var, addr);
169 } else {
170 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
171 }
172}
173
174/* Create a new temporary and set it to the value of a CPU register. */
175static inline TCGv load_reg(DisasContext *s, int reg)
176{
177 TCGv tmp = new_tmp();
178 load_reg_var(s, tmp, reg);
179 return tmp;
180}
181
182/* Set a CPU register. The source must be a temporary and will be
183 marked as dead. */
184static void store_reg(DisasContext *s, int reg, TCGv var)
185{
186 if (reg == 15) {
187 tcg_gen_andi_i32(var, var, ~1);
188 s->is_jmp = DISAS_JUMP;
189 }
190 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
191 dead_tmp(var);
192}
193
194
/* Basic operations on the T0/T1 virtual registers. */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])

/* Flag-setting arithmetic is delegated to helpers that also update the
   CPSR condition flags. */
#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);

#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)

/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
237
238static void gen_exception(int excp)
239{
240 TCGv tmp = new_tmp();
241 tcg_gen_movi_i32(tmp, excp);
242 gen_helper_exception(tmp);
243 dead_tmp(tmp);
244}
245
3670669c
PB
246static void gen_smul_dual(TCGv a, TCGv b)
247{
248 TCGv tmp1 = new_tmp();
249 TCGv tmp2 = new_tmp();
22478e79
AZ
250 tcg_gen_ext16s_i32(tmp1, a);
251 tcg_gen_ext16s_i32(tmp2, b);
3670669c
PB
252 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
253 dead_tmp(tmp2);
254 tcg_gen_sari_i32(a, a, 16);
255 tcg_gen_sari_i32(b, b, 16);
256 tcg_gen_mul_i32(b, b, a);
257 tcg_gen_mov_i32(a, tmp1);
258 dead_tmp(tmp1);
259}
260
261/* Byteswap each halfword. */
262static void gen_rev16(TCGv var)
263{
264 TCGv tmp = new_tmp();
265 tcg_gen_shri_i32(tmp, var, 8);
266 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
267 tcg_gen_shli_i32(var, var, 8);
268 tcg_gen_andi_i32(var, var, 0xff00ff00);
269 tcg_gen_or_i32(var, var, tmp);
270 dead_tmp(tmp);
271}
272
273/* Byteswap low halfword and sign extend. */
274static void gen_revsh(TCGv var)
275{
276 TCGv tmp = new_tmp();
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
279 tcg_gen_shli_i32(var, var, 8);
280 tcg_gen_ext8s_i32(var, var);
281 tcg_gen_or_i32(var, var, tmp);
282 dead_tmp(tmp);
283}
284
285/* Unsigned bitfield extract. */
286static void gen_ubfx(TCGv var, int shift, uint32_t mask)
287{
288 if (shift)
289 tcg_gen_shri_i32(var, var, shift);
290 tcg_gen_andi_i32(var, var, mask);
291}
292
293/* Signed bitfield extract. */
294static void gen_sbfx(TCGv var, int shift, int width)
295{
296 uint32_t signbit;
297
298 if (shift)
299 tcg_gen_sari_i32(var, var, shift);
300 if (shift + width < 32) {
301 signbit = 1u << (width - 1);
302 tcg_gen_andi_i32(var, var, (1u << width) - 1);
303 tcg_gen_xori_i32(var, var, signbit);
304 tcg_gen_subi_i32(var, var, signbit);
305 }
306}
307
308/* Bitfield insertion. Insert val into base. Clobbers base and val. */
309static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
310{
3670669c 311 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
312 tcg_gen_shli_i32(val, val, shift);
313 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
314 tcg_gen_or_i32(dest, base, val);
315}
316
d9ba4830
PB
317/* Round the top 32 bits of a 64-bit value. */
318static void gen_roundqd(TCGv a, TCGv b)
3670669c 319{
d9ba4830
PB
320 tcg_gen_shri_i32(a, a, 31);
321 tcg_gen_add_i32(a, a, b);
3670669c
PB
322}
323
8f01245e
PB
324/* FIXME: Most targets have native widening multiplication.
325 It would be good to use that instead of a full wide multiply. */
5e3f878a 326/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 327static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 328{
a7812ae4
PB
329 TCGv_i64 tmp1 = tcg_temp_new_i64();
330 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
331
332 tcg_gen_extu_i32_i64(tmp1, a);
333 dead_tmp(a);
334 tcg_gen_extu_i32_i64(tmp2, b);
335 dead_tmp(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 return tmp1;
338}
339
a7812ae4 340static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 341{
a7812ae4
PB
342 TCGv_i64 tmp1 = tcg_temp_new_i64();
343 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
344
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 return tmp1;
351}
352
8f01245e
PB
353/* Unsigned 32x32->64 multiply. */
354static void gen_op_mull_T0_T1(void)
355{
a7812ae4
PB
356 TCGv_i64 tmp1 = tcg_temp_new_i64();
357 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e
PB
358
359 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
360 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
361 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
362 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
363 tcg_gen_shri_i64(tmp1, tmp1, 32);
364 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
365}
366
367/* Signed 32x32->64 multiply. */
d9ba4830 368static void gen_imull(TCGv a, TCGv b)
8f01245e 369{
a7812ae4
PB
370 TCGv_i64 tmp1 = tcg_temp_new_i64();
371 TCGv_i64 tmp2 = tcg_temp_new_i64();
8f01245e 372
d9ba4830
PB
373 tcg_gen_ext_i32_i64(tmp1, a);
374 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 375 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 376 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 377 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
378 tcg_gen_trunc_i64_i32(b, tmp1);
379}
d9ba4830 380
8f01245e
PB
381/* Swap low and high halfwords. */
382static void gen_swap_half(TCGv var)
383{
384 TCGv tmp = new_tmp();
385 tcg_gen_shri_i32(tmp, var, 16);
386 tcg_gen_shli_i32(var, var, 16);
387 tcg_gen_or_i32(var, var, tmp);
3670669c 388 dead_tmp(tmp);
8f01245e
PB
389}
390
b26eefb6
PB
391/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
392 tmp = (t0 ^ t1) & 0x8000;
393 t0 &= ~0x8000;
394 t1 &= ~0x8000;
395 t0 = (t0 + t1) ^ tmp;
396 */
397
398static void gen_add16(TCGv t0, TCGv t1)
399{
400 TCGv tmp = new_tmp();
401 tcg_gen_xor_i32(tmp, t0, t1);
402 tcg_gen_andi_i32(tmp, tmp, 0x8000);
403 tcg_gen_andi_i32(t0, t0, ~0x8000);
404 tcg_gen_andi_i32(t1, t1, ~0x8000);
405 tcg_gen_add_i32(t0, t0, t1);
406 tcg_gen_xor_i32(t0, t0, tmp);
407 dead_tmp(tmp);
408 dead_tmp(t1);
409}
410
9a119ff6
PB
411#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
412
b26eefb6
PB
413/* Set CF to the top bit of var. */
414static void gen_set_CF_bit31(TCGv var)
415{
416 TCGv tmp = new_tmp();
417 tcg_gen_shri_i32(tmp, var, 31);
9a119ff6 418 gen_set_CF(var);
b26eefb6
PB
419 dead_tmp(tmp);
420}
421
422/* Set N and Z flags from var. */
423static inline void gen_logic_CC(TCGv var)
424{
6fbe23d5
PB
425 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
426 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
427}
428
429/* T0 += T1 + CF. */
430static void gen_adc_T0_T1(void)
431{
d9ba4830 432 TCGv tmp;
b26eefb6 433 gen_op_addl_T0_T1();
d9ba4830 434 tmp = load_cpu_field(CF);
b26eefb6
PB
435 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
436 dead_tmp(tmp);
437}
438
3670669c
PB
439/* dest = T0 - T1 + CF - 1. */
440static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
441{
d9ba4830 442 TCGv tmp;
3670669c 443 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 444 tmp = load_cpu_field(CF);
3670669c
PB
445 tcg_gen_add_i32(dest, dest, tmp);
446 tcg_gen_subi_i32(dest, dest, 1);
447 dead_tmp(tmp);
448}
449
450#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
451#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
452
b26eefb6
PB
453/* T0 &= ~T1. Clobbers T1. */
454/* FIXME: Implement bic natively. */
8f8e3aa4
PB
455static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
456{
457 TCGv tmp = new_tmp();
458 tcg_gen_not_i32(tmp, t1);
459 tcg_gen_and_i32(dest, t0, tmp);
460 dead_tmp(tmp);
461}
b26eefb6
PB
462static inline void gen_op_bicl_T0_T1(void)
463{
464 gen_op_notl_T1();
465 gen_op_andl_T0_T1();
466}
467
ad69471c
PB
468/* FIXME: Implement this natively. */
469#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
470
b26eefb6
PB
471/* FIXME: Implement this natively. */
472static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
473{
474 TCGv tmp;
475
476 if (i == 0)
477 return;
478
479 tmp = new_tmp();
480 tcg_gen_shri_i32(tmp, t1, i);
481 tcg_gen_shli_i32(t1, t1, 32 - i);
482 tcg_gen_or_i32(t0, t1, tmp);
483 dead_tmp(tmp);
484}
485
9a119ff6 486static void shifter_out_im(TCGv var, int shift)
b26eefb6 487{
9a119ff6
PB
488 TCGv tmp = new_tmp();
489 if (shift == 0) {
490 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 491 } else {
9a119ff6
PB
492 tcg_gen_shri_i32(tmp, var, shift);
493 if (shift != 31);
494 tcg_gen_andi_i32(tmp, tmp, 1);
495 }
496 gen_set_CF(tmp);
497 dead_tmp(tmp);
498}
b26eefb6 499
9a119ff6
PB
500/* Shift by immediate. Includes special handling for shift == 0. */
501static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
502{
503 switch (shiftop) {
504 case 0: /* LSL */
505 if (shift != 0) {
506 if (flags)
507 shifter_out_im(var, 32 - shift);
508 tcg_gen_shli_i32(var, var, shift);
509 }
510 break;
511 case 1: /* LSR */
512 if (shift == 0) {
513 if (flags) {
514 tcg_gen_shri_i32(var, var, 31);
515 gen_set_CF(var);
516 }
517 tcg_gen_movi_i32(var, 0);
518 } else {
519 if (flags)
520 shifter_out_im(var, shift - 1);
521 tcg_gen_shri_i32(var, var, shift);
522 }
523 break;
524 case 2: /* ASR */
525 if (shift == 0)
526 shift = 32;
527 if (flags)
528 shifter_out_im(var, shift - 1);
529 if (shift == 32)
530 shift = 31;
531 tcg_gen_sari_i32(var, var, shift);
532 break;
533 case 3: /* ROR/RRX */
534 if (shift != 0) {
535 if (flags)
536 shifter_out_im(var, shift - 1);
537 tcg_gen_rori_i32(var, var, shift); break;
538 } else {
d9ba4830 539 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
540 if (flags)
541 shifter_out_im(var, 0);
542 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
543 tcg_gen_shli_i32(tmp, tmp, 31);
544 tcg_gen_or_i32(var, var, tmp);
545 dead_tmp(tmp);
b26eefb6
PB
546 }
547 }
548};
549
8984bd2e
PB
550static inline void gen_arm_shift_reg(TCGv var, int shiftop,
551 TCGv shift, int flags)
552{
553 if (flags) {
554 switch (shiftop) {
555 case 0: gen_helper_shl_cc(var, var, shift); break;
556 case 1: gen_helper_shr_cc(var, var, shift); break;
557 case 2: gen_helper_sar_cc(var, var, shift); break;
558 case 3: gen_helper_ror_cc(var, var, shift); break;
559 }
560 } else {
561 switch (shiftop) {
562 case 0: gen_helper_shl(var, var, shift); break;
563 case 1: gen_helper_shr(var, var, shift); break;
564 case 2: gen_helper_sar(var, var, shift); break;
565 case 3: gen_helper_ror(var, var, shift); break;
566 }
567 }
568 dead_tmp(shift);
569}
570
6ddbc6e4
PB
571#define PAS_OP(pfx) \
572 switch (op2) { \
573 case 0: gen_pas_helper(glue(pfx,add16)); break; \
574 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
575 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
576 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
577 case 4: gen_pas_helper(glue(pfx,add8)); break; \
578 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
579 }
d9ba4830 580static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 581{
a7812ae4 582 TCGv_ptr tmp;
6ddbc6e4
PB
583
584 switch (op1) {
585#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
586 case 1:
a7812ae4 587 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
588 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
589 PAS_OP(s)
590 break;
591 case 5:
a7812ae4 592 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
593 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
594 PAS_OP(u)
595 break;
596#undef gen_pas_helper
597#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
598 case 2:
599 PAS_OP(q);
600 break;
601 case 3:
602 PAS_OP(sh);
603 break;
604 case 6:
605 PAS_OP(uq);
606 break;
607 case 7:
608 PAS_OP(uh);
609 break;
610#undef gen_pas_helper
611 }
612}
9ee6e8bb
PB
613#undef PAS_OP
614
6ddbc6e4
PB
615/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
616#define PAS_OP(pfx) \
617 switch (op2) { \
618 case 0: gen_pas_helper(glue(pfx,add8)); break; \
619 case 1: gen_pas_helper(glue(pfx,add16)); break; \
620 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
621 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
622 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
623 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
624 }
d9ba4830 625static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4 626{
a7812ae4 627 TCGv_ptr tmp;
6ddbc6e4
PB
628
629 switch (op1) {
630#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
631 case 0:
a7812ae4 632 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
633 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
634 PAS_OP(s)
635 break;
636 case 4:
a7812ae4 637 tmp = tcg_temp_new_ptr();
6ddbc6e4
PB
638 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
639 PAS_OP(u)
640 break;
641#undef gen_pas_helper
642#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
643 case 1:
644 PAS_OP(q);
645 break;
646 case 2:
647 PAS_OP(sh);
648 break;
649 case 5:
650 PAS_OP(uq);
651 break;
652 case 6:
653 PAS_OP(uh);
654 break;
655#undef gen_pas_helper
656 }
657}
9ee6e8bb
PB
658#undef PAS_OP
659
d9ba4830
PB
660static void gen_test_cc(int cc, int label)
661{
662 TCGv tmp;
663 TCGv tmp2;
d9ba4830
PB
664 int inv;
665
d9ba4830
PB
666 switch (cc) {
667 case 0: /* eq: Z */
6fbe23d5 668 tmp = load_cpu_field(ZF);
cb63669a 669 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
670 break;
671 case 1: /* ne: !Z */
6fbe23d5 672 tmp = load_cpu_field(ZF);
cb63669a 673 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
674 break;
675 case 2: /* cs: C */
676 tmp = load_cpu_field(CF);
cb63669a 677 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
678 break;
679 case 3: /* cc: !C */
680 tmp = load_cpu_field(CF);
cb63669a 681 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
682 break;
683 case 4: /* mi: N */
6fbe23d5 684 tmp = load_cpu_field(NF);
cb63669a 685 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
686 break;
687 case 5: /* pl: !N */
6fbe23d5 688 tmp = load_cpu_field(NF);
cb63669a 689 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
690 break;
691 case 6: /* vs: V */
692 tmp = load_cpu_field(VF);
cb63669a 693 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
694 break;
695 case 7: /* vc: !V */
696 tmp = load_cpu_field(VF);
cb63669a 697 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
698 break;
699 case 8: /* hi: C && !Z */
700 inv = gen_new_label();
701 tmp = load_cpu_field(CF);
cb63669a 702 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 703 dead_tmp(tmp);
6fbe23d5 704 tmp = load_cpu_field(ZF);
cb63669a 705 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
706 gen_set_label(inv);
707 break;
708 case 9: /* ls: !C || Z */
709 tmp = load_cpu_field(CF);
cb63669a 710 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 711 dead_tmp(tmp);
6fbe23d5 712 tmp = load_cpu_field(ZF);
cb63669a 713 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
714 break;
715 case 10: /* ge: N == V -> N ^ V == 0 */
716 tmp = load_cpu_field(VF);
6fbe23d5 717 tmp2 = load_cpu_field(NF);
d9ba4830
PB
718 tcg_gen_xor_i32(tmp, tmp, tmp2);
719 dead_tmp(tmp2);
cb63669a 720 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
721 break;
722 case 11: /* lt: N != V -> N ^ V != 0 */
723 tmp = load_cpu_field(VF);
6fbe23d5 724 tmp2 = load_cpu_field(NF);
d9ba4830
PB
725 tcg_gen_xor_i32(tmp, tmp, tmp2);
726 dead_tmp(tmp2);
cb63669a 727 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
728 break;
729 case 12: /* gt: !Z && N == V */
730 inv = gen_new_label();
6fbe23d5 731 tmp = load_cpu_field(ZF);
cb63669a 732 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
733 dead_tmp(tmp);
734 tmp = load_cpu_field(VF);
6fbe23d5 735 tmp2 = load_cpu_field(NF);
d9ba4830
PB
736 tcg_gen_xor_i32(tmp, tmp, tmp2);
737 dead_tmp(tmp2);
cb63669a 738 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
739 gen_set_label(inv);
740 break;
741 case 13: /* le: Z || N != V */
6fbe23d5 742 tmp = load_cpu_field(ZF);
cb63669a 743 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
744 dead_tmp(tmp);
745 tmp = load_cpu_field(VF);
6fbe23d5 746 tmp2 = load_cpu_field(NF);
d9ba4830
PB
747 tcg_gen_xor_i32(tmp, tmp, tmp2);
748 dead_tmp(tmp2);
cb63669a 749 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
750 break;
751 default:
752 fprintf(stderr, "Bad condition code 0x%x\n", cc);
753 abort();
754 }
755 dead_tmp(tmp);
756}
2c0262af 757
/* Indexed by data-processing opcode: nonzero for the logical ops whose
   S variants set the flags from the result (and shifter carry) rather
   than from an arithmetic add/subtract. */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 776
d9ba4830
PB
777/* Set PC and Thumb state from an immediate address. */
778static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 779{
b26eefb6 780 TCGv tmp;
99c475ab 781
b26eefb6
PB
782 s->is_jmp = DISAS_UPDATE;
783 tmp = new_tmp();
d9ba4830
PB
784 if (s->thumb != (addr & 1)) {
785 tcg_gen_movi_i32(tmp, addr & 1);
786 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
787 }
788 tcg_gen_movi_i32(tmp, addr & ~1);
789 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
b26eefb6 790 dead_tmp(tmp);
d9ba4830
PB
791}
792
793/* Set PC and Thumb state from var. var is marked as dead. */
794static inline void gen_bx(DisasContext *s, TCGv var)
795{
796 TCGv tmp;
797
798 s->is_jmp = DISAS_UPDATE;
799 tmp = new_tmp();
800 tcg_gen_andi_i32(tmp, var, 1);
801 store_cpu_field(tmp, thumb);
802 tcg_gen_andi_i32(var, var, ~1);
803 store_cpu_field(var, regs[15]);
804}
805
806/* TODO: This should be removed. Use gen_bx instead. */
807static inline void gen_bx_T0(DisasContext *s)
808{
809 TCGv tmp = new_tmp();
810 tcg_gen_mov_i32(tmp, cpu_T[0]);
811 gen_bx(s, tmp);
b26eefb6 812}
b5ff1b31 813
b0109805
PB
814static inline TCGv gen_ld8s(TCGv addr, int index)
815{
816 TCGv tmp = new_tmp();
817 tcg_gen_qemu_ld8s(tmp, addr, index);
818 return tmp;
819}
820static inline TCGv gen_ld8u(TCGv addr, int index)
821{
822 TCGv tmp = new_tmp();
823 tcg_gen_qemu_ld8u(tmp, addr, index);
824 return tmp;
825}
826static inline TCGv gen_ld16s(TCGv addr, int index)
827{
828 TCGv tmp = new_tmp();
829 tcg_gen_qemu_ld16s(tmp, addr, index);
830 return tmp;
831}
832static inline TCGv gen_ld16u(TCGv addr, int index)
833{
834 TCGv tmp = new_tmp();
835 tcg_gen_qemu_ld16u(tmp, addr, index);
836 return tmp;
837}
838static inline TCGv gen_ld32(TCGv addr, int index)
839{
840 TCGv tmp = new_tmp();
841 tcg_gen_qemu_ld32u(tmp, addr, index);
842 return tmp;
843}
844static inline void gen_st8(TCGv val, TCGv addr, int index)
845{
846 tcg_gen_qemu_st8(val, addr, index);
847 dead_tmp(val);
848}
849static inline void gen_st16(TCGv val, TCGv addr, int index)
850{
851 tcg_gen_qemu_st16(val, addr, index);
852 dead_tmp(val);
853}
854static inline void gen_st32(TCGv val, TCGv addr, int index)
855{
856 tcg_gen_qemu_st32(val, addr, index);
857 dead_tmp(val);
858}
b5ff1b31 859
2c0262af
FB
860static inline void gen_movl_T0_reg(DisasContext *s, int reg)
861{
b26eefb6 862 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
863}
864
865static inline void gen_movl_T1_reg(DisasContext *s, int reg)
866{
b26eefb6 867 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
868}
869
870static inline void gen_movl_T2_reg(DisasContext *s, int reg)
871{
b26eefb6
PB
872 load_reg_var(s, cpu_T[2], reg);
873}
874
5e3f878a
PB
875static inline void gen_set_pc_im(uint32_t val)
876{
877 TCGv tmp = new_tmp();
878 tcg_gen_movi_i32(tmp, val);
879 store_cpu_field(tmp, regs[15]);
880}
881
2c0262af
FB
882static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
883{
b26eefb6
PB
884 TCGv tmp;
885 if (reg == 15) {
886 tmp = new_tmp();
887 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
888 } else {
889 tmp = cpu_T[t];
890 }
891 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
2c0262af 892 if (reg == 15) {
b26eefb6 893 dead_tmp(tmp);
2c0262af
FB
894 s->is_jmp = DISAS_JUMP;
895 }
896}
897
898static inline void gen_movl_reg_T0(DisasContext *s, int reg)
899{
900 gen_movl_reg_TN(s, reg, 0);
901}
902
903static inline void gen_movl_reg_T1(DisasContext *s, int reg)
904{
905 gen_movl_reg_TN(s, reg, 1);
906}
907
b5ff1b31
FB
908/* Force a TB lookup after an instruction that changes the CPU state. */
909static inline void gen_lookup_tb(DisasContext *s)
910{
911 gen_op_movl_T0_im(s->pc);
912 gen_movl_reg_T0(s, 15);
913 s->is_jmp = DISAS_UPDATE;
914}
915
b0109805
PB
916static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
917 TCGv var)
2c0262af 918{
1e8d4eec 919 int val, rm, shift, shiftop;
b26eefb6 920 TCGv offset;
2c0262af
FB
921
922 if (!(insn & (1 << 25))) {
923 /* immediate */
924 val = insn & 0xfff;
925 if (!(insn & (1 << 23)))
926 val = -val;
537730b9 927 if (val != 0)
b0109805 928 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
929 } else {
930 /* shift/register */
931 rm = (insn) & 0xf;
932 shift = (insn >> 7) & 0x1f;
1e8d4eec 933 shiftop = (insn >> 5) & 3;
b26eefb6 934 offset = load_reg(s, rm);
9a119ff6 935 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 936 if (!(insn & (1 << 23)))
b0109805 937 tcg_gen_sub_i32(var, var, offset);
2c0262af 938 else
b0109805 939 tcg_gen_add_i32(var, var, offset);
b26eefb6 940 dead_tmp(offset);
2c0262af
FB
941 }
942}
943
191f9a93 944static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 945 int extra, TCGv var)
2c0262af
FB
946{
947 int val, rm;
b26eefb6 948 TCGv offset;
3b46e624 949
2c0262af
FB
950 if (insn & (1 << 22)) {
951 /* immediate */
952 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
953 if (!(insn & (1 << 23)))
954 val = -val;
18acad92 955 val += extra;
537730b9 956 if (val != 0)
b0109805 957 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
958 } else {
959 /* register */
191f9a93 960 if (extra)
b0109805 961 tcg_gen_addi_i32(var, var, extra);
2c0262af 962 rm = (insn) & 0xf;
b26eefb6 963 offset = load_reg(s, rm);
2c0262af 964 if (!(insn & (1 << 23)))
b0109805 965 tcg_gen_sub_i32(var, var, offset);
2c0262af 966 else
b0109805 967 tcg_gen_add_i32(var, var, offset);
b26eefb6 968 dead_tmp(offset);
2c0262af
FB
969 }
970}
971
4373f3ce
PB
972#define VFP_OP2(name) \
973static inline void gen_vfp_##name(int dp) \
974{ \
975 if (dp) \
976 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
977 else \
978 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
979}
980
4373f3ce
PB
981VFP_OP2(add)
982VFP_OP2(sub)
983VFP_OP2(mul)
984VFP_OP2(div)
985
986#undef VFP_OP2
987
988static inline void gen_vfp_abs(int dp)
989{
990 if (dp)
991 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
992 else
993 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
994}
995
996static inline void gen_vfp_neg(int dp)
997{
998 if (dp)
999 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1000 else
1001 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1002}
1003
1004static inline void gen_vfp_sqrt(int dp)
1005{
1006 if (dp)
1007 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1008 else
1009 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1010}
1011
1012static inline void gen_vfp_cmp(int dp)
1013{
1014 if (dp)
1015 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1016 else
1017 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1018}
1019
1020static inline void gen_vfp_cmpe(int dp)
1021{
1022 if (dp)
1023 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1024 else
1025 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1026}
1027
1028static inline void gen_vfp_F1_ld0(int dp)
1029{
1030 if (dp)
5b340b51 1031 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1032 else
5b340b51 1033 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1034}
1035
1036static inline void gen_vfp_uito(int dp)
1037{
1038 if (dp)
1039 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1040 else
1041 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1042}
1043
1044static inline void gen_vfp_sito(int dp)
1045{
1046 if (dp)
66230e0d 1047 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 1048 else
66230e0d 1049 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
1050}
1051
1052static inline void gen_vfp_toui(int dp)
1053{
1054 if (dp)
1055 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1056 else
1057 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1058}
1059
1060static inline void gen_vfp_touiz(int dp)
1061{
1062 if (dp)
1063 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1064 else
1065 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1066}
1067
1068static inline void gen_vfp_tosi(int dp)
1069{
1070 if (dp)
1071 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1072 else
1073 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1074}
1075
1076static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1077{
1078 if (dp)
4373f3ce 1079 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1080 else
4373f3ce
PB
1081 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1082}
1083
1084#define VFP_GEN_FIX(name) \
1085static inline void gen_vfp_##name(int dp, int shift) \
1086{ \
1087 if (dp) \
1088 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1089 else \
1090 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
9ee6e8bb 1091}
4373f3ce
PB
1092VFP_GEN_FIX(tosh)
1093VFP_GEN_FIX(tosl)
1094VFP_GEN_FIX(touh)
1095VFP_GEN_FIX(toul)
1096VFP_GEN_FIX(shto)
1097VFP_GEN_FIX(slto)
1098VFP_GEN_FIX(uhto)
1099VFP_GEN_FIX(ulto)
1100#undef VFP_GEN_FIX
9ee6e8bb 1101
b5ff1b31
FB
1102static inline void gen_vfp_ld(DisasContext *s, int dp)
1103{
1104 if (dp)
4373f3ce 1105 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1106 else
4373f3ce 1107 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1108}
1109
1110static inline void gen_vfp_st(DisasContext *s, int dp)
1111{
1112 if (dp)
4373f3ce 1113 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1114 else
4373f3ce 1115 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1116}
1117
8e96005d
FB
1118static inline long
1119vfp_reg_offset (int dp, int reg)
1120{
1121 if (dp)
1122 return offsetof(CPUARMState, vfp.regs[reg]);
1123 else if (reg & 1) {
1124 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1125 + offsetof(CPU_DoubleU, l.upper);
1126 } else {
1127 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1128 + offsetof(CPU_DoubleU, l.lower);
1129 }
1130}
9ee6e8bb
PB
1131
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  Each NEON D
   register is addressed as two single-precision slots. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1141
ad69471c
PB
/* FIXME: Remove these.  Legacy aliases that route NEON accesses through
   the shared cpu_T temporaries. */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
/* Read 32-bit piece n of NEON register 'reg' into neon_T0/T1. */
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
/* Write neon_T0/T1 into 32-bit piece n of NEON register 'reg'. */
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
9ee6e8bb 1149
8f8e3aa4
PB
1150static TCGv neon_load_reg(int reg, int pass)
1151{
1152 TCGv tmp = new_tmp();
1153 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1154 return tmp;
1155}
1156
1157static void neon_store_reg(int reg, int pass, TCGv var)
1158{
1159 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1160 dead_tmp(var);
1161}
1162
a7812ae4 1163static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1164{
1165 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1166}
1167
a7812ae4 1168static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1169{
1170 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1171}
1172
4373f3ce
PB
/* FP load/store aliases: VFP values are moved with the plain integer
   TCG load/store ops of the matching width. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1177
b7bcbe95
FB
1178static inline void gen_mov_F0_vreg(int dp, int reg)
1179{
1180 if (dp)
4373f3ce 1181 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1182 else
4373f3ce 1183 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1184}
1185
1186static inline void gen_mov_F1_vreg(int dp, int reg)
1187{
1188 if (dp)
4373f3ce 1189 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1190 else
4373f3ce 1191 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1192}
1193
1194static inline void gen_mov_vreg_F0(int dp, int reg)
1195{
1196 if (dp)
4373f3ce 1197 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1198 else
4373f3ce 1199 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1200}
1201
18c9b560
AZ
1202#define ARM_CP_RW_BIT (1 << 20)
1203
a7812ae4 1204static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d
PB
1205{
1206 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1207}
1208
a7812ae4 1209static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d
PB
1210{
1211 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1212}
1213
1214static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1215{
1216 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1217}
1218
1219static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1220{
1221 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1222}
1223
1224static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1225{
1226 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1227}
1228
1229static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1230{
1231 iwmmxt_store_reg(cpu_M0, rn);
1232}
1233
1234static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1235{
1236 iwmmxt_load_reg(cpu_M0, rn);
1237}
1238
1239static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1240{
1241 iwmmxt_load_reg(cpu_V1, rn);
1242 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1243}
1244
1245static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1246{
1247 iwmmxt_load_reg(cpu_V1, rn);
1248 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1249}
1250
1251static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1252{
1253 iwmmxt_load_reg(cpu_V1, rn);
1254 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1255}
1256
/* Generate a binary iwMMXt op: M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* Same, for helpers that also need cpu_env (e.g. to update flags). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate an env-taking op for all three element widths. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env-taking op: M0 = helper(M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1281
1282IWMMXT_OP(maddsq)
1283IWMMXT_OP(madduq)
1284IWMMXT_OP(sadb)
1285IWMMXT_OP(sadw)
1286IWMMXT_OP(mulslw)
1287IWMMXT_OP(mulshw)
1288IWMMXT_OP(mululw)
1289IWMMXT_OP(muluhw)
1290IWMMXT_OP(macsw)
1291IWMMXT_OP(macuw)
1292
1293IWMMXT_OP_ENV_SIZE(unpackl)
1294IWMMXT_OP_ENV_SIZE(unpackh)
1295
1296IWMMXT_OP_ENV1(unpacklub)
1297IWMMXT_OP_ENV1(unpackluw)
1298IWMMXT_OP_ENV1(unpacklul)
1299IWMMXT_OP_ENV1(unpackhub)
1300IWMMXT_OP_ENV1(unpackhuw)
1301IWMMXT_OP_ENV1(unpackhul)
1302IWMMXT_OP_ENV1(unpacklsb)
1303IWMMXT_OP_ENV1(unpacklsw)
1304IWMMXT_OP_ENV1(unpacklsl)
1305IWMMXT_OP_ENV1(unpackhsb)
1306IWMMXT_OP_ENV1(unpackhsw)
1307IWMMXT_OP_ENV1(unpackhsl)
1308
1309IWMMXT_OP_ENV_SIZE(cmpeq)
1310IWMMXT_OP_ENV_SIZE(cmpgtu)
1311IWMMXT_OP_ENV_SIZE(cmpgts)
1312
1313IWMMXT_OP_ENV_SIZE(mins)
1314IWMMXT_OP_ENV_SIZE(minu)
1315IWMMXT_OP_ENV_SIZE(maxs)
1316IWMMXT_OP_ENV_SIZE(maxu)
1317
1318IWMMXT_OP_ENV_SIZE(subn)
1319IWMMXT_OP_ENV_SIZE(addn)
1320IWMMXT_OP_ENV_SIZE(subu)
1321IWMMXT_OP_ENV_SIZE(addu)
1322IWMMXT_OP_ENV_SIZE(subs)
1323IWMMXT_OP_ENV_SIZE(adds)
1324
1325IWMMXT_OP_ENV(avgb0)
1326IWMMXT_OP_ENV(avgb1)
1327IWMMXT_OP_ENV(avgw0)
1328IWMMXT_OP_ENV(avgw1)
1329
1330IWMMXT_OP(msadb)
1331
1332IWMMXT_OP_ENV(packuw)
1333IWMMXT_OP_ENV(packul)
1334IWMMXT_OP_ENV(packuq)
1335IWMMXT_OP_ENV(packsw)
1336IWMMXT_OP_ENV(packsl)
1337IWMMXT_OP_ENV(packsq)
1338
1339static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1340{
1341 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1342}
1343
1344static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1345{
1346 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1347}
1348
1349static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1350{
1351 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1352}
1353
1354static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1355{
1356 iwmmxt_load_reg(cpu_V1, rn);
1357 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1358}
1359
1360static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1361{
1362 TCGv tmp = tcg_const_i32(shift);
1363 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1364}
1365
1366static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1367{
1368 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1369 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1370 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1371}
1372
1373static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1374{
1375 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1376 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1377 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1378}
1379
1380static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1381{
1382 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1383 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1384 if (mask != ~0u)
1385 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
1386}
1387
1388static void gen_op_iwmmxt_set_mup(void)
1389{
1390 TCGv tmp;
1391 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1392 tcg_gen_ori_i32(tmp, tmp, 2);
1393 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1394}
1395
1396static void gen_op_iwmmxt_set_cup(void)
1397{
1398 TCGv tmp;
1399 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1400 tcg_gen_ori_i32(tmp, tmp, 1);
1401 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1402}
1403
1404static void gen_op_iwmmxt_setpsr_nz(void)
1405{
1406 TCGv tmp = new_tmp();
1407 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1408 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1409}
1410
1411static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1412{
1413 iwmmxt_load_reg(cpu_V1, rn);
86831435 1414 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1415 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1416}
1417
1418
1419static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1420{
1421 iwmmxt_load_reg(cpu_V0, rn);
1422 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1423 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1424 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1425}
1426
1427static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1428{
36aa55dc 1429 tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
e677137d
PB
1430 iwmmxt_store_reg(cpu_V0, rn);
1431}
1432
18c9b560
AZ
1433static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1434{
1435 int rd;
1436 uint32_t offset;
1437
1438 rd = (insn >> 16) & 0xf;
1439 gen_movl_T1_reg(s, rd);
1440
1441 offset = (insn & 0xff) << ((insn >> 7) & 2);
1442 if (insn & (1 << 24)) {
1443 /* Pre indexed */
1444 if (insn & (1 << 23))
1445 gen_op_addl_T1_im(offset);
1446 else
1447 gen_op_addl_T1_im(-offset);
1448
1449 if (insn & (1 << 21))
1450 gen_movl_reg_T1(s, rd);
1451 } else if (insn & (1 << 21)) {
1452 /* Post indexed */
1453 if (insn & (1 << 23))
1454 gen_op_movl_T0_im(offset);
1455 else
1456 gen_op_movl_T0_im(- offset);
1457 gen_op_addl_T0_T1();
1458 gen_movl_reg_T0(s, rd);
1459 } else if (!(insn & (1 << 23)))
1460 return 1;
1461 return 0;
1462}
1463
1464static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1465{
1466 int rd = (insn >> 0) & 0xf;
1467
1468 if (insn & (1 << 8))
1469 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1470 return 1;
1471 else
1472 gen_op_iwmmxt_movl_T0_wCx(rd);
1473 else
e677137d 1474 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1475
1476 gen_op_movl_T1_im(mask);
1477 gen_op_andl_T0_T1();
1478 return 0;
1479}
1480
1481/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1482 (ie. an undefined instruction). */
1483static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1484{
1485 int rd, wrd;
1486 int rdhi, rdlo, rd0, rd1, i;
b0109805 1487 TCGv tmp;
18c9b560
AZ
1488
1489 if ((insn & 0x0e000e00) == 0x0c000000) {
1490 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1491 wrd = insn & 0xf;
1492 rdlo = (insn >> 12) & 0xf;
1493 rdhi = (insn >> 16) & 0xf;
1494 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1495 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1496 gen_movl_reg_T0(s, rdlo);
1497 gen_movl_reg_T1(s, rdhi);
1498 } else { /* TMCRR */
1499 gen_movl_T0_reg(s, rdlo);
1500 gen_movl_T1_reg(s, rdhi);
e677137d 1501 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1502 gen_op_iwmmxt_set_mup();
1503 }
1504 return 0;
1505 }
1506
1507 wrd = (insn >> 12) & 0xf;
1508 if (gen_iwmmxt_address(s, insn))
1509 return 1;
1510 if (insn & ARM_CP_RW_BIT) {
1511 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1512 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1513 tcg_gen_mov_i32(cpu_T[0], tmp);
1514 dead_tmp(tmp);
18c9b560
AZ
1515 gen_op_iwmmxt_movl_wCx_T0(wrd);
1516 } else {
e677137d
PB
1517 i = 1;
1518 if (insn & (1 << 8)) {
1519 if (insn & (1 << 22)) { /* WLDRD */
1520 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1521 i = 0;
1522 } else { /* WLDRW wRd */
1523 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1524 }
1525 } else {
1526 if (insn & (1 << 22)) { /* WLDRH */
1527 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1528 } else { /* WLDRB */
1529 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1530 }
1531 }
1532 if (i) {
1533 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1534 dead_tmp(tmp);
1535 }
18c9b560
AZ
1536 gen_op_iwmmxt_movq_wRn_M0(wrd);
1537 }
1538 } else {
1539 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1540 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1541 tmp = new_tmp();
1542 tcg_gen_mov_i32(tmp, cpu_T[0]);
1543 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1544 } else {
1545 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1546 tmp = new_tmp();
1547 if (insn & (1 << 8)) {
1548 if (insn & (1 << 22)) { /* WSTRD */
1549 dead_tmp(tmp);
1550 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1551 } else { /* WSTRW wRd */
1552 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1553 gen_st32(tmp, cpu_T[1], IS_USER(s));
1554 }
1555 } else {
1556 if (insn & (1 << 22)) { /* WSTRH */
1557 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1558 gen_st16(tmp, cpu_T[1], IS_USER(s));
1559 } else { /* WSTRB */
1560 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1561 gen_st8(tmp, cpu_T[1], IS_USER(s));
1562 }
1563 }
18c9b560
AZ
1564 }
1565 }
1566 return 0;
1567 }
1568
1569 if ((insn & 0x0f000000) != 0x0e000000)
1570 return 1;
1571
1572 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1573 case 0x000: /* WOR */
1574 wrd = (insn >> 12) & 0xf;
1575 rd0 = (insn >> 0) & 0xf;
1576 rd1 = (insn >> 16) & 0xf;
1577 gen_op_iwmmxt_movq_M0_wRn(rd0);
1578 gen_op_iwmmxt_orq_M0_wRn(rd1);
1579 gen_op_iwmmxt_setpsr_nz();
1580 gen_op_iwmmxt_movq_wRn_M0(wrd);
1581 gen_op_iwmmxt_set_mup();
1582 gen_op_iwmmxt_set_cup();
1583 break;
1584 case 0x011: /* TMCR */
1585 if (insn & 0xf)
1586 return 1;
1587 rd = (insn >> 12) & 0xf;
1588 wrd = (insn >> 16) & 0xf;
1589 switch (wrd) {
1590 case ARM_IWMMXT_wCID:
1591 case ARM_IWMMXT_wCASF:
1592 break;
1593 case ARM_IWMMXT_wCon:
1594 gen_op_iwmmxt_set_cup();
1595 /* Fall through. */
1596 case ARM_IWMMXT_wCSSF:
1597 gen_op_iwmmxt_movl_T0_wCx(wrd);
1598 gen_movl_T1_reg(s, rd);
1599 gen_op_bicl_T0_T1();
1600 gen_op_iwmmxt_movl_wCx_T0(wrd);
1601 break;
1602 case ARM_IWMMXT_wCGR0:
1603 case ARM_IWMMXT_wCGR1:
1604 case ARM_IWMMXT_wCGR2:
1605 case ARM_IWMMXT_wCGR3:
1606 gen_op_iwmmxt_set_cup();
1607 gen_movl_reg_T0(s, rd);
1608 gen_op_iwmmxt_movl_wCx_T0(wrd);
1609 break;
1610 default:
1611 return 1;
1612 }
1613 break;
1614 case 0x100: /* WXOR */
1615 wrd = (insn >> 12) & 0xf;
1616 rd0 = (insn >> 0) & 0xf;
1617 rd1 = (insn >> 16) & 0xf;
1618 gen_op_iwmmxt_movq_M0_wRn(rd0);
1619 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1620 gen_op_iwmmxt_setpsr_nz();
1621 gen_op_iwmmxt_movq_wRn_M0(wrd);
1622 gen_op_iwmmxt_set_mup();
1623 gen_op_iwmmxt_set_cup();
1624 break;
1625 case 0x111: /* TMRC */
1626 if (insn & 0xf)
1627 return 1;
1628 rd = (insn >> 12) & 0xf;
1629 wrd = (insn >> 16) & 0xf;
1630 gen_op_iwmmxt_movl_T0_wCx(wrd);
1631 gen_movl_reg_T0(s, rd);
1632 break;
1633 case 0x300: /* WANDN */
1634 wrd = (insn >> 12) & 0xf;
1635 rd0 = (insn >> 0) & 0xf;
1636 rd1 = (insn >> 16) & 0xf;
1637 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1638 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1639 gen_op_iwmmxt_andq_M0_wRn(rd1);
1640 gen_op_iwmmxt_setpsr_nz();
1641 gen_op_iwmmxt_movq_wRn_M0(wrd);
1642 gen_op_iwmmxt_set_mup();
1643 gen_op_iwmmxt_set_cup();
1644 break;
1645 case 0x200: /* WAND */
1646 wrd = (insn >> 12) & 0xf;
1647 rd0 = (insn >> 0) & 0xf;
1648 rd1 = (insn >> 16) & 0xf;
1649 gen_op_iwmmxt_movq_M0_wRn(rd0);
1650 gen_op_iwmmxt_andq_M0_wRn(rd1);
1651 gen_op_iwmmxt_setpsr_nz();
1652 gen_op_iwmmxt_movq_wRn_M0(wrd);
1653 gen_op_iwmmxt_set_mup();
1654 gen_op_iwmmxt_set_cup();
1655 break;
1656 case 0x810: case 0xa10: /* WMADD */
1657 wrd = (insn >> 12) & 0xf;
1658 rd0 = (insn >> 0) & 0xf;
1659 rd1 = (insn >> 16) & 0xf;
1660 gen_op_iwmmxt_movq_M0_wRn(rd0);
1661 if (insn & (1 << 21))
1662 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1663 else
1664 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1665 gen_op_iwmmxt_movq_wRn_M0(wrd);
1666 gen_op_iwmmxt_set_mup();
1667 break;
1668 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1669 wrd = (insn >> 12) & 0xf;
1670 rd0 = (insn >> 16) & 0xf;
1671 rd1 = (insn >> 0) & 0xf;
1672 gen_op_iwmmxt_movq_M0_wRn(rd0);
1673 switch ((insn >> 22) & 3) {
1674 case 0:
1675 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1676 break;
1677 case 1:
1678 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1679 break;
1680 case 2:
1681 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1682 break;
1683 case 3:
1684 return 1;
1685 }
1686 gen_op_iwmmxt_movq_wRn_M0(wrd);
1687 gen_op_iwmmxt_set_mup();
1688 gen_op_iwmmxt_set_cup();
1689 break;
1690 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1691 wrd = (insn >> 12) & 0xf;
1692 rd0 = (insn >> 16) & 0xf;
1693 rd1 = (insn >> 0) & 0xf;
1694 gen_op_iwmmxt_movq_M0_wRn(rd0);
1695 switch ((insn >> 22) & 3) {
1696 case 0:
1697 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1698 break;
1699 case 1:
1700 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1701 break;
1702 case 2:
1703 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1704 break;
1705 case 3:
1706 return 1;
1707 }
1708 gen_op_iwmmxt_movq_wRn_M0(wrd);
1709 gen_op_iwmmxt_set_mup();
1710 gen_op_iwmmxt_set_cup();
1711 break;
1712 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1713 wrd = (insn >> 12) & 0xf;
1714 rd0 = (insn >> 16) & 0xf;
1715 rd1 = (insn >> 0) & 0xf;
1716 gen_op_iwmmxt_movq_M0_wRn(rd0);
1717 if (insn & (1 << 22))
1718 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1719 else
1720 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1721 if (!(insn & (1 << 20)))
1722 gen_op_iwmmxt_addl_M0_wRn(wrd);
1723 gen_op_iwmmxt_movq_wRn_M0(wrd);
1724 gen_op_iwmmxt_set_mup();
1725 break;
1726 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1727 wrd = (insn >> 12) & 0xf;
1728 rd0 = (insn >> 16) & 0xf;
1729 rd1 = (insn >> 0) & 0xf;
1730 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1731 if (insn & (1 << 21)) {
1732 if (insn & (1 << 20))
1733 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1734 else
1735 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1736 } else {
1737 if (insn & (1 << 20))
1738 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1739 else
1740 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1741 }
18c9b560
AZ
1742 gen_op_iwmmxt_movq_wRn_M0(wrd);
1743 gen_op_iwmmxt_set_mup();
1744 break;
1745 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1746 wrd = (insn >> 12) & 0xf;
1747 rd0 = (insn >> 16) & 0xf;
1748 rd1 = (insn >> 0) & 0xf;
1749 gen_op_iwmmxt_movq_M0_wRn(rd0);
1750 if (insn & (1 << 21))
1751 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1752 else
1753 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1754 if (!(insn & (1 << 20))) {
e677137d
PB
1755 iwmmxt_load_reg(cpu_V1, wrd);
1756 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1757 }
1758 gen_op_iwmmxt_movq_wRn_M0(wrd);
1759 gen_op_iwmmxt_set_mup();
1760 break;
1761 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1762 wrd = (insn >> 12) & 0xf;
1763 rd0 = (insn >> 16) & 0xf;
1764 rd1 = (insn >> 0) & 0xf;
1765 gen_op_iwmmxt_movq_M0_wRn(rd0);
1766 switch ((insn >> 22) & 3) {
1767 case 0:
1768 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1769 break;
1770 case 1:
1771 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1772 break;
1773 case 2:
1774 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1775 break;
1776 case 3:
1777 return 1;
1778 }
1779 gen_op_iwmmxt_movq_wRn_M0(wrd);
1780 gen_op_iwmmxt_set_mup();
1781 gen_op_iwmmxt_set_cup();
1782 break;
1783 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1784 wrd = (insn >> 12) & 0xf;
1785 rd0 = (insn >> 16) & 0xf;
1786 rd1 = (insn >> 0) & 0xf;
1787 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1788 if (insn & (1 << 22)) {
1789 if (insn & (1 << 20))
1790 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1791 else
1792 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1793 } else {
1794 if (insn & (1 << 20))
1795 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1796 else
1797 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1798 }
18c9b560
AZ
1799 gen_op_iwmmxt_movq_wRn_M0(wrd);
1800 gen_op_iwmmxt_set_mup();
1801 gen_op_iwmmxt_set_cup();
1802 break;
1803 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1804 wrd = (insn >> 12) & 0xf;
1805 rd0 = (insn >> 16) & 0xf;
1806 rd1 = (insn >> 0) & 0xf;
1807 gen_op_iwmmxt_movq_M0_wRn(rd0);
1808 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1809 gen_op_movl_T1_im(7);
1810 gen_op_andl_T0_T1();
1811 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1812 gen_op_iwmmxt_movq_wRn_M0(wrd);
1813 gen_op_iwmmxt_set_mup();
1814 break;
1815 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1816 rd = (insn >> 12) & 0xf;
1817 wrd = (insn >> 16) & 0xf;
1818 gen_movl_T0_reg(s, rd);
1819 gen_op_iwmmxt_movq_M0_wRn(wrd);
1820 switch ((insn >> 6) & 3) {
1821 case 0:
1822 gen_op_movl_T1_im(0xff);
1823 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1824 break;
1825 case 1:
1826 gen_op_movl_T1_im(0xffff);
1827 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1828 break;
1829 case 2:
1830 gen_op_movl_T1_im(0xffffffff);
1831 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1832 break;
1833 case 3:
1834 return 1;
1835 }
1836 gen_op_iwmmxt_movq_wRn_M0(wrd);
1837 gen_op_iwmmxt_set_mup();
1838 break;
1839 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1840 rd = (insn >> 12) & 0xf;
1841 wrd = (insn >> 16) & 0xf;
1842 if (rd == 15)
1843 return 1;
1844 gen_op_iwmmxt_movq_M0_wRn(wrd);
1845 switch ((insn >> 22) & 3) {
1846 case 0:
1847 if (insn & 8)
1848 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1849 else {
e677137d 1850 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1851 }
1852 break;
1853 case 1:
1854 if (insn & 8)
1855 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1856 else {
e677137d 1857 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1858 }
1859 break;
1860 case 2:
e677137d 1861 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1862 break;
1863 case 3:
1864 return 1;
1865 }
b26eefb6 1866 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1867 break;
1868 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1869 if ((insn & 0x000ff008) != 0x0003f000)
1870 return 1;
1871 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1872 switch ((insn >> 22) & 3) {
1873 case 0:
1874 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1875 break;
1876 case 1:
1877 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1878 break;
1879 case 2:
1880 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1881 break;
1882 case 3:
1883 return 1;
1884 }
1885 gen_op_shll_T1_im(28);
d9ba4830 1886 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1887 break;
1888 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1889 rd = (insn >> 12) & 0xf;
1890 wrd = (insn >> 16) & 0xf;
1891 gen_movl_T0_reg(s, rd);
1892 switch ((insn >> 6) & 3) {
1893 case 0:
e677137d 1894 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1895 break;
1896 case 1:
e677137d 1897 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1898 break;
1899 case 2:
e677137d 1900 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1901 break;
1902 case 3:
1903 return 1;
1904 }
1905 gen_op_iwmmxt_movq_wRn_M0(wrd);
1906 gen_op_iwmmxt_set_mup();
1907 break;
1908 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1909 if ((insn & 0x000ff00f) != 0x0003f000)
1910 return 1;
1911 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1912 switch ((insn >> 22) & 3) {
1913 case 0:
1914 for (i = 0; i < 7; i ++) {
1915 gen_op_shll_T1_im(4);
1916 gen_op_andl_T0_T1();
1917 }
1918 break;
1919 case 1:
1920 for (i = 0; i < 3; i ++) {
1921 gen_op_shll_T1_im(8);
1922 gen_op_andl_T0_T1();
1923 }
1924 break;
1925 case 2:
1926 gen_op_shll_T1_im(16);
1927 gen_op_andl_T0_T1();
1928 break;
1929 case 3:
1930 return 1;
1931 }
d9ba4830 1932 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1933 break;
1934 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1935 wrd = (insn >> 12) & 0xf;
1936 rd0 = (insn >> 16) & 0xf;
1937 gen_op_iwmmxt_movq_M0_wRn(rd0);
1938 switch ((insn >> 22) & 3) {
1939 case 0:
e677137d 1940 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1941 break;
1942 case 1:
e677137d 1943 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1944 break;
1945 case 2:
e677137d 1946 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1947 break;
1948 case 3:
1949 return 1;
1950 }
1951 gen_op_iwmmxt_movq_wRn_M0(wrd);
1952 gen_op_iwmmxt_set_mup();
1953 break;
1954 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1955 if ((insn & 0x000ff00f) != 0x0003f000)
1956 return 1;
1957 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1958 switch ((insn >> 22) & 3) {
1959 case 0:
1960 for (i = 0; i < 7; i ++) {
1961 gen_op_shll_T1_im(4);
1962 gen_op_orl_T0_T1();
1963 }
1964 break;
1965 case 1:
1966 for (i = 0; i < 3; i ++) {
1967 gen_op_shll_T1_im(8);
1968 gen_op_orl_T0_T1();
1969 }
1970 break;
1971 case 2:
1972 gen_op_shll_T1_im(16);
1973 gen_op_orl_T0_T1();
1974 break;
1975 case 3:
1976 return 1;
1977 }
d9ba4830 1978 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1979 break;
1980 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1981 rd = (insn >> 12) & 0xf;
1982 rd0 = (insn >> 16) & 0xf;
1983 if ((insn & 0xf) != 0)
1984 return 1;
1985 gen_op_iwmmxt_movq_M0_wRn(rd0);
1986 switch ((insn >> 22) & 3) {
1987 case 0:
e677137d 1988 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
1989 break;
1990 case 1:
e677137d 1991 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
1992 break;
1993 case 2:
e677137d 1994 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
1995 break;
1996 case 3:
1997 return 1;
1998 }
1999 gen_movl_reg_T0(s, rd);
2000 break;
2001 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2002 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2003 wrd = (insn >> 12) & 0xf;
2004 rd0 = (insn >> 16) & 0xf;
2005 rd1 = (insn >> 0) & 0xf;
2006 gen_op_iwmmxt_movq_M0_wRn(rd0);
2007 switch ((insn >> 22) & 3) {
2008 case 0:
2009 if (insn & (1 << 21))
2010 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2011 else
2012 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2013 break;
2014 case 1:
2015 if (insn & (1 << 21))
2016 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2017 else
2018 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2019 break;
2020 case 2:
2021 if (insn & (1 << 21))
2022 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2023 else
2024 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2025 break;
2026 case 3:
2027 return 1;
2028 }
2029 gen_op_iwmmxt_movq_wRn_M0(wrd);
2030 gen_op_iwmmxt_set_mup();
2031 gen_op_iwmmxt_set_cup();
2032 break;
2033 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2034 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2035 wrd = (insn >> 12) & 0xf;
2036 rd0 = (insn >> 16) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 switch ((insn >> 22) & 3) {
2039 case 0:
2040 if (insn & (1 << 21))
2041 gen_op_iwmmxt_unpacklsb_M0();
2042 else
2043 gen_op_iwmmxt_unpacklub_M0();
2044 break;
2045 case 1:
2046 if (insn & (1 << 21))
2047 gen_op_iwmmxt_unpacklsw_M0();
2048 else
2049 gen_op_iwmmxt_unpackluw_M0();
2050 break;
2051 case 2:
2052 if (insn & (1 << 21))
2053 gen_op_iwmmxt_unpacklsl_M0();
2054 else
2055 gen_op_iwmmxt_unpacklul_M0();
2056 break;
2057 case 3:
2058 return 1;
2059 }
2060 gen_op_iwmmxt_movq_wRn_M0(wrd);
2061 gen_op_iwmmxt_set_mup();
2062 gen_op_iwmmxt_set_cup();
2063 break;
2064 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2065 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2066 wrd = (insn >> 12) & 0xf;
2067 rd0 = (insn >> 16) & 0xf;
2068 gen_op_iwmmxt_movq_M0_wRn(rd0);
2069 switch ((insn >> 22) & 3) {
2070 case 0:
2071 if (insn & (1 << 21))
2072 gen_op_iwmmxt_unpackhsb_M0();
2073 else
2074 gen_op_iwmmxt_unpackhub_M0();
2075 break;
2076 case 1:
2077 if (insn & (1 << 21))
2078 gen_op_iwmmxt_unpackhsw_M0();
2079 else
2080 gen_op_iwmmxt_unpackhuw_M0();
2081 break;
2082 case 2:
2083 if (insn & (1 << 21))
2084 gen_op_iwmmxt_unpackhsl_M0();
2085 else
2086 gen_op_iwmmxt_unpackhul_M0();
2087 break;
2088 case 3:
2089 return 1;
2090 }
2091 gen_op_iwmmxt_movq_wRn_M0(wrd);
2092 gen_op_iwmmxt_set_mup();
2093 gen_op_iwmmxt_set_cup();
2094 break;
2095 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2096 case 0x214: case 0x614: case 0xa14: case 0xe14:
2097 wrd = (insn >> 12) & 0xf;
2098 rd0 = (insn >> 16) & 0xf;
2099 gen_op_iwmmxt_movq_M0_wRn(rd0);
2100 if (gen_iwmmxt_shift(insn, 0xff))
2101 return 1;
2102 switch ((insn >> 22) & 3) {
2103 case 0:
2104 return 1;
2105 case 1:
e677137d 2106 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2107 break;
2108 case 2:
e677137d 2109 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2110 break;
2111 case 3:
e677137d 2112 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2113 break;
2114 }
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2120 case 0x014: case 0x414: case 0x814: case 0xc14:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0);
2124 if (gen_iwmmxt_shift(insn, 0xff))
2125 return 1;
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 return 1;
2129 case 1:
e677137d 2130 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2131 break;
2132 case 2:
e677137d 2133 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2134 break;
2135 case 3:
e677137d 2136 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2137 break;
2138 }
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2142 break;
2143 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2144 case 0x114: case 0x514: case 0x914: case 0xd14:
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 if (gen_iwmmxt_shift(insn, 0xff))
2149 return 1;
2150 switch ((insn >> 22) & 3) {
2151 case 0:
2152 return 1;
2153 case 1:
e677137d 2154 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2155 break;
2156 case 2:
e677137d 2157 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2158 break;
2159 case 3:
e677137d 2160 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2161 break;
2162 }
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 gen_op_iwmmxt_set_cup();
2166 break;
2167 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2168 case 0x314: case 0x714: case 0xb14: case 0xf14:
2169 wrd = (insn >> 12) & 0xf;
2170 rd0 = (insn >> 16) & 0xf;
2171 gen_op_iwmmxt_movq_M0_wRn(rd0);
2172 switch ((insn >> 22) & 3) {
2173 case 0:
2174 return 1;
2175 case 1:
2176 if (gen_iwmmxt_shift(insn, 0xf))
2177 return 1;
e677137d 2178 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2179 break;
2180 case 2:
2181 if (gen_iwmmxt_shift(insn, 0x1f))
2182 return 1;
e677137d 2183 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2184 break;
2185 case 3:
2186 if (gen_iwmmxt_shift(insn, 0x3f))
2187 return 1;
e677137d 2188 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2189 break;
2190 }
2191 gen_op_iwmmxt_movq_wRn_M0(wrd);
2192 gen_op_iwmmxt_set_mup();
2193 gen_op_iwmmxt_set_cup();
2194 break;
2195 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2196 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2197 wrd = (insn >> 12) & 0xf;
2198 rd0 = (insn >> 16) & 0xf;
2199 rd1 = (insn >> 0) & 0xf;
2200 gen_op_iwmmxt_movq_M0_wRn(rd0);
2201 switch ((insn >> 22) & 3) {
2202 case 0:
2203 if (insn & (1 << 21))
2204 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2205 else
2206 gen_op_iwmmxt_minub_M0_wRn(rd1);
2207 break;
2208 case 1:
2209 if (insn & (1 << 21))
2210 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2211 else
2212 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2213 break;
2214 case 2:
2215 if (insn & (1 << 21))
2216 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2217 else
2218 gen_op_iwmmxt_minul_M0_wRn(rd1);
2219 break;
2220 case 3:
2221 return 1;
2222 }
2223 gen_op_iwmmxt_movq_wRn_M0(wrd);
2224 gen_op_iwmmxt_set_mup();
2225 break;
2226 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2227 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2228 wrd = (insn >> 12) & 0xf;
2229 rd0 = (insn >> 16) & 0xf;
2230 rd1 = (insn >> 0) & 0xf;
2231 gen_op_iwmmxt_movq_M0_wRn(rd0);
2232 switch ((insn >> 22) & 3) {
2233 case 0:
2234 if (insn & (1 << 21))
2235 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2236 else
2237 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2238 break;
2239 case 1:
2240 if (insn & (1 << 21))
2241 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2242 else
2243 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2244 break;
2245 case 2:
2246 if (insn & (1 << 21))
2247 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2248 else
2249 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2250 break;
2251 case 3:
2252 return 1;
2253 }
2254 gen_op_iwmmxt_movq_wRn_M0(wrd);
2255 gen_op_iwmmxt_set_mup();
2256 break;
2257 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2258 case 0x402: case 0x502: case 0x602: case 0x702:
2259 wrd = (insn >> 12) & 0xf;
2260 rd0 = (insn >> 16) & 0xf;
2261 rd1 = (insn >> 0) & 0xf;
2262 gen_op_iwmmxt_movq_M0_wRn(rd0);
2263 gen_op_movl_T0_im((insn >> 20) & 3);
2264 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2265 gen_op_iwmmxt_movq_wRn_M0(wrd);
2266 gen_op_iwmmxt_set_mup();
2267 break;
2268 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2269 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2270 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2271 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2272 wrd = (insn >> 12) & 0xf;
2273 rd0 = (insn >> 16) & 0xf;
2274 rd1 = (insn >> 0) & 0xf;
2275 gen_op_iwmmxt_movq_M0_wRn(rd0);
2276 switch ((insn >> 20) & 0xf) {
2277 case 0x0:
2278 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2279 break;
2280 case 0x1:
2281 gen_op_iwmmxt_subub_M0_wRn(rd1);
2282 break;
2283 case 0x3:
2284 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2285 break;
2286 case 0x4:
2287 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2288 break;
2289 case 0x5:
2290 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2291 break;
2292 case 0x7:
2293 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2294 break;
2295 case 0x8:
2296 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2297 break;
2298 case 0x9:
2299 gen_op_iwmmxt_subul_M0_wRn(rd1);
2300 break;
2301 case 0xb:
2302 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2303 break;
2304 default:
2305 return 1;
2306 }
2307 gen_op_iwmmxt_movq_wRn_M0(wrd);
2308 gen_op_iwmmxt_set_mup();
2309 gen_op_iwmmxt_set_cup();
2310 break;
2311 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2312 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2313 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2314 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2315 wrd = (insn >> 12) & 0xf;
2316 rd0 = (insn >> 16) & 0xf;
2317 gen_op_iwmmxt_movq_M0_wRn(rd0);
2318 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2319 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2320 gen_op_iwmmxt_movq_wRn_M0(wrd);
2321 gen_op_iwmmxt_set_mup();
2322 gen_op_iwmmxt_set_cup();
2323 break;
2324 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2325 case 0x418: case 0x518: case 0x618: case 0x718:
2326 case 0x818: case 0x918: case 0xa18: case 0xb18:
2327 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2328 wrd = (insn >> 12) & 0xf;
2329 rd0 = (insn >> 16) & 0xf;
2330 rd1 = (insn >> 0) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0);
2332 switch ((insn >> 20) & 0xf) {
2333 case 0x0:
2334 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2335 break;
2336 case 0x1:
2337 gen_op_iwmmxt_addub_M0_wRn(rd1);
2338 break;
2339 case 0x3:
2340 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2341 break;
2342 case 0x4:
2343 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2344 break;
2345 case 0x5:
2346 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2347 break;
2348 case 0x7:
2349 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2350 break;
2351 case 0x8:
2352 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2353 break;
2354 case 0x9:
2355 gen_op_iwmmxt_addul_M0_wRn(rd1);
2356 break;
2357 case 0xb:
2358 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2359 break;
2360 default:
2361 return 1;
2362 }
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2368 case 0x408: case 0x508: case 0x608: case 0x708:
2369 case 0x808: case 0x908: case 0xa08: case 0xb08:
2370 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2371 wrd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
2373 rd1 = (insn >> 0) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 if (!(insn & (1 << 20)))
2376 return 1;
2377 switch ((insn >> 22) & 3) {
2378 case 0:
2379 return 1;
2380 case 1:
2381 if (insn & (1 << 21))
2382 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2383 else
2384 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2385 break;
2386 case 2:
2387 if (insn & (1 << 21))
2388 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2389 else
2390 gen_op_iwmmxt_packul_M0_wRn(rd1);
2391 break;
2392 case 3:
2393 if (insn & (1 << 21))
2394 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2395 else
2396 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2397 break;
2398 }
2399 gen_op_iwmmxt_movq_wRn_M0(wrd);
2400 gen_op_iwmmxt_set_mup();
2401 gen_op_iwmmxt_set_cup();
2402 break;
2403 case 0x201: case 0x203: case 0x205: case 0x207:
2404 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2405 case 0x211: case 0x213: case 0x215: case 0x217:
2406 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2407 wrd = (insn >> 5) & 0xf;
2408 rd0 = (insn >> 12) & 0xf;
2409 rd1 = (insn >> 0) & 0xf;
2410 if (rd0 == 0xf || rd1 == 0xf)
2411 return 1;
2412 gen_op_iwmmxt_movq_M0_wRn(wrd);
2413 switch ((insn >> 16) & 0xf) {
2414 case 0x0: /* TMIA */
b26eefb6
PB
2415 gen_movl_T0_reg(s, rd0);
2416 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2417 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2418 break;
2419 case 0x8: /* TMIAPH */
b26eefb6
PB
2420 gen_movl_T0_reg(s, rd0);
2421 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2422 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2423 break;
2424 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2425 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2426 if (insn & (1 << 16))
2427 gen_op_shrl_T1_im(16);
2428 gen_op_movl_T0_T1();
b26eefb6 2429 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2430 if (insn & (1 << 17))
2431 gen_op_shrl_T1_im(16);
2432 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2433 break;
2434 default:
2435 return 1;
2436 }
2437 gen_op_iwmmxt_movq_wRn_M0(wrd);
2438 gen_op_iwmmxt_set_mup();
2439 break;
2440 default:
2441 return 1;
2442 }
2443
2444 return 0;
2445}
2446
2447/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2448 (ie. an undefined instruction). */
2449static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2450{
2451 int acc, rd0, rd1, rdhi, rdlo;
2452
2453 if ((insn & 0x0ff00f10) == 0x0e200010) {
2454 /* Multiply with Internal Accumulate Format */
2455 rd0 = (insn >> 12) & 0xf;
2456 rd1 = insn & 0xf;
2457 acc = (insn >> 5) & 7;
2458
2459 if (acc != 0)
2460 return 1;
2461
2462 switch ((insn >> 16) & 0xf) {
2463 case 0x0: /* MIA */
b26eefb6
PB
2464 gen_movl_T0_reg(s, rd0);
2465 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2466 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2467 break;
2468 case 0x8: /* MIAPH */
b26eefb6
PB
2469 gen_movl_T0_reg(s, rd0);
2470 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2471 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2472 break;
2473 case 0xc: /* MIABB */
2474 case 0xd: /* MIABT */
2475 case 0xe: /* MIATB */
2476 case 0xf: /* MIATT */
b26eefb6 2477 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2478 if (insn & (1 << 16))
2479 gen_op_shrl_T1_im(16);
2480 gen_op_movl_T0_T1();
b26eefb6 2481 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2482 if (insn & (1 << 17))
2483 gen_op_shrl_T1_im(16);
2484 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2485 break;
2486 default:
2487 return 1;
2488 }
2489
2490 gen_op_iwmmxt_movq_wRn_M0(acc);
2491 return 0;
2492 }
2493
2494 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2495 /* Internal Accumulator Access Format */
2496 rdhi = (insn >> 16) & 0xf;
2497 rdlo = (insn >> 12) & 0xf;
2498 acc = insn & 7;
2499
2500 if (acc != 0)
2501 return 1;
2502
2503 if (insn & ARM_CP_RW_BIT) { /* MRA */
e677137d 2504 gen_iwmmxt_movl_T0_T1_wRn(acc);
b26eefb6 2505 gen_movl_reg_T0(s, rdlo);
18c9b560
AZ
2506 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2507 gen_op_andl_T0_T1();
b26eefb6 2508 gen_movl_reg_T0(s, rdhi);
18c9b560 2509 } else { /* MAR */
b26eefb6
PB
2510 gen_movl_T0_reg(s, rdlo);
2511 gen_movl_T1_reg(s, rdhi);
e677137d 2512 gen_iwmmxt_movl_wRn_T0_T1(acc);
18c9b560
AZ
2513 }
2514 return 0;
2515 }
2516
2517 return 1;
2518}
2519
c1713132
AZ
2520/* Disassemble system coprocessor instruction. Return nonzero if
2521 instruction is not defined. */
2522static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2523{
8984bd2e 2524 TCGv tmp;
c1713132
AZ
2525 uint32_t rd = (insn >> 12) & 0xf;
2526 uint32_t cp = (insn >> 8) & 0xf;
2527 if (IS_USER(s)) {
2528 return 1;
2529 }
2530
18c9b560 2531 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2532 if (!env->cp[cp].cp_read)
2533 return 1;
8984bd2e
PB
2534 gen_set_pc_im(s->pc);
2535 tmp = new_tmp();
2536 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2537 store_reg(s, rd, tmp);
c1713132
AZ
2538 } else {
2539 if (!env->cp[cp].cp_write)
2540 return 1;
8984bd2e
PB
2541 gen_set_pc_im(s->pc);
2542 tmp = load_reg(s, rd);
2543 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
a60de947 2544 dead_tmp(tmp);
c1713132
AZ
2545 }
2546 return 0;
2547}
2548
9ee6e8bb
PB
2549static int cp15_user_ok(uint32_t insn)
2550{
2551 int cpn = (insn >> 16) & 0xf;
2552 int cpm = insn & 0xf;
2553 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2554
2555 if (cpn == 13 && cpm == 0) {
2556 /* TLS register. */
2557 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2558 return 1;
2559 }
2560 if (cpn == 7) {
2561 /* ISB, DSB, DMB. */
2562 if ((cpm == 5 && op == 4)
2563 || (cpm == 10 && (op == 4 || op == 5)))
2564 return 1;
2565 }
2566 return 0;
2567}
2568
b5ff1b31
FB
2569/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2570 instruction is not defined. */
a90b7318 2571static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2572{
2573 uint32_t rd;
8984bd2e 2574 TCGv tmp;
b5ff1b31 2575
9ee6e8bb
PB
2576 /* M profile cores use memory mapped registers instead of cp15. */
2577 if (arm_feature(env, ARM_FEATURE_M))
2578 return 1;
2579
2580 if ((insn & (1 << 25)) == 0) {
2581 if (insn & (1 << 20)) {
2582 /* mrrc */
2583 return 1;
2584 }
2585 /* mcrr. Used for block cache operations, so implement as no-op. */
2586 return 0;
2587 }
2588 if ((insn & (1 << 4)) == 0) {
2589 /* cdp */
2590 return 1;
2591 }
2592 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2593 return 1;
2594 }
9332f9da
FB
2595 if ((insn & 0x0fff0fff) == 0x0e070f90
2596 || (insn & 0x0fff0fff) == 0x0e070f58) {
2597 /* Wait for interrupt. */
8984bd2e 2598 gen_set_pc_im(s->pc);
9ee6e8bb 2599 s->is_jmp = DISAS_WFI;
9332f9da
FB
2600 return 0;
2601 }
b5ff1b31 2602 rd = (insn >> 12) & 0xf;
18c9b560 2603 if (insn & ARM_CP_RW_BIT) {
8984bd2e
PB
2604 tmp = new_tmp();
2605 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
b5ff1b31
FB
2606 /* If the destination register is r15 then sets condition codes. */
2607 if (rd != 15)
8984bd2e
PB
2608 store_reg(s, rd, tmp);
2609 else
2610 dead_tmp(tmp);
b5ff1b31 2611 } else {
8984bd2e
PB
2612 tmp = load_reg(s, rd);
2613 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2614 dead_tmp(tmp);
a90b7318
AZ
2615 /* Normally we would always end the TB here, but Linux
2616 * arch/arm/mach-pxa/sleep.S expects two instructions following
2617 * an MMU enable to execute from cache. Imitate this behaviour. */
2618 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2619 (insn & 0x0fff0fff) != 0x0e010f10)
2620 gen_lookup_tb(s);
b5ff1b31 2621 }
b5ff1b31
FB
2622 return 0;
2623}
2624
9ee6e8bb
PB
/* Helpers for decoding VFP register numbers from an instruction word.
   Single-precision registers combine a 4-bit field with one extra low
   bit; double-precision registers gain a high bit only on VFP3-capable
   cores (VFP2 requires that bit to be zero, else UNDEF).  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        /* Pre-VFP3: the extra register bit must be clear.  */ \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2644
4373f3ce
PB
2645/* Move between integer and VFP cores. */
2646static TCGv gen_vfp_mrs(void)
2647{
2648 TCGv tmp = new_tmp();
2649 tcg_gen_mov_i32(tmp, cpu_F0s);
2650 return tmp;
2651}
2652
2653static void gen_vfp_msr(TCGv tmp)
2654{
2655 tcg_gen_mov_i32(cpu_F0s, tmp);
2656 dead_tmp(tmp);
2657}
2658
9ee6e8bb
PB
2659static inline int
2660vfp_enabled(CPUState * env)
2661{
2662 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2663}
2664
ad69471c
PB
2665static void gen_neon_dup_u8(TCGv var, int shift)
2666{
2667 TCGv tmp = new_tmp();
2668 if (shift)
2669 tcg_gen_shri_i32(var, var, shift);
86831435 2670 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2671 tcg_gen_shli_i32(tmp, var, 8);
2672 tcg_gen_or_i32(var, var, tmp);
2673 tcg_gen_shli_i32(tmp, var, 16);
2674 tcg_gen_or_i32(var, var, tmp);
2675 dead_tmp(tmp);
2676}
2677
2678static void gen_neon_dup_low16(TCGv var)
2679{
2680 TCGv tmp = new_tmp();
86831435 2681 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2682 tcg_gen_shli_i32(tmp, var, 16);
2683 tcg_gen_or_i32(var, var, tmp);
2684 dead_tmp(tmp);
2685}
2686
2687static void gen_neon_dup_high16(TCGv var)
2688{
2689 TCGv tmp = new_tmp();
2690 tcg_gen_andi_i32(var, var, 0xffff0000);
2691 tcg_gen_shri_i32(tmp, var, 16);
2692 tcg_gen_or_i32(var, var, tmp);
2693 dead_tmp(tmp);
2694}
2695
b7bcbe95
FB
2696/* Disassemble a VFP instruction. Returns nonzero if an error occured
2697 (ie. an undefined instruction). */
2698static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2699{
2700 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2701 int dp, veclen;
4373f3ce 2702 TCGv tmp;
ad69471c 2703 TCGv tmp2;
b7bcbe95 2704
40f137e1
PB
2705 if (!arm_feature(env, ARM_FEATURE_VFP))
2706 return 1;
2707
9ee6e8bb
PB
2708 if (!vfp_enabled(env)) {
2709 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2710 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2711 return 1;
2712 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2713 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2714 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2715 return 1;
2716 }
b7bcbe95
FB
2717 dp = ((insn & 0xf00) == 0xb00);
2718 switch ((insn >> 24) & 0xf) {
2719 case 0xe:
2720 if (insn & (1 << 4)) {
2721 /* single register transfer */
b7bcbe95
FB
2722 rd = (insn >> 12) & 0xf;
2723 if (dp) {
9ee6e8bb
PB
2724 int size;
2725 int pass;
2726
2727 VFP_DREG_N(rn, insn);
2728 if (insn & 0xf)
b7bcbe95 2729 return 1;
9ee6e8bb
PB
2730 if (insn & 0x00c00060
2731 && !arm_feature(env, ARM_FEATURE_NEON))
2732 return 1;
2733
2734 pass = (insn >> 21) & 1;
2735 if (insn & (1 << 22)) {
2736 size = 0;
2737 offset = ((insn >> 5) & 3) * 8;
2738 } else if (insn & (1 << 5)) {
2739 size = 1;
2740 offset = (insn & (1 << 6)) ? 16 : 0;
2741 } else {
2742 size = 2;
2743 offset = 0;
2744 }
18c9b560 2745 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2746 /* vfp->arm */
ad69471c 2747 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2748 switch (size) {
2749 case 0:
9ee6e8bb 2750 if (offset)
ad69471c 2751 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2752 if (insn & (1 << 23))
ad69471c 2753 gen_uxtb(tmp);
9ee6e8bb 2754 else
ad69471c 2755 gen_sxtb(tmp);
9ee6e8bb
PB
2756 break;
2757 case 1:
9ee6e8bb
PB
2758 if (insn & (1 << 23)) {
2759 if (offset) {
ad69471c 2760 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2761 } else {
ad69471c 2762 gen_uxth(tmp);
9ee6e8bb
PB
2763 }
2764 } else {
2765 if (offset) {
ad69471c 2766 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2767 } else {
ad69471c 2768 gen_sxth(tmp);
9ee6e8bb
PB
2769 }
2770 }
2771 break;
2772 case 2:
9ee6e8bb
PB
2773 break;
2774 }
ad69471c 2775 store_reg(s, rd, tmp);
b7bcbe95
FB
2776 } else {
2777 /* arm->vfp */
ad69471c 2778 tmp = load_reg(s, rd);
9ee6e8bb
PB
2779 if (insn & (1 << 23)) {
2780 /* VDUP */
2781 if (size == 0) {
ad69471c 2782 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2783 } else if (size == 1) {
ad69471c 2784 gen_neon_dup_low16(tmp);
9ee6e8bb 2785 }
ad69471c
PB
2786 tmp2 = new_tmp();
2787 tcg_gen_mov_i32(tmp2, tmp);
2788 neon_store_reg(rn, 0, tmp2);
3018f259 2789 neon_store_reg(rn, 1, tmp);
9ee6e8bb
PB
2790 } else {
2791 /* VMOV */
2792 switch (size) {
2793 case 0:
ad69471c
PB
2794 tmp2 = neon_load_reg(rn, pass);
2795 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2796 dead_tmp(tmp2);
9ee6e8bb
PB
2797 break;
2798 case 1:
ad69471c
PB
2799 tmp2 = neon_load_reg(rn, pass);
2800 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2801 dead_tmp(tmp2);
9ee6e8bb
PB
2802 break;
2803 case 2:
9ee6e8bb
PB
2804 break;
2805 }
ad69471c 2806 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2807 }
b7bcbe95 2808 }
9ee6e8bb
PB
2809 } else { /* !dp */
2810 if ((insn & 0x6f) != 0x00)
2811 return 1;
2812 rn = VFP_SREG_N(insn);
18c9b560 2813 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2814 /* vfp->arm */
2815 if (insn & (1 << 21)) {
2816 /* system register */
40f137e1 2817 rn >>= 1;
9ee6e8bb 2818
b7bcbe95 2819 switch (rn) {
40f137e1 2820 case ARM_VFP_FPSID:
4373f3ce 2821 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2822 VFP3 restricts all id registers to privileged
2823 accesses. */
2824 if (IS_USER(s)
2825 && arm_feature(env, ARM_FEATURE_VFP3))
2826 return 1;
4373f3ce 2827 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2828 break;
40f137e1 2829 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2830 if (IS_USER(s))
2831 return 1;
4373f3ce 2832 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2833 break;
40f137e1
PB
2834 case ARM_VFP_FPINST:
2835 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2836 /* Not present in VFP3. */
2837 if (IS_USER(s)
2838 || arm_feature(env, ARM_FEATURE_VFP3))
2839 return 1;
4373f3ce 2840 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2841 break;
40f137e1 2842 case ARM_VFP_FPSCR:
601d70b9 2843 if (rd == 15) {
4373f3ce
PB
2844 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2845 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2846 } else {
2847 tmp = new_tmp();
2848 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2849 }
b7bcbe95 2850 break;
9ee6e8bb
PB
2851 case ARM_VFP_MVFR0:
2852 case ARM_VFP_MVFR1:
2853 if (IS_USER(s)
2854 || !arm_feature(env, ARM_FEATURE_VFP3))
2855 return 1;
4373f3ce 2856 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2857 break;
b7bcbe95
FB
2858 default:
2859 return 1;
2860 }
2861 } else {
2862 gen_mov_F0_vreg(0, rn);
4373f3ce 2863 tmp = gen_vfp_mrs();
b7bcbe95
FB
2864 }
2865 if (rd == 15) {
b5ff1b31 2866 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2867 gen_set_nzcv(tmp);
2868 dead_tmp(tmp);
2869 } else {
2870 store_reg(s, rd, tmp);
2871 }
b7bcbe95
FB
2872 } else {
2873 /* arm->vfp */
4373f3ce 2874 tmp = load_reg(s, rd);
b7bcbe95 2875 if (insn & (1 << 21)) {
40f137e1 2876 rn >>= 1;
b7bcbe95
FB
2877 /* system register */
2878 switch (rn) {
40f137e1 2879 case ARM_VFP_FPSID:
9ee6e8bb
PB
2880 case ARM_VFP_MVFR0:
2881 case ARM_VFP_MVFR1:
b7bcbe95
FB
2882 /* Writes are ignored. */
2883 break;
40f137e1 2884 case ARM_VFP_FPSCR:
4373f3ce
PB
2885 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2886 dead_tmp(tmp);
b5ff1b31 2887 gen_lookup_tb(s);
b7bcbe95 2888 break;
40f137e1 2889 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2890 if (IS_USER(s))
2891 return 1;
4373f3ce 2892 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2893 gen_lookup_tb(s);
2894 break;
2895 case ARM_VFP_FPINST:
2896 case ARM_VFP_FPINST2:
4373f3ce 2897 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2898 break;
b7bcbe95
FB
2899 default:
2900 return 1;
2901 }
2902 } else {
4373f3ce 2903 gen_vfp_msr(tmp);
b7bcbe95
FB
2904 gen_mov_vreg_F0(0, rn);
2905 }
2906 }
2907 }
2908 } else {
2909 /* data processing */
2910 /* The opcode is in bits 23, 21, 20 and 6. */
2911 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2912 if (dp) {
2913 if (op == 15) {
2914 /* rn is opcode */
2915 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2916 } else {
2917 /* rn is register number */
9ee6e8bb 2918 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2919 }
2920
2921 if (op == 15 && (rn == 15 || rn > 17)) {
2922 /* Integer or single precision destination. */
9ee6e8bb 2923 rd = VFP_SREG_D(insn);
b7bcbe95 2924 } else {
9ee6e8bb 2925 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2926 }
2927
2928 if (op == 15 && (rn == 16 || rn == 17)) {
2929 /* Integer source. */
2930 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2931 } else {
9ee6e8bb 2932 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2933 }
2934 } else {
9ee6e8bb 2935 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2936 if (op == 15 && rn == 15) {
2937 /* Double precision destination. */
9ee6e8bb
PB
2938 VFP_DREG_D(rd, insn);
2939 } else {
2940 rd = VFP_SREG_D(insn);
2941 }
2942 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2943 }
2944
2945 veclen = env->vfp.vec_len;
2946 if (op == 15 && rn > 3)
2947 veclen = 0;
2948
2949 /* Shut up compiler warnings. */
2950 delta_m = 0;
2951 delta_d = 0;
2952 bank_mask = 0;
3b46e624 2953
b7bcbe95
FB
2954 if (veclen > 0) {
2955 if (dp)
2956 bank_mask = 0xc;
2957 else
2958 bank_mask = 0x18;
2959
2960 /* Figure out what type of vector operation this is. */
2961 if ((rd & bank_mask) == 0) {
2962 /* scalar */
2963 veclen = 0;
2964 } else {
2965 if (dp)
2966 delta_d = (env->vfp.vec_stride >> 1) + 1;
2967 else
2968 delta_d = env->vfp.vec_stride + 1;
2969
2970 if ((rm & bank_mask) == 0) {
2971 /* mixed scalar/vector */
2972 delta_m = 0;
2973 } else {
2974 /* vector */
2975 delta_m = delta_d;
2976 }
2977 }
2978 }
2979
2980 /* Load the initial operands. */
2981 if (op == 15) {
2982 switch (rn) {
2983 case 16:
2984 case 17:
2985 /* Integer source */
2986 gen_mov_F0_vreg(0, rm);
2987 break;
2988 case 8:
2989 case 9:
2990 /* Compare */
2991 gen_mov_F0_vreg(dp, rd);
2992 gen_mov_F1_vreg(dp, rm);
2993 break;
2994 case 10:
2995 case 11:
2996 /* Compare with zero */
2997 gen_mov_F0_vreg(dp, rd);
2998 gen_vfp_F1_ld0(dp);
2999 break;
9ee6e8bb
PB
3000 case 20:
3001 case 21:
3002 case 22:
3003 case 23:
3004 /* Source and destination the same. */
3005 gen_mov_F0_vreg(dp, rd);
3006 break;
b7bcbe95
FB
3007 default:
3008 /* One source operand. */
3009 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3010 break;
b7bcbe95
FB
3011 }
3012 } else {
3013 /* Two source operands. */
3014 gen_mov_F0_vreg(dp, rn);
3015 gen_mov_F1_vreg(dp, rm);
3016 }
3017
3018 for (;;) {
3019 /* Perform the calculation. */
3020 switch (op) {
3021 case 0: /* mac: fd + (fn * fm) */
3022 gen_vfp_mul(dp);
3023 gen_mov_F1_vreg(dp, rd);
3024 gen_vfp_add(dp);
3025 break;
3026 case 1: /* nmac: fd - (fn * fm) */
3027 gen_vfp_mul(dp);
3028 gen_vfp_neg(dp);
3029 gen_mov_F1_vreg(dp, rd);
3030 gen_vfp_add(dp);
3031 break;
3032 case 2: /* msc: -fd + (fn * fm) */
3033 gen_vfp_mul(dp);
3034 gen_mov_F1_vreg(dp, rd);
3035 gen_vfp_sub(dp);
3036 break;
3037 case 3: /* nmsc: -fd - (fn * fm) */
3038 gen_vfp_mul(dp);
b7bcbe95 3039 gen_vfp_neg(dp);
c9fb531a
PB
3040 gen_mov_F1_vreg(dp, rd);
3041 gen_vfp_sub(dp);
b7bcbe95
FB
3042 break;
3043 case 4: /* mul: fn * fm */
3044 gen_vfp_mul(dp);
3045 break;
3046 case 5: /* nmul: -(fn * fm) */
3047 gen_vfp_mul(dp);
3048 gen_vfp_neg(dp);
3049 break;
3050 case 6: /* add: fn + fm */
3051 gen_vfp_add(dp);
3052 break;
3053 case 7: /* sub: fn - fm */
3054 gen_vfp_sub(dp);
3055 break;
3056 case 8: /* div: fn / fm */
3057 gen_vfp_div(dp);
3058 break;
9ee6e8bb
PB
3059 case 14: /* fconst */
3060 if (!arm_feature(env, ARM_FEATURE_VFP3))
3061 return 1;
3062
3063 n = (insn << 12) & 0x80000000;
3064 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3065 if (dp) {
3066 if (i & 0x40)
3067 i |= 0x3f80;
3068 else
3069 i |= 0x4000;
3070 n |= i << 16;
4373f3ce 3071 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3072 } else {
3073 if (i & 0x40)
3074 i |= 0x780;
3075 else
3076 i |= 0x800;
3077 n |= i << 19;
5b340b51 3078 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3079 }
9ee6e8bb 3080 break;
b7bcbe95
FB
3081 case 15: /* extension space */
3082 switch (rn) {
3083 case 0: /* cpy */
3084 /* no-op */
3085 break;
3086 case 1: /* abs */
3087 gen_vfp_abs(dp);
3088 break;
3089 case 2: /* neg */
3090 gen_vfp_neg(dp);
3091 break;
3092 case 3: /* sqrt */
3093 gen_vfp_sqrt(dp);
3094 break;
3095 case 8: /* cmp */
3096 gen_vfp_cmp(dp);
3097 break;
3098 case 9: /* cmpe */
3099 gen_vfp_cmpe(dp);
3100 break;
3101 case 10: /* cmpz */
3102 gen_vfp_cmp(dp);
3103 break;
3104 case 11: /* cmpez */
3105 gen_vfp_F1_ld0(dp);
3106 gen_vfp_cmpe(dp);
3107 break;
3108 case 15: /* single<->double conversion */
3109 if (dp)
4373f3ce 3110 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3111 else
4373f3ce 3112 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3113 break;
3114 case 16: /* fuito */
3115 gen_vfp_uito(dp);
3116 break;
3117 case 17: /* fsito */
3118 gen_vfp_sito(dp);
3119 break;
9ee6e8bb
PB
3120 case 20: /* fshto */
3121 if (!arm_feature(env, ARM_FEATURE_VFP3))
3122 return 1;
3123 gen_vfp_shto(dp, rm);
3124 break;
3125 case 21: /* fslto */
3126 if (!arm_feature(env, ARM_FEATURE_VFP3))
3127 return 1;
3128 gen_vfp_slto(dp, rm);
3129 break;
3130 case 22: /* fuhto */
3131 if (!arm_feature(env, ARM_FEATURE_VFP3))
3132 return 1;
3133 gen_vfp_uhto(dp, rm);
3134 break;
3135 case 23: /* fulto */
3136 if (!arm_feature(env, ARM_FEATURE_VFP3))
3137 return 1;
3138 gen_vfp_ulto(dp, rm);
3139 break;
b7bcbe95
FB
3140 case 24: /* ftoui */
3141 gen_vfp_toui(dp);
3142 break;
3143 case 25: /* ftouiz */
3144 gen_vfp_touiz(dp);
3145 break;
3146 case 26: /* ftosi */
3147 gen_vfp_tosi(dp);
3148 break;
3149 case 27: /* ftosiz */
3150 gen_vfp_tosiz(dp);
3151 break;
9ee6e8bb
PB
3152 case 28: /* ftosh */
3153 if (!arm_feature(env, ARM_FEATURE_VFP3))
3154 return 1;
3155 gen_vfp_tosh(dp, rm);
3156 break;
3157 case 29: /* ftosl */
3158 if (!arm_feature(env, ARM_FEATURE_VFP3))
3159 return 1;
3160 gen_vfp_tosl(dp, rm);
3161 break;
3162 case 30: /* ftouh */
3163 if (!arm_feature(env, ARM_FEATURE_VFP3))
3164 return 1;
3165 gen_vfp_touh(dp, rm);
3166 break;
3167 case 31: /* ftoul */
3168 if (!arm_feature(env, ARM_FEATURE_VFP3))
3169 return 1;
3170 gen_vfp_toul(dp, rm);
3171 break;
b7bcbe95
FB
3172 default: /* undefined */
3173 printf ("rn:%d\n", rn);
3174 return 1;
3175 }
3176 break;
3177 default: /* undefined */
3178 printf ("op:%d\n", op);
3179 return 1;
3180 }
3181
3182 /* Write back the result. */
3183 if (op == 15 && (rn >= 8 && rn <= 11))
3184 ; /* Comparison, do nothing. */
3185 else if (op == 15 && rn > 17)
3186 /* Integer result. */
3187 gen_mov_vreg_F0(0, rd);
3188 else if (op == 15 && rn == 15)
3189 /* conversion */
3190 gen_mov_vreg_F0(!dp, rd);
3191 else
3192 gen_mov_vreg_F0(dp, rd);
3193
3194 /* break out of the loop if we have finished */
3195 if (veclen == 0)
3196 break;
3197
3198 if (op == 15 && delta_m == 0) {
3199 /* single source one-many */
3200 while (veclen--) {
3201 rd = ((rd + delta_d) & (bank_mask - 1))
3202 | (rd & bank_mask);
3203 gen_mov_vreg_F0(dp, rd);
3204 }
3205 break;
3206 }
3207 /* Setup the next operands. */
3208 veclen--;
3209 rd = ((rd + delta_d) & (bank_mask - 1))
3210 | (rd & bank_mask);
3211
3212 if (op == 15) {
3213 /* One source operand. */
3214 rm = ((rm + delta_m) & (bank_mask - 1))
3215 | (rm & bank_mask);
3216 gen_mov_F0_vreg(dp, rm);
3217 } else {
3218 /* Two source operands. */
3219 rn = ((rn + delta_d) & (bank_mask - 1))
3220 | (rn & bank_mask);
3221 gen_mov_F0_vreg(dp, rn);
3222 if (delta_m) {
3223 rm = ((rm + delta_m) & (bank_mask - 1))
3224 | (rm & bank_mask);
3225 gen_mov_F1_vreg(dp, rm);
3226 }
3227 }
3228 }
3229 }
3230 break;
3231 case 0xc:
3232 case 0xd:
9ee6e8bb 3233 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3234 /* two-register transfer */
3235 rn = (insn >> 16) & 0xf;
3236 rd = (insn >> 12) & 0xf;
3237 if (dp) {
9ee6e8bb
PB
3238 VFP_DREG_M(rm, insn);
3239 } else {
3240 rm = VFP_SREG_M(insn);
3241 }
b7bcbe95 3242
18c9b560 3243 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3244 /* vfp->arm */
3245 if (dp) {
4373f3ce
PB
3246 gen_mov_F0_vreg(0, rm * 2);
3247 tmp = gen_vfp_mrs();
3248 store_reg(s, rd, tmp);
3249 gen_mov_F0_vreg(0, rm * 2 + 1);
3250 tmp = gen_vfp_mrs();
3251 store_reg(s, rn, tmp);
b7bcbe95
FB
3252 } else {
3253 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3254 tmp = gen_vfp_mrs();
3255 store_reg(s, rn, tmp);
b7bcbe95 3256 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3257 tmp = gen_vfp_mrs();
3258 store_reg(s, rd, tmp);
b7bcbe95
FB
3259 }
3260 } else {
3261 /* arm->vfp */
3262 if (dp) {
4373f3ce
PB
3263 tmp = load_reg(s, rd);
3264 gen_vfp_msr(tmp);
3265 gen_mov_vreg_F0(0, rm * 2);
3266 tmp = load_reg(s, rn);
3267 gen_vfp_msr(tmp);
3268 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3269 } else {
4373f3ce
PB
3270 tmp = load_reg(s, rn);
3271 gen_vfp_msr(tmp);
b7bcbe95 3272 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3273 tmp = load_reg(s, rd);
3274 gen_vfp_msr(tmp);
b7bcbe95
FB
3275 gen_mov_vreg_F0(0, rm + 1);
3276 }
3277 }
3278 } else {
3279 /* Load/store */
3280 rn = (insn >> 16) & 0xf;
3281 if (dp)
9ee6e8bb 3282 VFP_DREG_D(rd, insn);
b7bcbe95 3283 else
9ee6e8bb
PB
3284 rd = VFP_SREG_D(insn);
3285 if (s->thumb && rn == 15) {
3286 gen_op_movl_T1_im(s->pc & ~2);
3287 } else {
3288 gen_movl_T1_reg(s, rn);
3289 }
b7bcbe95
FB
3290 if ((insn & 0x01200000) == 0x01000000) {
3291 /* Single load/store */
3292 offset = (insn & 0xff) << 2;
3293 if ((insn & (1 << 23)) == 0)
3294 offset = -offset;
3295 gen_op_addl_T1_im(offset);
3296 if (insn & (1 << 20)) {
b5ff1b31 3297 gen_vfp_ld(s, dp);
b7bcbe95
FB
3298 gen_mov_vreg_F0(dp, rd);
3299 } else {
3300 gen_mov_F0_vreg(dp, rd);
b5ff1b31 3301 gen_vfp_st(s, dp);
b7bcbe95
FB
3302 }
3303 } else {
3304 /* load/store multiple */
3305 if (dp)
3306 n = (insn >> 1) & 0x7f;
3307 else
3308 n = insn & 0xff;
3309
3310 if (insn & (1 << 24)) /* pre-decrement */
3311 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3312
3313 if (dp)
3314 offset = 8;
3315 else
3316 offset = 4;
3317 for (i = 0; i < n; i++) {
18c9b560 3318 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3319 /* load */
b5ff1b31 3320 gen_vfp_ld(s, dp);
b7bcbe95
FB
3321 gen_mov_vreg_F0(dp, rd + i);
3322 } else {
3323 /* store */
3324 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3325 gen_vfp_st(s, dp);
b7bcbe95
FB
3326 }
3327 gen_op_addl_T1_im(offset);
3328 }
3329 if (insn & (1 << 21)) {
3330 /* writeback */
3331 if (insn & (1 << 24))
3332 offset = -offset * n;
3333 else if (dp && (insn & 1))
3334 offset = 4;
3335 else
3336 offset = 0;
3337
3338 if (offset != 0)
3339 gen_op_addl_T1_im(offset);
3340 gen_movl_reg_T1(s, rn);
3341 }
3342 }
3343 }
3344 break;
3345 default:
3346 /* Should never happen. */
3347 return 1;
3348 }
3349 return 0;
3350}
3351
6e256c93 3352static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3353{
6e256c93
FB
3354 TranslationBlock *tb;
3355
3356 tb = s->tb;
3357 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3358 tcg_gen_goto_tb(n);
8984bd2e 3359 gen_set_pc_im(dest);
57fec1fe 3360 tcg_gen_exit_tb((long)tb + n);
6e256c93 3361 } else {
8984bd2e 3362 gen_set_pc_im(dest);
57fec1fe 3363 tcg_gen_exit_tb(0);
6e256c93 3364 }
c53be334
FB
3365}
3366
8aaca4c0
FB
3367static inline void gen_jmp (DisasContext *s, uint32_t dest)
3368{
551bd27f 3369 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3370 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3371 if (s->thumb)
d9ba4830
PB
3372 dest |= 1;
3373 gen_bx_im(s, dest);
8aaca4c0 3374 } else {
6e256c93 3375 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3376 s->is_jmp = DISAS_TB_JUMP;
3377 }
3378}
3379
d9ba4830 3380static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3381{
ee097184 3382 if (x)
d9ba4830 3383 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3384 else
d9ba4830 3385 gen_sxth(t0);
ee097184 3386 if (y)
d9ba4830 3387 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3388 else
d9ba4830
PB
3389 gen_sxth(t1);
3390 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3391}
3392
3393/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3394static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3395 uint32_t mask;
3396
3397 mask = 0;
3398 if (flags & (1 << 0))
3399 mask |= 0xff;
3400 if (flags & (1 << 1))
3401 mask |= 0xff00;
3402 if (flags & (1 << 2))
3403 mask |= 0xff0000;
3404 if (flags & (1 << 3))
3405 mask |= 0xff000000;
9ee6e8bb 3406
2ae23e75 3407 /* Mask out undefined bits. */
9ee6e8bb
PB
3408 mask &= ~CPSR_RESERVED;
3409 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3410 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3411 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3412 mask &= ~CPSR_IT;
9ee6e8bb 3413 /* Mask out execution state bits. */
2ae23e75 3414 if (!spsr)
e160c51c 3415 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3416 /* Mask out privileged bits. */
3417 if (IS_USER(s))
9ee6e8bb 3418 mask &= CPSR_USER;
b5ff1b31
FB
3419 return mask;
3420}
3421
/* Write T0 into the CPSR or (when SPSR is nonzero) the current mode's
   SPSR, restricted to the bits in MASK.
   Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write: keep the SPSR bits outside MASK, merge in
           the new bits from T0.  NOTE: load_cpu_field/store_cpu_field
           take the CPUARMState field name `spsr` here, not the integer
           parameter of the same name (cf. gen_exception_return, which
           uses the same idiom with no local `spsr` in scope).  */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
        tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
        store_cpu_field(tmp, spsr);
    } else {
        /* CPSR writes go through the helper (handles mode changes).  */
        gen_set_cpsr(cpu_T[0], mask);
    }
    /* A PSR update may change translation-relevant state.  */
    gen_lookup_tb(s);
    return 0;
}
3442
/* Generate an old-style exception return: the new PC has already been
   placed in T0 by the caller, and the saved PSR is restored from the
   current mode's SPSR.  Ends the TB since CPU state may change.  */
static void gen_exception_return(DisasContext *s)
{
    TCGv tmp;
    gen_movl_reg_T0(s, 15);           /* PC = T0 */
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);    /* CPSR = SPSR, all bits writable */
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}
3453
b0109805
PB
/* Generate a v6 exception return.  Marks both values as dead:
   ownership of PC and CPSR temporaries transfers to this function.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);   /* restore the full PSR */
    dead_tmp(cpsr);
    store_reg(s, 15, pc);             /* jump to the restored PC */
    s->is_jmp = DISAS_UPDATE;         /* CPU state changed: end the TB */
}
3b46e624 3462
9ee6e8bb
PB
/* Write the translator's current IT (Thumb-2 conditional execution)
   state back to the condexec_bits CPU field.  No-op when not inside
   an IT block (condexec_mask == 0).  */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Repack condition and mask into the stored encoding.  */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
3b46e624 3473
9ee6e8bb
PB
3474static void gen_nop_hint(DisasContext *s, int val)
3475{
3476 switch (val) {
3477 case 3: /* wfi */
8984bd2e 3478 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3479 s->is_jmp = DISAS_WFI;
3480 break;
3481 case 2: /* wfe */
3482 case 4: /* sev */
3483 /* TODO: Implement SEV and WFE. May help SMP performance. */
3484 default: /* nop */
3485 break;
3486 }
3487}
99c475ab 3488
ad69471c
PB
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
/* (dest, src1, src2) triple built from the T0/T1 globals.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
/* Same, with cpu_env inserted for helpers that need the CPU state.  */
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

/* 64-bit equivalent using the V0/V1 globals.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb
PB
3496
/* Per-element Neon add: T0 += T1 for 8/16/32-bit element sizes.
   Returns nonzero if SIZE is invalid.  */
static inline int gen_neon_add(int size)
{
    switch (size) {
    case 0: gen_helper_neon_add_u8(CPU_T001); break;
    case 1: gen_helper_neon_add_u16(CPU_T001); break;
    case 2: gen_op_addl_T0_T1(); break;   /* plain 32-bit add */
    default: return 1;
    }
    return 0;
}
3507
ad69471c
PB
/* Per-element reverse subtract: T0 = T1 - T0.  Invalid sizes are
   silently ignored (callers have already validated SIZE).  */
static inline void gen_neon_rsb(int size)
{
    switch (size) {
    case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
    case 2: gen_op_rsbl_T0_T1(); break;   /* plain 32-bit rsb */
    default: return;
    }
}
3517
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
/* Map the 32-bit Neon saturating ops onto the core ARM saturating
   helpers; note the env argument `e` is discarded by these aliases.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3529
/* Dispatch to the signed/unsigned 8/16/32-bit variant of the Neon
   helper `name`, selected by the local variables `size` and `u`;
   operands are T0/T1, result in T0, cpu_env passed through.  Expands
   to `return 1` on an invalid size, so it may only be used inside a
   function returning int.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3552
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take
   cpu_env.  Operands T0/T1, result in T0; `return 1` on bad size.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
3575
3576static inline void
3577gen_neon_movl_scratch_T0(int scratch)
3578{
3579 uint32_t offset;
3580
3581 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3582 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3583}
3584
3585static inline void
3586gen_neon_movl_scratch_T1(int scratch)
3587{
3588 uint32_t offset;
3589
3590 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3591 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3592}
3593
3594static inline void
3595gen_neon_movl_T0_scratch(int scratch)
3596{
3597 uint32_t offset;
3598
3599 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3600 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3601}
3602
3603static inline void
3604gen_neon_movl_T1_scratch(int scratch)
3605{
3606 uint32_t offset;
3607
3608 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3609 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3610}
3611
/* Fetch a scalar operand into T0.  For size == 1 a whole 32-bit
   element is loaded; otherwise one 16-bit half of the element is
   selected (by bit 0 of REG) and duplicated into both halves of T0.
   NOTE(review): the meaning of the size encoding here (1 -> 32-bit,
   else -> 16-bit) should be confirmed against the callers.  */
static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        if (reg & 1)
            gen_neon_dup_low16(cpu_T[0]);
        else
            gen_neon_dup_high16(cpu_T[0]);
    }
}
3624
3625static void gen_neon_unzip(int reg, int q, int tmp, int size)
3626{
3627 int n;
3628
3629 for (n = 0; n < q + 1; n += 2) {
3630 NEON_GET_REG(T0, reg, n);
3631 NEON_GET_REG(T0, reg, n + n);
3632 switch (size) {
ad69471c
PB
3633 case 0: gen_helper_neon_unzip_u8(); break;
3634 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3635 case 2: /* no-op */; break;
3636 default: abort();
3637 }
3638 gen_neon_movl_scratch_T0(tmp + n);
3639 gen_neon_movl_scratch_T1(tmp + n + 1);
3640 }
3641}
3642
/* Per-opcode parameters for the "load/store all elements" forms of the
   Neon element/structure load/store instructions, indexed by the op
   field decoded in disas_neon_ls_insn:
     nregs      - number of D registers transferred
     interleave - element interleave factor between registers
     spacing    - step between destination register numbers  */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3660
3661/* Translate a NEON load/store element instruction. Return nonzero if the
3662 instruction is invalid. */
3663static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3664{
3665 int rd, rn, rm;
3666 int op;
3667 int nregs;
3668 int interleave;
3669 int stride;
3670 int size;
3671 int reg;
3672 int pass;
3673 int load;
3674 int shift;
9ee6e8bb 3675 int n;
b0109805 3676 TCGv tmp;
8f8e3aa4 3677 TCGv tmp2;
9ee6e8bb
PB
3678
3679 if (!vfp_enabled(env))
3680 return 1;
3681 VFP_DREG_D(rd, insn);
3682 rn = (insn >> 16) & 0xf;
3683 rm = insn & 0xf;
3684 load = (insn & (1 << 21)) != 0;
3685 if ((insn & (1 << 23)) == 0) {
3686 /* Load store all elements. */
3687 op = (insn >> 8) & 0xf;
3688 size = (insn >> 6) & 3;
3689 if (op > 10 || size == 3)
3690 return 1;
3691 nregs = neon_ls_element_type[op].nregs;
3692 interleave = neon_ls_element_type[op].interleave;
3693 gen_movl_T1_reg(s, rn);
3694 stride = (1 << size) * interleave;
3695 for (reg = 0; reg < nregs; reg++) {
3696 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3697 gen_movl_T1_reg(s, rn);
3698 gen_op_addl_T1_im((1 << size) * reg);
3699 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3700 gen_movl_T1_reg(s, rn);
3701 gen_op_addl_T1_im(1 << size);
3702 }
3703 for (pass = 0; pass < 2; pass++) {
3704 if (size == 2) {
3705 if (load) {
b0109805 3706 tmp = gen_ld32(cpu_T[1], IS_USER(s));
ad69471c 3707 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3708 } else {
ad69471c 3709 tmp = neon_load_reg(rd, pass);
b0109805 3710 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3711 }
3712 gen_op_addl_T1_im(stride);
3713 } else if (size == 1) {
3714 if (load) {
b0109805 3715 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3716 gen_op_addl_T1_im(stride);
8f8e3aa4 3717 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3718 gen_op_addl_T1_im(stride);
8f8e3aa4
PB
3719 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3720 dead_tmp(tmp2);
3721 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3722 } else {
8f8e3aa4
PB
3723 tmp = neon_load_reg(rd, pass);
3724 tmp2 = new_tmp();
3725 tcg_gen_shri_i32(tmp2, tmp, 16);
b0109805 3726 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3727 gen_op_addl_T1_im(stride);
8f8e3aa4 3728 gen_st16(tmp2, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3729 gen_op_addl_T1_im(stride);
3730 }
3731 } else /* size == 0 */ {
3732 if (load) {
a50f5b91 3733 TCGV_UNUSED(tmp2);
9ee6e8bb 3734 for (n = 0; n < 4; n++) {
b0109805 3735 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3736 gen_op_addl_T1_im(stride);
3737 if (n == 0) {
8f8e3aa4 3738 tmp2 = tmp;
9ee6e8bb 3739 } else {
8f8e3aa4
PB
3740 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3741 dead_tmp(tmp);
9ee6e8bb 3742 }
9ee6e8bb 3743 }
8f8e3aa4 3744 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3745 } else {
8f8e3aa4 3746 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3747 for (n = 0; n < 4; n++) {
8f8e3aa4 3748 tmp = new_tmp();
9ee6e8bb 3749 if (n == 0) {
8f8e3aa4 3750 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3751 } else {
8f8e3aa4 3752 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3753 }
b0109805 3754 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3755 gen_op_addl_T1_im(stride);
9ee6e8bb 3756 }
8f8e3aa4 3757 dead_tmp(tmp2);
9ee6e8bb
PB
3758 }
3759 }
3760 }
3761 rd += neon_ls_element_type[op].spacing;
3762 }
3763 stride = nregs * 8;
3764 } else {
3765 size = (insn >> 10) & 3;
3766 if (size == 3) {
3767 /* Load single element to all lanes. */
3768 if (!load)
3769 return 1;
3770 size = (insn >> 6) & 3;
3771 nregs = ((insn >> 8) & 3) + 1;
3772 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3773 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3774 for (reg = 0; reg < nregs; reg++) {
3775 switch (size) {
3776 case 0:
b0109805 3777 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
ad69471c 3778 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3779 break;
3780 case 1:
b0109805 3781 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
ad69471c 3782 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3783 break;
3784 case 2:
b0109805 3785 tmp = gen_ld32(cpu_T[0], IS_USER(s));
9ee6e8bb
PB
3786 break;
3787 case 3:
3788 return 1;
a50f5b91
PB
3789 default: /* Avoid compiler warnings. */
3790 abort();
99c475ab 3791 }
9ee6e8bb 3792 gen_op_addl_T1_im(1 << size);
ad69471c
PB
3793 tmp2 = new_tmp();
3794 tcg_gen_mov_i32(tmp2, tmp);
3795 neon_store_reg(rd, 0, tmp2);
3018f259 3796 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3797 rd += stride;
3798 }
3799 stride = (1 << size) * nregs;
3800 } else {
3801 /* Single element. */
3802 pass = (insn >> 7) & 1;
3803 switch (size) {
3804 case 0:
3805 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3806 stride = 1;
3807 break;
3808 case 1:
3809 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3810 stride = (insn & (1 << 5)) ? 2 : 1;
3811 break;
3812 case 2:
3813 shift = 0;
9ee6e8bb
PB
3814 stride = (insn & (1 << 6)) ? 2 : 1;
3815 break;
3816 default:
3817 abort();
3818 }
3819 nregs = ((insn >> 8) & 3) + 1;
3820 gen_movl_T1_reg(s, rn);
3821 for (reg = 0; reg < nregs; reg++) {
3822 if (load) {
9ee6e8bb
PB
3823 switch (size) {
3824 case 0:
b0109805 3825 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3826 break;
3827 case 1:
b0109805 3828 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3829 break;
3830 case 2:
b0109805 3831 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb 3832 break;
a50f5b91
PB
3833 default: /* Avoid compiler warnings. */
3834 abort();
9ee6e8bb
PB
3835 }
3836 if (size != 2) {
8f8e3aa4
PB
3837 tmp2 = neon_load_reg(rd, pass);
3838 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3839 dead_tmp(tmp2);
9ee6e8bb 3840 }
8f8e3aa4 3841 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3842 } else { /* Store */
8f8e3aa4
PB
3843 tmp = neon_load_reg(rd, pass);
3844 if (shift)
3845 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3846 switch (size) {
3847 case 0:
b0109805 3848 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3849 break;
3850 case 1:
b0109805 3851 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3852 break;
3853 case 2:
b0109805 3854 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3855 break;
99c475ab 3856 }
99c475ab 3857 }
9ee6e8bb
PB
3858 rd += stride;
3859 gen_op_addl_T1_im(1 << size);
99c475ab 3860 }
9ee6e8bb 3861 stride = nregs * (1 << size);
99c475ab 3862 }
9ee6e8bb
PB
3863 }
3864 if (rm != 15) {
b26eefb6
PB
3865 TCGv base;
3866
3867 base = load_reg(s, rn);
9ee6e8bb 3868 if (rm == 13) {
b26eefb6 3869 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3870 } else {
b26eefb6
PB
3871 TCGv index;
3872 index = load_reg(s, rm);
3873 tcg_gen_add_i32(base, base, index);
3874 dead_tmp(index);
9ee6e8bb 3875 }
b26eefb6 3876 store_reg(s, rn, base);
9ee6e8bb
PB
3877 }
3878 return 0;
3879}
3b46e624 3880
8f8e3aa4
PB
3881/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3882static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3883{
3884 tcg_gen_and_i32(t, t, c);
3885 tcg_gen_bic_i32(f, f, c);
3886 tcg_gen_or_i32(dest, t, f);
3887}
3888
a7812ae4 3889static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3890{
3891 switch (size) {
3892 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3893 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3894 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3895 default: abort();
3896 }
3897}
3898
a7812ae4 3899static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3900{
3901 switch (size) {
3902 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3903 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3904 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3905 default: abort();
3906 }
3907}
3908
a7812ae4 3909static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3910{
3911 switch (size) {
3912 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3913 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3914 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3915 default: abort();
3916 }
3917}
3918
3919static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3920 int q, int u)
3921{
3922 if (q) {
3923 if (u) {
3924 switch (size) {
3925 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3926 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3927 default: abort();
3928 }
3929 } else {
3930 switch (size) {
3931 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3932 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3933 default: abort();
3934 }
3935 }
3936 } else {
3937 if (u) {
3938 switch (size) {
3939 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3940 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3941 default: abort();
3942 }
3943 } else {
3944 switch (size) {
3945 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3946 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3947 default: abort();
3948 }
3949 }
3950 }
3951}
3952
a7812ae4 3953static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
3954{
3955 if (u) {
3956 switch (size) {
3957 case 0: gen_helper_neon_widen_u8(dest, src); break;
3958 case 1: gen_helper_neon_widen_u16(dest, src); break;
3959 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3960 default: abort();
3961 }
3962 } else {
3963 switch (size) {
3964 case 0: gen_helper_neon_widen_s8(dest, src); break;
3965 case 1: gen_helper_neon_widen_s16(dest, src); break;
3966 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3967 default: abort();
3968 }
3969 }
3970 dead_tmp(src);
3971}
3972
3973static inline void gen_neon_addl(int size)
3974{
3975 switch (size) {
3976 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3977 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3978 case 2: tcg_gen_add_i64(CPU_V001); break;
3979 default: abort();
3980 }
3981}
3982
3983static inline void gen_neon_subl(int size)
3984{
3985 switch (size) {
3986 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
3987 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
3988 case 2: tcg_gen_sub_i64(CPU_V001); break;
3989 default: abort();
3990 }
3991}
3992
a7812ae4 3993static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
3994{
3995 switch (size) {
3996 case 0: gen_helper_neon_negl_u16(var, var); break;
3997 case 1: gen_helper_neon_negl_u32(var, var); break;
3998 case 2: gen_helper_neon_negl_u64(var, var); break;
3999 default: abort();
4000 }
4001}
4002
a7812ae4 4003static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4004{
4005 switch (size) {
4006 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4007 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4008 default: abort();
4009 }
4010}
4011
a7812ae4 4012static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4013{
a7812ae4 4014 TCGv_i64 tmp;
ad69471c
PB
4015
4016 switch ((size << 1) | u) {
4017 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4018 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4019 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4020 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4021 case 4:
4022 tmp = gen_muls_i64_i32(a, b);
4023 tcg_gen_mov_i64(dest, tmp);
4024 break;
4025 case 5:
4026 tmp = gen_mulu_i64_i32(a, b);
4027 tcg_gen_mov_i64(dest, tmp);
4028 break;
4029 default: abort();
4030 }
4031 if (size < 2) {
4032 dead_tmp(b);
4033 dead_tmp(a);
4034 }
4035}
4036
9ee6e8bb
PB
4037/* Translate a NEON data processing instruction. Return nonzero if the
4038 instruction is invalid.
ad69471c
PB
4039 We process data in a mixture of 32-bit and 64-bit chunks.
4040 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4041
9ee6e8bb
PB
4042static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4043{
4044 int op;
4045 int q;
4046 int rd, rn, rm;
4047 int size;
4048 int shift;
4049 int pass;
4050 int count;
4051 int pairwise;
4052 int u;
4053 int n;
4054 uint32_t imm;
8f8e3aa4
PB
4055 TCGv tmp;
4056 TCGv tmp2;
4057 TCGv tmp3;
a7812ae4 4058 TCGv_i64 tmp64;
9ee6e8bb
PB
4059
4060 if (!vfp_enabled(env))
4061 return 1;
4062 q = (insn & (1 << 6)) != 0;
4063 u = (insn >> 24) & 1;
4064 VFP_DREG_D(rd, insn);
4065 VFP_DREG_N(rn, insn);
4066 VFP_DREG_M(rm, insn);
4067 size = (insn >> 20) & 3;
4068 if ((insn & (1 << 23)) == 0) {
4069 /* Three register same length. */
4070 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4071 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4072 || op == 10 || op == 11 || op == 16)) {
4073 /* 64-bit element instructions. */
9ee6e8bb 4074 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4075 neon_load_reg64(cpu_V0, rn + pass);
4076 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4077 switch (op) {
4078 case 1: /* VQADD */
4079 if (u) {
ad69471c 4080 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4081 } else {
ad69471c 4082 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4083 }
9ee6e8bb
PB
4084 break;
4085 case 5: /* VQSUB */
4086 if (u) {
ad69471c
PB
4087 gen_helper_neon_sub_saturate_u64(CPU_V001);
4088 } else {
4089 gen_helper_neon_sub_saturate_s64(CPU_V001);
4090 }
4091 break;
4092 case 8: /* VSHL */
4093 if (u) {
4094 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4095 } else {
4096 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4097 }
4098 break;
4099 case 9: /* VQSHL */
4100 if (u) {
4101 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4102 cpu_V0, cpu_V0);
4103 } else {
4104 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4105 cpu_V1, cpu_V0);
4106 }
4107 break;
4108 case 10: /* VRSHL */
4109 if (u) {
4110 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4111 } else {
ad69471c
PB
4112 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4113 }
4114 break;
4115 case 11: /* VQRSHL */
4116 if (u) {
4117 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4118 cpu_V1, cpu_V0);
4119 } else {
4120 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4121 cpu_V1, cpu_V0);
1e8d4eec 4122 }
9ee6e8bb
PB
4123 break;
4124 case 16:
4125 if (u) {
ad69471c 4126 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4127 } else {
ad69471c 4128 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4129 }
4130 break;
4131 default:
4132 abort();
2c0262af 4133 }
ad69471c 4134 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4135 }
9ee6e8bb 4136 return 0;
2c0262af 4137 }
9ee6e8bb
PB
4138 switch (op) {
4139 case 8: /* VSHL */
4140 case 9: /* VQSHL */
4141 case 10: /* VRSHL */
ad69471c 4142 case 11: /* VQRSHL */
9ee6e8bb 4143 {
ad69471c
PB
4144 int rtmp;
4145 /* Shift instruction operands are reversed. */
4146 rtmp = rn;
9ee6e8bb 4147 rn = rm;
ad69471c 4148 rm = rtmp;
9ee6e8bb
PB
4149 pairwise = 0;
4150 }
2c0262af 4151 break;
9ee6e8bb
PB
4152 case 20: /* VPMAX */
4153 case 21: /* VPMIN */
4154 case 23: /* VPADD */
4155 pairwise = 1;
2c0262af 4156 break;
9ee6e8bb
PB
4157 case 26: /* VPADD (float) */
4158 pairwise = (u && size < 2);
2c0262af 4159 break;
9ee6e8bb
PB
4160 case 30: /* VPMIN/VPMAX (float) */
4161 pairwise = u;
2c0262af 4162 break;
9ee6e8bb
PB
4163 default:
4164 pairwise = 0;
2c0262af 4165 break;
9ee6e8bb
PB
4166 }
4167 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4168
4169 if (pairwise) {
4170 /* Pairwise. */
4171 if (q)
4172 n = (pass & 1) * 2;
2c0262af 4173 else
9ee6e8bb
PB
4174 n = 0;
4175 if (pass < q + 1) {
4176 NEON_GET_REG(T0, rn, n);
4177 NEON_GET_REG(T1, rn, n + 1);
4178 } else {
4179 NEON_GET_REG(T0, rm, n);
4180 NEON_GET_REG(T1, rm, n + 1);
4181 }
4182 } else {
4183 /* Elementwise. */
4184 NEON_GET_REG(T0, rn, pass);
4185 NEON_GET_REG(T1, rm, pass);
4186 }
4187 switch (op) {
4188 case 0: /* VHADD */
4189 GEN_NEON_INTEGER_OP(hadd);
4190 break;
4191 case 1: /* VQADD */
ad69471c 4192 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4193 break;
9ee6e8bb
PB
4194 case 2: /* VRHADD */
4195 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4196 break;
9ee6e8bb
PB
4197 case 3: /* Logic ops. */
4198 switch ((u << 2) | size) {
4199 case 0: /* VAND */
2c0262af 4200 gen_op_andl_T0_T1();
9ee6e8bb
PB
4201 break;
4202 case 1: /* BIC */
4203 gen_op_bicl_T0_T1();
4204 break;
4205 case 2: /* VORR */
4206 gen_op_orl_T0_T1();
4207 break;
4208 case 3: /* VORN */
4209 gen_op_notl_T1();
4210 gen_op_orl_T0_T1();
4211 break;
4212 case 4: /* VEOR */
4213 gen_op_xorl_T0_T1();
4214 break;
4215 case 5: /* VBSL */
8f8e3aa4
PB
4216 tmp = neon_load_reg(rd, pass);
4217 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4218 dead_tmp(tmp);
9ee6e8bb
PB
4219 break;
4220 case 6: /* VBIT */
8f8e3aa4
PB
4221 tmp = neon_load_reg(rd, pass);
4222 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4223 dead_tmp(tmp);
9ee6e8bb
PB
4224 break;
4225 case 7: /* VBIF */
8f8e3aa4
PB
4226 tmp = neon_load_reg(rd, pass);
4227 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4228 dead_tmp(tmp);
9ee6e8bb 4229 break;
2c0262af
FB
4230 }
4231 break;
9ee6e8bb
PB
4232 case 4: /* VHSUB */
4233 GEN_NEON_INTEGER_OP(hsub);
4234 break;
4235 case 5: /* VQSUB */
ad69471c 4236 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4237 break;
9ee6e8bb
PB
4238 case 6: /* VCGT */
4239 GEN_NEON_INTEGER_OP(cgt);
4240 break;
4241 case 7: /* VCGE */
4242 GEN_NEON_INTEGER_OP(cge);
4243 break;
4244 case 8: /* VSHL */
ad69471c 4245 GEN_NEON_INTEGER_OP(shl);
2c0262af 4246 break;
9ee6e8bb 4247 case 9: /* VQSHL */
ad69471c 4248 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4249 break;
9ee6e8bb 4250 case 10: /* VRSHL */
ad69471c 4251 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4252 break;
9ee6e8bb 4253 case 11: /* VQRSHL */
ad69471c 4254 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4255 break;
4256 case 12: /* VMAX */
4257 GEN_NEON_INTEGER_OP(max);
4258 break;
4259 case 13: /* VMIN */
4260 GEN_NEON_INTEGER_OP(min);
4261 break;
4262 case 14: /* VABD */
4263 GEN_NEON_INTEGER_OP(abd);
4264 break;
4265 case 15: /* VABA */
4266 GEN_NEON_INTEGER_OP(abd);
4267 NEON_GET_REG(T1, rd, pass);
4268 gen_neon_add(size);
4269 break;
4270 case 16:
4271 if (!u) { /* VADD */
4272 if (gen_neon_add(size))
4273 return 1;
4274 } else { /* VSUB */
4275 switch (size) {
ad69471c
PB
4276 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4277 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4278 case 2: gen_op_subl_T0_T1(); break;
4279 default: return 1;
4280 }
4281 }
4282 break;
4283 case 17:
4284 if (!u) { /* VTST */
4285 switch (size) {
ad69471c
PB
4286 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4287 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4288 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4289 default: return 1;
4290 }
4291 } else { /* VCEQ */
4292 switch (size) {
ad69471c
PB
4293 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4294 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4295 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4296 default: return 1;
4297 }
4298 }
4299 break;
4300 case 18: /* Multiply. */
4301 switch (size) {
ad69471c
PB
4302 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4303 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4304 case 2: gen_op_mul_T0_T1(); break;
4305 default: return 1;
4306 }
4307 NEON_GET_REG(T1, rd, pass);
4308 if (u) { /* VMLS */
ad69471c 4309 gen_neon_rsb(size);
9ee6e8bb
PB
4310 } else { /* VMLA */
4311 gen_neon_add(size);
4312 }
4313 break;
4314 case 19: /* VMUL */
4315 if (u) { /* polynomial */
ad69471c 4316 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4317 } else { /* Integer */
4318 switch (size) {
ad69471c
PB
4319 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4320 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4321 case 2: gen_op_mul_T0_T1(); break;
4322 default: return 1;
4323 }
4324 }
4325 break;
4326 case 20: /* VPMAX */
4327 GEN_NEON_INTEGER_OP(pmax);
4328 break;
4329 case 21: /* VPMIN */
4330 GEN_NEON_INTEGER_OP(pmin);
4331 break;
4332 case 22: /* Hultiply high. */
4333 if (!u) { /* VQDMULH */
4334 switch (size) {
ad69471c
PB
4335 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4336 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4337 default: return 1;
4338 }
4339 } else { /* VQRDHMUL */
4340 switch (size) {
ad69471c
PB
4341 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4342 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4343 default: return 1;
4344 }
4345 }
4346 break;
4347 case 23: /* VPADD */
4348 if (u)
4349 return 1;
4350 switch (size) {
ad69471c
PB
4351 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4352 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4353 case 2: gen_op_addl_T0_T1(); break;
4354 default: return 1;
4355 }
4356 break;
4357 case 26: /* Floating point arithnetic. */
4358 switch ((u << 2) | size) {
4359 case 0: /* VADD */
ad69471c 4360 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4361 break;
4362 case 2: /* VSUB */
ad69471c 4363 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4364 break;
4365 case 4: /* VPADD */
ad69471c 4366 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4367 break;
4368 case 6: /* VABD */
ad69471c 4369 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4370 break;
4371 default:
4372 return 1;
4373 }
4374 break;
4375 case 27: /* Float multiply. */
ad69471c 4376 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4377 if (!u) {
4378 NEON_GET_REG(T1, rd, pass);
4379 if (size == 0) {
ad69471c 4380 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4381 } else {
ad69471c 4382 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4383 }
4384 }
4385 break;
4386 case 28: /* Float compare. */
4387 if (!u) {
ad69471c 4388 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4389 } else {
9ee6e8bb 4390 if (size == 0)
ad69471c 4391 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4392 else
ad69471c 4393 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4394 }
2c0262af 4395 break;
9ee6e8bb
PB
4396 case 29: /* Float compare absolute. */
4397 if (!u)
4398 return 1;
4399 if (size == 0)
ad69471c 4400 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4401 else
ad69471c 4402 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4403 break;
9ee6e8bb
PB
4404 case 30: /* Float min/max. */
4405 if (size == 0)
ad69471c 4406 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4407 else
ad69471c 4408 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4409 break;
4410 case 31:
4411 if (size == 0)
4373f3ce 4412 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4413 else
4373f3ce 4414 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4415 break;
9ee6e8bb
PB
4416 default:
4417 abort();
2c0262af 4418 }
9ee6e8bb
PB
4419 /* Save the result. For elementwise operations we can put it
4420 straight into the destination register. For pairwise operations
4421 we have to be careful to avoid clobbering the source operands. */
4422 if (pairwise && rd == rm) {
4423 gen_neon_movl_scratch_T0(pass);
4424 } else {
4425 NEON_SET_REG(T0, rd, pass);
4426 }
4427
4428 } /* for pass */
4429 if (pairwise && rd == rm) {
4430 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4431 gen_neon_movl_T0_scratch(pass);
4432 NEON_SET_REG(T0, rd, pass);
4433 }
4434 }
ad69471c 4435 /* End of 3 register same size operations. */
9ee6e8bb
PB
4436 } else if (insn & (1 << 4)) {
4437 if ((insn & 0x00380080) != 0) {
4438 /* Two registers and shift. */
4439 op = (insn >> 8) & 0xf;
4440 if (insn & (1 << 7)) {
4441 /* 64-bit shift. */
4442 size = 3;
4443 } else {
4444 size = 2;
4445 while ((insn & (1 << (size + 19))) == 0)
4446 size--;
4447 }
4448 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
 4449 /* To avoid excessive duplication of ops we implement shift
4450 by immediate using the variable shift operations. */
4451 if (op < 8) {
4452 /* Shift by immediate:
4453 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4454 /* Right shifts are encoded as N - shift, where N is the
4455 element size in bits. */
4456 if (op <= 4)
4457 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4458 if (size == 3) {
4459 count = q + 1;
4460 } else {
4461 count = q ? 4: 2;
4462 }
4463 switch (size) {
4464 case 0:
4465 imm = (uint8_t) shift;
4466 imm |= imm << 8;
4467 imm |= imm << 16;
4468 break;
4469 case 1:
4470 imm = (uint16_t) shift;
4471 imm |= imm << 16;
4472 break;
4473 case 2:
4474 case 3:
4475 imm = shift;
4476 break;
4477 default:
4478 abort();
4479 }
4480
4481 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4482 if (size == 3) {
4483 neon_load_reg64(cpu_V0, rm + pass);
4484 tcg_gen_movi_i64(cpu_V1, imm);
4485 switch (op) {
4486 case 0: /* VSHR */
4487 case 1: /* VSRA */
4488 if (u)
4489 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4490 else
ad69471c 4491 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4492 break;
ad69471c
PB
4493 case 2: /* VRSHR */
4494 case 3: /* VRSRA */
4495 if (u)
4496 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4497 else
ad69471c 4498 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4499 break;
ad69471c
PB
4500 case 4: /* VSRI */
4501 if (!u)
4502 return 1;
4503 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4504 break;
4505 case 5: /* VSHL, VSLI */
4506 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4507 break;
4508 case 6: /* VQSHL */
4509 if (u)
4510 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4511 else
ad69471c
PB
4512 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4513 break;
4514 case 7: /* VQSHLU */
4515 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4516 break;
9ee6e8bb 4517 }
ad69471c
PB
4518 if (op == 1 || op == 3) {
4519 /* Accumulate. */
4520 neon_load_reg64(cpu_V0, rd + pass);
4521 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4522 } else if (op == 4 || (op == 5 && u)) {
4523 /* Insert */
4524 cpu_abort(env, "VS[LR]I.64 not implemented");
4525 }
4526 neon_store_reg64(cpu_V0, rd + pass);
4527 } else { /* size < 3 */
4528 /* Operands in T0 and T1. */
4529 gen_op_movl_T1_im(imm);
4530 NEON_GET_REG(T0, rm, pass);
4531 switch (op) {
4532 case 0: /* VSHR */
4533 case 1: /* VSRA */
4534 GEN_NEON_INTEGER_OP(shl);
4535 break;
4536 case 2: /* VRSHR */
4537 case 3: /* VRSRA */
4538 GEN_NEON_INTEGER_OP(rshl);
4539 break;
4540 case 4: /* VSRI */
4541 if (!u)
4542 return 1;
4543 GEN_NEON_INTEGER_OP(shl);
4544 break;
4545 case 5: /* VSHL, VSLI */
4546 switch (size) {
4547 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4548 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4549 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4550 default: return 1;
4551 }
4552 break;
4553 case 6: /* VQSHL */
4554 GEN_NEON_INTEGER_OP_ENV(qshl);
4555 break;
4556 case 7: /* VQSHLU */
4557 switch (size) {
4558 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4559 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4560 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4561 default: return 1;
4562 }
4563 break;
4564 }
4565
4566 if (op == 1 || op == 3) {
4567 /* Accumulate. */
4568 NEON_GET_REG(T1, rd, pass);
4569 gen_neon_add(size);
4570 } else if (op == 4 || (op == 5 && u)) {
4571 /* Insert */
4572 switch (size) {
4573 case 0:
4574 if (op == 4)
4575 imm = 0xff >> -shift;
4576 else
4577 imm = (uint8_t)(0xff << shift);
4578 imm |= imm << 8;
4579 imm |= imm << 16;
4580 break;
4581 case 1:
4582 if (op == 4)
4583 imm = 0xffff >> -shift;
4584 else
4585 imm = (uint16_t)(0xffff << shift);
4586 imm |= imm << 16;
4587 break;
4588 case 2:
4589 if (op == 4)
4590 imm = 0xffffffffu >> -shift;
4591 else
4592 imm = 0xffffffffu << shift;
4593 break;
4594 default:
4595 abort();
4596 }
4597 tmp = neon_load_reg(rd, pass);
4598 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4599 tcg_gen_andi_i32(tmp, tmp, ~imm);
4600 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4601 }
9ee6e8bb
PB
4602 NEON_SET_REG(T0, rd, pass);
4603 }
4604 } /* for pass */
4605 } else if (op < 10) {
ad69471c 4606 /* Shift by immediate and narrow:
9ee6e8bb
PB
4607 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4608 shift = shift - (1 << (size + 3));
4609 size++;
9ee6e8bb
PB
4610 switch (size) {
4611 case 1:
ad69471c 4612 imm = (uint16_t)shift;
9ee6e8bb 4613 imm |= imm << 16;
ad69471c 4614 tmp2 = tcg_const_i32(imm);
a7812ae4 4615 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4616 break;
4617 case 2:
ad69471c
PB
4618 imm = (uint32_t)shift;
4619 tmp2 = tcg_const_i32(imm);
a7812ae4 4620 TCGV_UNUSED_I64(tmp64);
9ee6e8bb 4621 case 3:
a7812ae4
PB
4622 tmp64 = tcg_const_i64(shift);
4623 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4624 break;
4625 default:
4626 abort();
4627 }
4628
ad69471c
PB
4629 for (pass = 0; pass < 2; pass++) {
4630 if (size == 3) {
4631 neon_load_reg64(cpu_V0, rm + pass);
4632 if (q) {
4633 if (u)
a7812ae4 4634 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4635 else
a7812ae4 4636 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4637 } else {
4638 if (u)
a7812ae4 4639 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4640 else
a7812ae4 4641 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4642 }
2c0262af 4643 } else {
ad69471c
PB
4644 tmp = neon_load_reg(rm + pass, 0);
4645 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4646 tmp3 = neon_load_reg(rm + pass, 1);
4647 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4648 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4649 dead_tmp(tmp);
36aa55dc 4650 dead_tmp(tmp3);
9ee6e8bb 4651 }
ad69471c
PB
4652 tmp = new_tmp();
4653 if (op == 8 && !u) {
4654 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4655 } else {
ad69471c
PB
4656 if (op == 8)
4657 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4658 else
ad69471c
PB
4659 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4660 }
4661 if (pass == 0) {
4662 tmp2 = tmp;
4663 } else {
4664 neon_store_reg(rd, 0, tmp2);
4665 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4666 }
4667 } /* for pass */
4668 } else if (op == 10) {
4669 /* VSHLL */
ad69471c 4670 if (q || size == 3)
9ee6e8bb 4671 return 1;
ad69471c
PB
4672 tmp = neon_load_reg(rm, 0);
4673 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4674 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4675 if (pass == 1)
4676 tmp = tmp2;
4677
4678 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4679
9ee6e8bb
PB
4680 if (shift != 0) {
4681 /* The shift is less than the width of the source
ad69471c
PB
4682 type, so we can just shift the whole register. */
4683 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4684 if (size < 2 || !u) {
4685 uint64_t imm64;
4686 if (size == 0) {
4687 imm = (0xffu >> (8 - shift));
4688 imm |= imm << 16;
4689 } else {
4690 imm = 0xffff >> (16 - shift);
9ee6e8bb 4691 }
ad69471c
PB
4692 imm64 = imm | (((uint64_t)imm) << 32);
4693 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4694 }
4695 }
ad69471c 4696 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4697 }
4698 } else if (op == 15 || op == 16) {
4699 /* VCVT fixed-point. */
4700 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4701 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4702 if (op & 1) {
4703 if (u)
4373f3ce 4704 gen_vfp_ulto(0, shift);
9ee6e8bb 4705 else
4373f3ce 4706 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4707 } else {
4708 if (u)
4373f3ce 4709 gen_vfp_toul(0, shift);
9ee6e8bb 4710 else
4373f3ce 4711 gen_vfp_tosl(0, shift);
2c0262af 4712 }
4373f3ce 4713 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4714 }
4715 } else {
9ee6e8bb
PB
4716 return 1;
4717 }
4718 } else { /* (insn & 0x00380080) == 0 */
4719 int invert;
4720
4721 op = (insn >> 8) & 0xf;
4722 /* One register and immediate. */
4723 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4724 invert = (insn & (1 << 5)) != 0;
4725 switch (op) {
4726 case 0: case 1:
4727 /* no-op */
4728 break;
4729 case 2: case 3:
4730 imm <<= 8;
4731 break;
4732 case 4: case 5:
4733 imm <<= 16;
4734 break;
4735 case 6: case 7:
4736 imm <<= 24;
4737 break;
4738 case 8: case 9:
4739 imm |= imm << 16;
4740 break;
4741 case 10: case 11:
4742 imm = (imm << 8) | (imm << 24);
4743 break;
4744 case 12:
4745 imm = (imm < 8) | 0xff;
4746 break;
4747 case 13:
4748 imm = (imm << 16) | 0xffff;
4749 break;
4750 case 14:
4751 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4752 if (invert)
4753 imm = ~imm;
4754 break;
4755 case 15:
4756 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4757 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4758 break;
4759 }
4760 if (invert)
4761 imm = ~imm;
4762
4763 if (op != 14 || !invert)
4764 gen_op_movl_T1_im(imm);
4765
4766 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4767 if (op & 1 && op < 12) {
ad69471c 4768 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4769 if (invert) {
4770 /* The immediate value has already been inverted, so
4771 BIC becomes AND. */
ad69471c 4772 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4773 } else {
ad69471c 4774 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4775 }
9ee6e8bb 4776 } else {
ad69471c
PB
4777 /* VMOV, VMVN. */
4778 tmp = new_tmp();
9ee6e8bb 4779 if (op == 14 && invert) {
ad69471c
PB
4780 uint32_t val;
4781 val = 0;
9ee6e8bb
PB
4782 for (n = 0; n < 4; n++) {
4783 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4784 val |= 0xff << (n * 8);
9ee6e8bb 4785 }
ad69471c
PB
4786 tcg_gen_movi_i32(tmp, val);
4787 } else {
4788 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4789 }
9ee6e8bb 4790 }
ad69471c 4791 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4792 }
4793 }
e4b3861d 4794 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4795 if (size != 3) {
4796 op = (insn >> 8) & 0xf;
4797 if ((insn & (1 << 6)) == 0) {
4798 /* Three registers of different lengths. */
4799 int src1_wide;
4800 int src2_wide;
4801 int prewiden;
4802 /* prewiden, src1_wide, src2_wide */
4803 static const int neon_3reg_wide[16][3] = {
4804 {1, 0, 0}, /* VADDL */
4805 {1, 1, 0}, /* VADDW */
4806 {1, 0, 0}, /* VSUBL */
4807 {1, 1, 0}, /* VSUBW */
4808 {0, 1, 1}, /* VADDHN */
4809 {0, 0, 0}, /* VABAL */
4810 {0, 1, 1}, /* VSUBHN */
4811 {0, 0, 0}, /* VABDL */
4812 {0, 0, 0}, /* VMLAL */
4813 {0, 0, 0}, /* VQDMLAL */
4814 {0, 0, 0}, /* VMLSL */
4815 {0, 0, 0}, /* VQDMLSL */
4816 {0, 0, 0}, /* Integer VMULL */
4817 {0, 0, 0}, /* VQDMULL */
4818 {0, 0, 0} /* Polynomial VMULL */
4819 };
4820
4821 prewiden = neon_3reg_wide[op][0];
4822 src1_wide = neon_3reg_wide[op][1];
4823 src2_wide = neon_3reg_wide[op][2];
4824
ad69471c
PB
4825 if (size == 0 && (op == 9 || op == 11 || op == 13))
4826 return 1;
4827
9ee6e8bb
PB
4828 /* Avoid overlapping operands. Wide source operands are
4829 always aligned so will never overlap with wide
4830 destinations in problematic ways. */
8f8e3aa4
PB
4831 if (rd == rm && !src2_wide) {
4832 NEON_GET_REG(T0, rm, 1);
4833 gen_neon_movl_scratch_T0(2);
4834 } else if (rd == rn && !src1_wide) {
4835 NEON_GET_REG(T0, rn, 1);
4836 gen_neon_movl_scratch_T0(2);
9ee6e8bb 4837 }
a50f5b91 4838 TCGV_UNUSED(tmp3);
9ee6e8bb 4839 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4840 if (src1_wide) {
4841 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4842 TCGV_UNUSED(tmp);
9ee6e8bb 4843 } else {
ad69471c
PB
4844 if (pass == 1 && rd == rn) {
4845 gen_neon_movl_T0_scratch(2);
4846 tmp = new_tmp();
4847 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4848 } else {
ad69471c
PB
4849 tmp = neon_load_reg(rn, pass);
4850 }
4851 if (prewiden) {
4852 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4853 }
4854 }
ad69471c
PB
4855 if (src2_wide) {
4856 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4857 TCGV_UNUSED(tmp2);
9ee6e8bb 4858 } else {
ad69471c 4859 if (pass == 1 && rd == rm) {
8f8e3aa4 4860 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4861 tmp2 = new_tmp();
4862 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4863 } else {
ad69471c
PB
4864 tmp2 = neon_load_reg(rm, pass);
4865 }
4866 if (prewiden) {
4867 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4868 }
9ee6e8bb
PB
4869 }
4870 switch (op) {
4871 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4872 gen_neon_addl(size);
9ee6e8bb
PB
4873 break;
 4874 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 4875 gen_neon_subl(size);
9ee6e8bb
PB
4876 break;
4877 case 5: case 7: /* VABAL, VABDL */
4878 switch ((size << 1) | u) {
ad69471c
PB
4879 case 0:
4880 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4881 break;
4882 case 1:
4883 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4884 break;
4885 case 2:
4886 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4887 break;
4888 case 3:
4889 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4890 break;
4891 case 4:
4892 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4893 break;
4894 case 5:
4895 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4896 break;
9ee6e8bb
PB
4897 default: abort();
4898 }
ad69471c
PB
4899 dead_tmp(tmp2);
4900 dead_tmp(tmp);
9ee6e8bb
PB
4901 break;
4902 case 8: case 9: case 10: case 11: case 12: case 13:
4903 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4904 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
4905 break;
4906 case 14: /* Polynomial VMULL */
4907 cpu_abort(env, "Polynomial VMULL not implemented");
4908
4909 default: /* 15 is RESERVED. */
4910 return 1;
4911 }
4912 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4913 /* Accumulate. */
4914 if (op == 10 || op == 11) {
ad69471c 4915 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4916 }
4917
9ee6e8bb 4918 if (op != 13) {
ad69471c 4919 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4920 }
4921
4922 switch (op) {
4923 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4924 gen_neon_addl(size);
9ee6e8bb
PB
4925 break;
4926 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4927 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4928 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4929 break;
9ee6e8bb
PB
4930 /* Fall through. */
4931 case 13: /* VQDMULL */
ad69471c 4932 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4933 break;
4934 default:
4935 abort();
4936 }
ad69471c 4937 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4938 } else if (op == 4 || op == 6) {
4939 /* Narrowing operation. */
ad69471c 4940 tmp = new_tmp();
9ee6e8bb
PB
4941 if (u) {
4942 switch (size) {
ad69471c
PB
4943 case 0:
4944 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4945 break;
4946 case 1:
4947 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4948 break;
4949 case 2:
4950 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4951 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4952 break;
9ee6e8bb
PB
4953 default: abort();
4954 }
4955 } else {
4956 switch (size) {
ad69471c
PB
4957 case 0:
4958 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4959 break;
4960 case 1:
4961 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4962 break;
4963 case 2:
4964 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4965 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4966 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4967 break;
9ee6e8bb
PB
4968 default: abort();
4969 }
4970 }
ad69471c
PB
4971 if (pass == 0) {
4972 tmp3 = tmp;
4973 } else {
4974 neon_store_reg(rd, 0, tmp3);
4975 neon_store_reg(rd, 1, tmp);
4976 }
9ee6e8bb
PB
4977 } else {
4978 /* Write back the result. */
ad69471c 4979 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4980 }
4981 }
4982 } else {
4983 /* Two registers and a scalar. */
4984 switch (op) {
4985 case 0: /* Integer VMLA scalar */
4986 case 1: /* Float VMLA scalar */
4987 case 4: /* Integer VMLS scalar */
4988 case 5: /* Floating point VMLS scalar */
4989 case 8: /* Integer VMUL scalar */
4990 case 9: /* Floating point VMUL scalar */
4991 case 12: /* VQDMULH scalar */
4992 case 13: /* VQRDMULH scalar */
4993 gen_neon_get_scalar(size, rm);
8f8e3aa4 4994 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
4995 for (pass = 0; pass < (u ? 4 : 2); pass++) {
4996 if (pass != 0)
8f8e3aa4 4997 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
4998 NEON_GET_REG(T1, rn, pass);
4999 if (op == 12) {
5000 if (size == 1) {
ad69471c 5001 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 5002 } else {
ad69471c 5003 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5004 }
5005 } else if (op == 13) {
5006 if (size == 1) {
ad69471c 5007 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5008 } else {
ad69471c 5009 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5010 }
5011 } else if (op & 1) {
ad69471c 5012 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5013 } else {
5014 switch (size) {
ad69471c
PB
5015 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5016 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5017 case 2: gen_op_mul_T0_T1(); break;
5018 default: return 1;
5019 }
5020 }
5021 if (op < 8) {
5022 /* Accumulate. */
5023 NEON_GET_REG(T1, rd, pass);
5024 switch (op) {
5025 case 0:
5026 gen_neon_add(size);
5027 break;
5028 case 1:
ad69471c 5029 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5030 break;
5031 case 4:
ad69471c 5032 gen_neon_rsb(size);
9ee6e8bb
PB
5033 break;
5034 case 5:
ad69471c 5035 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5036 break;
5037 default:
5038 abort();
5039 }
5040 }
5041 NEON_SET_REG(T0, rd, pass);
5042 }
5043 break;
 5044 case 2: /* VMLAL scalar */
5045 case 3: /* VQDMLAL scalar */
5046 case 6: /* VMLSL scalar */
5047 case 7: /* VQDMLSL scalar */
5048 case 10: /* VMULL scalar */
5049 case 11: /* VQDMULL scalar */
ad69471c
PB
5050 if (size == 0 && (op == 3 || op == 7 || op == 11))
5051 return 1;
5052
9ee6e8bb 5053 gen_neon_get_scalar(size, rm);
ad69471c
PB
5054 NEON_GET_REG(T1, rn, 1);
5055
9ee6e8bb 5056 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5057 if (pass == 0) {
5058 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5059 } else {
ad69471c
PB
5060 tmp = new_tmp();
5061 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5062 }
ad69471c
PB
5063 tmp2 = new_tmp();
5064 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5065 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5066 if (op == 6 || op == 7) {
ad69471c
PB
5067 gen_neon_negl(cpu_V0, size);
5068 }
5069 if (op != 11) {
5070 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5071 }
9ee6e8bb
PB
5072 switch (op) {
5073 case 2: case 6:
ad69471c 5074 gen_neon_addl(size);
9ee6e8bb
PB
5075 break;
5076 case 3: case 7:
ad69471c
PB
5077 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5078 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5079 break;
5080 case 10:
5081 /* no-op */
5082 break;
5083 case 11:
ad69471c 5084 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5085 break;
5086 default:
5087 abort();
5088 }
ad69471c 5089 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5090 }
5091 break;
5092 default: /* 14 and 15 are RESERVED */
5093 return 1;
5094 }
5095 }
5096 } else { /* size == 3 */
5097 if (!u) {
5098 /* Extract. */
9ee6e8bb 5099 imm = (insn >> 8) & 0xf;
ad69471c
PB
5100 count = q + 1;
5101
5102 if (imm > 7 && !q)
5103 return 1;
5104
5105 if (imm == 0) {
5106 neon_load_reg64(cpu_V0, rn);
5107 if (q) {
5108 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5109 }
ad69471c
PB
5110 } else if (imm == 8) {
5111 neon_load_reg64(cpu_V0, rn + 1);
5112 if (q) {
5113 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5114 }
ad69471c 5115 } else if (q) {
a7812ae4 5116 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5117 if (imm < 8) {
5118 neon_load_reg64(cpu_V0, rn);
a7812ae4 5119 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5120 } else {
5121 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5122 neon_load_reg64(tmp64, rm);
ad69471c
PB
5123 }
5124 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5125 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5126 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5127 if (imm < 8) {
5128 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5129 } else {
ad69471c
PB
5130 neon_load_reg64(cpu_V1, rm + 1);
5131 imm -= 8;
9ee6e8bb 5132 }
ad69471c 5133 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5134 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5135 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5136 } else {
a7812ae4 5137 /* BUGFIX */
ad69471c 5138 neon_load_reg64(cpu_V0, rn);
a7812ae4 5139 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5140 neon_load_reg64(cpu_V1, rm);
a7812ae4 5141 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5142 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5143 }
5144 neon_store_reg64(cpu_V0, rd);
5145 if (q) {
5146 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5147 }
5148 } else if ((insn & (1 << 11)) == 0) {
5149 /* Two register misc. */
5150 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5151 size = (insn >> 18) & 3;
5152 switch (op) {
5153 case 0: /* VREV64 */
5154 if (size == 3)
5155 return 1;
5156 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5157 NEON_GET_REG(T0, rm, pass * 2);
5158 NEON_GET_REG(T1, rm, pass * 2 + 1);
5159 switch (size) {
b0109805 5160 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5161 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5162 case 2: /* no-op */ break;
5163 default: abort();
5164 }
5165 NEON_SET_REG(T0, rd, pass * 2 + 1);
5166 if (size == 2) {
5167 NEON_SET_REG(T1, rd, pass * 2);
5168 } else {
5169 gen_op_movl_T0_T1();
5170 switch (size) {
b0109805 5171 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5172 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5173 default: abort();
5174 }
5175 NEON_SET_REG(T0, rd, pass * 2);
5176 }
5177 }
5178 break;
5179 case 4: case 5: /* VPADDL */
5180 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5181 if (size == 3)
5182 return 1;
ad69471c
PB
5183 for (pass = 0; pass < q + 1; pass++) {
5184 tmp = neon_load_reg(rm, pass * 2);
5185 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5186 tmp = neon_load_reg(rm, pass * 2 + 1);
5187 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5188 switch (size) {
5189 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5190 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5191 case 2: tcg_gen_add_i64(CPU_V001); break;
5192 default: abort();
5193 }
9ee6e8bb
PB
5194 if (op >= 12) {
5195 /* Accumulate. */
ad69471c
PB
5196 neon_load_reg64(cpu_V1, rd + pass);
5197 gen_neon_addl(size);
9ee6e8bb 5198 }
ad69471c 5199 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5200 }
5201 break;
5202 case 33: /* VTRN */
5203 if (size == 2) {
5204 for (n = 0; n < (q ? 4 : 2); n += 2) {
5205 NEON_GET_REG(T0, rm, n);
5206 NEON_GET_REG(T1, rd, n + 1);
5207 NEON_SET_REG(T1, rm, n);
5208 NEON_SET_REG(T0, rd, n + 1);
5209 }
5210 } else {
5211 goto elementwise;
5212 }
5213 break;
5214 case 34: /* VUZP */
5215 /* Reg Before After
5216 Rd A3 A2 A1 A0 B2 B0 A2 A0
5217 Rm B3 B2 B1 B0 B3 B1 A3 A1
5218 */
5219 if (size == 3)
5220 return 1;
5221 gen_neon_unzip(rd, q, 0, size);
5222 gen_neon_unzip(rm, q, 4, size);
5223 if (q) {
5224 static int unzip_order_q[8] =
5225 {0, 2, 4, 6, 1, 3, 5, 7};
5226 for (n = 0; n < 8; n++) {
5227 int reg = (n < 4) ? rd : rm;
5228 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5229 NEON_SET_REG(T0, reg, n % 4);
5230 }
5231 } else {
5232 static int unzip_order[4] =
5233 {0, 4, 1, 5};
5234 for (n = 0; n < 4; n++) {
5235 int reg = (n < 2) ? rd : rm;
5236 gen_neon_movl_T0_scratch(unzip_order[n]);
5237 NEON_SET_REG(T0, reg, n % 2);
5238 }
5239 }
5240 break;
5241 case 35: /* VZIP */
5242 /* Reg Before After
5243 Rd A3 A2 A1 A0 B1 A1 B0 A0
5244 Rm B3 B2 B1 B0 B3 A3 B2 A2
5245 */
5246 if (size == 3)
5247 return 1;
5248 count = (q ? 4 : 2);
5249 for (n = 0; n < count; n++) {
5250 NEON_GET_REG(T0, rd, n);
5251 NEON_GET_REG(T1, rd, n);
5252 switch (size) {
ad69471c
PB
5253 case 0: gen_helper_neon_zip_u8(); break;
5254 case 1: gen_helper_neon_zip_u16(); break;
9ee6e8bb
PB
5255 case 2: /* no-op */; break;
5256 default: abort();
5257 }
5258 gen_neon_movl_scratch_T0(n * 2);
5259 gen_neon_movl_scratch_T1(n * 2 + 1);
5260 }
5261 for (n = 0; n < count * 2; n++) {
5262 int reg = (n < count) ? rd : rm;
5263 gen_neon_movl_T0_scratch(n);
5264 NEON_SET_REG(T0, reg, n % count);
5265 }
5266 break;
5267 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5268 if (size == 3)
5269 return 1;
a50f5b91 5270 TCGV_UNUSED(tmp2);
9ee6e8bb 5271 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5272 neon_load_reg64(cpu_V0, rm + pass);
5273 tmp = new_tmp();
9ee6e8bb 5274 if (op == 36 && q == 0) {
ad69471c 5275 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5276 } else if (q) {
ad69471c 5277 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5278 } else {
ad69471c
PB
5279 gen_neon_narrow_sats(size, tmp, cpu_V0);
5280 }
5281 if (pass == 0) {
5282 tmp2 = tmp;
5283 } else {
5284 neon_store_reg(rd, 0, tmp2);
5285 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5286 }
9ee6e8bb
PB
5287 }
5288 break;
5289 case 38: /* VSHLL */
ad69471c 5290 if (q || size == 3)
9ee6e8bb 5291 return 1;
ad69471c
PB
5292 tmp = neon_load_reg(rm, 0);
5293 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5294 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5295 if (pass == 1)
5296 tmp = tmp2;
5297 gen_neon_widen(cpu_V0, tmp, size, 1);
5298 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5299 }
5300 break;
5301 default:
5302 elementwise:
5303 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5304 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5305 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5306 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5307 } else {
5308 NEON_GET_REG(T0, rm, pass);
5309 }
5310 switch (op) {
5311 case 1: /* VREV32 */
5312 switch (size) {
b0109805 5313 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5314 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5315 default: return 1;
5316 }
5317 break;
5318 case 2: /* VREV16 */
5319 if (size != 0)
5320 return 1;
3670669c 5321 gen_rev16(cpu_T[0]);
9ee6e8bb 5322 break;
9ee6e8bb
PB
5323 case 8: /* CLS */
5324 switch (size) {
ad69471c
PB
5325 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5326 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5327 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5328 default: return 1;
5329 }
5330 break;
5331 case 9: /* CLZ */
5332 switch (size) {
ad69471c
PB
5333 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5334 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5335 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5336 default: return 1;
5337 }
5338 break;
5339 case 10: /* CNT */
5340 if (size != 0)
5341 return 1;
ad69471c 5342 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5343 break;
5344 case 11: /* VNOT */
5345 if (size != 0)
5346 return 1;
5347 gen_op_notl_T0();
5348 break;
5349 case 14: /* VQABS */
5350 switch (size) {
ad69471c
PB
5351 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5352 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5353 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5354 default: return 1;
5355 }
5356 break;
5357 case 15: /* VQNEG */
5358 switch (size) {
ad69471c
PB
5359 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5360 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5361 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5362 default: return 1;
5363 }
5364 break;
5365 case 16: case 19: /* VCGT #0, VCLE #0 */
5366 gen_op_movl_T1_im(0);
5367 switch(size) {
ad69471c
PB
5368 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5369 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5370 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5371 default: return 1;
5372 }
5373 if (op == 19)
5374 gen_op_notl_T0();
5375 break;
5376 case 17: case 20: /* VCGE #0, VCLT #0 */
5377 gen_op_movl_T1_im(0);
5378 switch(size) {
ad69471c
PB
5379 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5380 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5381 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5382 default: return 1;
5383 }
5384 if (op == 20)
5385 gen_op_notl_T0();
5386 break;
5387 case 18: /* VCEQ #0 */
5388 gen_op_movl_T1_im(0);
5389 switch(size) {
ad69471c
PB
5390 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5391 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5392 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5393 default: return 1;
5394 }
5395 break;
5396 case 22: /* VABS */
5397 switch(size) {
ad69471c
PB
5398 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5399 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5400 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5401 default: return 1;
5402 }
5403 break;
5404 case 23: /* VNEG */
5405 gen_op_movl_T1_im(0);
ad69471c
PB
5406 if (size == 3)
5407 return 1;
5408 gen_neon_rsb(size);
9ee6e8bb
PB
5409 break;
5410 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5411 gen_op_movl_T1_im(0);
ad69471c 5412 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5413 if (op == 27)
5414 gen_op_notl_T0();
5415 break;
5416 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5417 gen_op_movl_T1_im(0);
ad69471c 5418 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5419 if (op == 28)
5420 gen_op_notl_T0();
5421 break;
5422 case 26: /* Float VCEQ #0 */
5423 gen_op_movl_T1_im(0);
ad69471c 5424 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5425 break;
5426 case 30: /* Float VABS */
4373f3ce 5427 gen_vfp_abs(0);
9ee6e8bb
PB
5428 break;
5429 case 31: /* Float VNEG */
4373f3ce 5430 gen_vfp_neg(0);
9ee6e8bb
PB
5431 break;
5432 case 32: /* VSWP */
5433 NEON_GET_REG(T1, rd, pass);
5434 NEON_SET_REG(T1, rm, pass);
5435 break;
5436 case 33: /* VTRN */
5437 NEON_GET_REG(T1, rd, pass);
5438 switch (size) {
ad69471c
PB
5439 case 0: gen_helper_neon_trn_u8(); break;
5440 case 1: gen_helper_neon_trn_u16(); break;
9ee6e8bb
PB
5441 case 2: abort();
5442 default: return 1;
5443 }
5444 NEON_SET_REG(T1, rm, pass);
5445 break;
5446 case 56: /* Integer VRECPE */
4373f3ce 5447 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5448 break;
5449 case 57: /* Integer VRSQRTE */
4373f3ce 5450 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5451 break;
5452 case 58: /* Float VRECPE */
4373f3ce 5453 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5454 break;
5455 case 59: /* Float VRSQRTE */
4373f3ce 5456 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5457 break;
5458 case 60: /* VCVT.F32.S32 */
4373f3ce 5459 gen_vfp_tosiz(0);
9ee6e8bb
PB
5460 break;
5461 case 61: /* VCVT.F32.U32 */
4373f3ce 5462 gen_vfp_touiz(0);
9ee6e8bb
PB
5463 break;
5464 case 62: /* VCVT.S32.F32 */
4373f3ce 5465 gen_vfp_sito(0);
9ee6e8bb
PB
5466 break;
5467 case 63: /* VCVT.U32.F32 */
4373f3ce 5468 gen_vfp_uito(0);
9ee6e8bb
PB
5469 break;
5470 default:
5471 /* Reserved: 21, 29, 39-56 */
5472 return 1;
5473 }
5474 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5475 tcg_gen_st_f32(cpu_F0s, cpu_env,
5476 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5477 } else {
5478 NEON_SET_REG(T0, rd, pass);
5479 }
5480 }
5481 break;
5482 }
5483 } else if ((insn & (1 << 10)) == 0) {
5484 /* VTBL, VTBX. */
3018f259 5485 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5486 if (insn & (1 << 6)) {
8f8e3aa4 5487 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5488 } else {
8f8e3aa4
PB
5489 tmp = new_tmp();
5490 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5491 }
8f8e3aa4
PB
5492 tmp2 = neon_load_reg(rm, 0);
5493 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5494 tcg_const_i32(n));
3018f259 5495 dead_tmp(tmp);
9ee6e8bb 5496 if (insn & (1 << 6)) {
8f8e3aa4 5497 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5498 } else {
8f8e3aa4
PB
5499 tmp = new_tmp();
5500 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5501 }
8f8e3aa4
PB
5502 tmp3 = neon_load_reg(rm, 1);
5503 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5504 tcg_const_i32(n));
5505 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5506 neon_store_reg(rd, 1, tmp3);
5507 dead_tmp(tmp);
9ee6e8bb
PB
5508 } else if ((insn & 0x380) == 0) {
5509 /* VDUP */
5510 if (insn & (1 << 19)) {
5511 NEON_SET_REG(T0, rm, 1);
5512 } else {
5513 NEON_SET_REG(T0, rm, 0);
5514 }
5515 if (insn & (1 << 16)) {
ad69471c 5516 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5517 } else if (insn & (1 << 17)) {
5518 if ((insn >> 18) & 1)
ad69471c 5519 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5520 else
ad69471c 5521 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5522 }
5523 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5524 NEON_SET_REG(T0, rd, pass);
5525 }
5526 } else {
5527 return 1;
5528 }
5529 }
5530 }
5531 return 0;
5532}
5533
5534static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5535{
5536 int cpnum;
5537
5538 cpnum = (insn >> 8) & 0xf;
5539 if (arm_feature(env, ARM_FEATURE_XSCALE)
5540 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5541 return 1;
5542
5543 switch (cpnum) {
5544 case 0:
5545 case 1:
5546 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5547 return disas_iwmmxt_insn(env, s, insn);
5548 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5549 return disas_dsp_insn(env, s, insn);
5550 }
5551 return 1;
5552 case 10:
5553 case 11:
5554 return disas_vfp_insn (env, s, insn);
5555 case 15:
5556 return disas_cp15_insn (env, s, insn);
5557 default:
5558 /* Unknown coprocessor. See if the board has hooked it. */
5559 return disas_cp_insn (env, s, insn);
5560 }
5561}
5562
5e3f878a
PB
5563
5564/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5565static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5566{
5567 TCGv tmp;
5568 tmp = new_tmp();
5569 tcg_gen_trunc_i64_i32(tmp, val);
5570 store_reg(s, rlow, tmp);
5571 tmp = new_tmp();
5572 tcg_gen_shri_i64(val, val, 32);
5573 tcg_gen_trunc_i64_i32(tmp, val);
5574 store_reg(s, rhigh, tmp);
5575}
5576
5577/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5578static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5579{
a7812ae4 5580 TCGv_i64 tmp;
5e3f878a
PB
5581 TCGv tmp2;
5582
36aa55dc 5583 /* Load value and extend to 64 bits. */
a7812ae4 5584 tmp = tcg_temp_new_i64();
5e3f878a
PB
5585 tmp2 = load_reg(s, rlow);
5586 tcg_gen_extu_i32_i64(tmp, tmp2);
5587 dead_tmp(tmp2);
5588 tcg_gen_add_i64(val, val, tmp);
5589}
5590
5591/* load and add a 64-bit value from a register pair. */
a7812ae4 5592static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5593{
a7812ae4 5594 TCGv_i64 tmp;
36aa55dc
PB
5595 TCGv tmpl;
5596 TCGv tmph;
5e3f878a
PB
5597
5598 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5599 tmpl = load_reg(s, rlow);
5600 tmph = load_reg(s, rhigh);
a7812ae4 5601 tmp = tcg_temp_new_i64();
36aa55dc
PB
5602 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5603 dead_tmp(tmpl);
5604 dead_tmp(tmph);
5e3f878a
PB
5605 tcg_gen_add_i64(val, val, tmp);
5606}
5607
5608/* Set N and Z flags from a 64-bit value. */
a7812ae4 5609static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5610{
5611 TCGv tmp = new_tmp();
5612 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5613 gen_logic_CC(tmp);
5614 dead_tmp(tmp);
5e3f878a
PB
5615}
5616
9ee6e8bb
PB
5617static void disas_arm_insn(CPUState * env, DisasContext *s)
5618{
5619 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5620 TCGv tmp;
3670669c 5621 TCGv tmp2;
6ddbc6e4 5622 TCGv tmp3;
b0109805 5623 TCGv addr;
a7812ae4 5624 TCGv_i64 tmp64;
9ee6e8bb
PB
5625
5626 insn = ldl_code(s->pc);
5627 s->pc += 4;
5628
5629 /* M variants do not implement ARM mode. */
5630 if (IS_M(env))
5631 goto illegal_op;
5632 cond = insn >> 28;
5633 if (cond == 0xf){
5634 /* Unconditional instructions. */
5635 if (((insn >> 25) & 7) == 1) {
5636 /* NEON Data processing. */
5637 if (!arm_feature(env, ARM_FEATURE_NEON))
5638 goto illegal_op;
5639
5640 if (disas_neon_data_insn(env, s, insn))
5641 goto illegal_op;
5642 return;
5643 }
5644 if ((insn & 0x0f100000) == 0x04000000) {
5645 /* NEON load/store. */
5646 if (!arm_feature(env, ARM_FEATURE_NEON))
5647 goto illegal_op;
5648
5649 if (disas_neon_ls_insn(env, s, insn))
5650 goto illegal_op;
5651 return;
5652 }
5653 if ((insn & 0x0d70f000) == 0x0550f000)
5654 return; /* PLD */
5655 else if ((insn & 0x0ffffdff) == 0x01010000) {
5656 ARCH(6);
5657 /* setend */
5658 if (insn & (1 << 9)) {
5659 /* BE8 mode not implemented. */
5660 goto illegal_op;
5661 }
5662 return;
5663 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5664 switch ((insn >> 4) & 0xf) {
5665 case 1: /* clrex */
5666 ARCH(6K);
8f8e3aa4 5667 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5668 return;
5669 case 4: /* dsb */
5670 case 5: /* dmb */
5671 case 6: /* isb */
5672 ARCH(7);
5673 /* We don't emulate caches so these are a no-op. */
5674 return;
5675 default:
5676 goto illegal_op;
5677 }
5678 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5679 /* srs */
5680 uint32_t offset;
5681 if (IS_USER(s))
5682 goto illegal_op;
5683 ARCH(6);
5684 op1 = (insn & 0x1f);
5685 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5686 addr = load_reg(s, 13);
9ee6e8bb 5687 } else {
b0109805
PB
5688 addr = new_tmp();
5689 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5690 }
5691 i = (insn >> 23) & 3;
5692 switch (i) {
5693 case 0: offset = -4; break; /* DA */
5694 case 1: offset = -8; break; /* DB */
5695 case 2: offset = 0; break; /* IA */
5696 case 3: offset = 4; break; /* IB */
5697 default: abort();
5698 }
5699 if (offset)
b0109805
PB
5700 tcg_gen_addi_i32(addr, addr, offset);
5701 tmp = load_reg(s, 14);
5702 gen_st32(tmp, addr, 0);
5703 tmp = new_tmp();
5704 gen_helper_cpsr_read(tmp);
5705 tcg_gen_addi_i32(addr, addr, 4);
5706 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5707 if (insn & (1 << 21)) {
5708 /* Base writeback. */
5709 switch (i) {
5710 case 0: offset = -8; break;
5711 case 1: offset = -4; break;
5712 case 2: offset = 4; break;
5713 case 3: offset = 0; break;
5714 default: abort();
5715 }
5716 if (offset)
b0109805 5717 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5718 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5719 gen_movl_reg_T1(s, 13);
5720 } else {
b0109805 5721 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5722 }
b0109805
PB
5723 } else {
5724 dead_tmp(addr);
9ee6e8bb
PB
5725 }
5726 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5727 /* rfe */
5728 uint32_t offset;
5729 if (IS_USER(s))
5730 goto illegal_op;
5731 ARCH(6);
5732 rn = (insn >> 16) & 0xf;
b0109805 5733 addr = load_reg(s, rn);
9ee6e8bb
PB
5734 i = (insn >> 23) & 3;
5735 switch (i) {
b0109805
PB
5736 case 0: offset = -4; break; /* DA */
5737 case 1: offset = -8; break; /* DB */
5738 case 2: offset = 0; break; /* IA */
5739 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5740 default: abort();
5741 }
5742 if (offset)
b0109805
PB
5743 tcg_gen_addi_i32(addr, addr, offset);
5744 /* Load PC into tmp and CPSR into tmp2. */
5745 tmp = gen_ld32(addr, 0);
5746 tcg_gen_addi_i32(addr, addr, 4);
5747 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5748 if (insn & (1 << 21)) {
5749 /* Base writeback. */
5750 switch (i) {
b0109805
PB
5751 case 0: offset = -8; break;
5752 case 1: offset = -4; break;
5753 case 2: offset = 4; break;
5754 case 3: offset = 0; break;
9ee6e8bb
PB
5755 default: abort();
5756 }
5757 if (offset)
b0109805
PB
5758 tcg_gen_addi_i32(addr, addr, offset);
5759 store_reg(s, rn, addr);
5760 } else {
5761 dead_tmp(addr);
9ee6e8bb 5762 }
b0109805 5763 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5764 } else if ((insn & 0x0e000000) == 0x0a000000) {
5765 /* branch link and change to thumb (blx <offset>) */
5766 int32_t offset;
5767
5768 val = (uint32_t)s->pc;
d9ba4830
PB
5769 tmp = new_tmp();
5770 tcg_gen_movi_i32(tmp, val);
5771 store_reg(s, 14, tmp);
9ee6e8bb
PB
5772 /* Sign-extend the 24-bit offset */
5773 offset = (((int32_t)insn) << 8) >> 8;
5774 /* offset * 4 + bit24 * 2 + (thumb bit) */
5775 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5776 /* pipeline offset */
5777 val += 4;
d9ba4830 5778 gen_bx_im(s, val);
9ee6e8bb
PB
5779 return;
5780 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5781 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5782 /* iWMMXt register transfer. */
5783 if (env->cp15.c15_cpar & (1 << 1))
5784 if (!disas_iwmmxt_insn(env, s, insn))
5785 return;
5786 }
5787 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5788 /* Coprocessor double register transfer. */
5789 } else if ((insn & 0x0f000010) == 0x0e000010) {
5790 /* Additional coprocessor register transfer. */
7997d92f 5791 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5792 uint32_t mask;
5793 uint32_t val;
5794 /* cps (privileged) */
5795 if (IS_USER(s))
5796 return;
5797 mask = val = 0;
5798 if (insn & (1 << 19)) {
5799 if (insn & (1 << 8))
5800 mask |= CPSR_A;
5801 if (insn & (1 << 7))
5802 mask |= CPSR_I;
5803 if (insn & (1 << 6))
5804 mask |= CPSR_F;
5805 if (insn & (1 << 18))
5806 val |= mask;
5807 }
7997d92f 5808 if (insn & (1 << 17)) {
9ee6e8bb
PB
5809 mask |= CPSR_M;
5810 val |= (insn & 0x1f);
5811 }
5812 if (mask) {
5813 gen_op_movl_T0_im(val);
5814 gen_set_psr_T0(s, mask, 0);
5815 }
5816 return;
5817 }
5818 goto illegal_op;
5819 }
5820 if (cond != 0xe) {
5821 /* if not always execute, we generate a conditional jump to
5822 next instruction */
5823 s->condlabel = gen_new_label();
d9ba4830 5824 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5825 s->condjmp = 1;
5826 }
5827 if ((insn & 0x0f900000) == 0x03000000) {
5828 if ((insn & (1 << 21)) == 0) {
5829 ARCH(6T2);
5830 rd = (insn >> 12) & 0xf;
5831 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5832 if ((insn & (1 << 22)) == 0) {
5833 /* MOVW */
5e3f878a
PB
5834 tmp = new_tmp();
5835 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5836 } else {
5837 /* MOVT */
5e3f878a 5838 tmp = load_reg(s, rd);
86831435 5839 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5840 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5841 }
5e3f878a 5842 store_reg(s, rd, tmp);
9ee6e8bb
PB
5843 } else {
5844 if (((insn >> 12) & 0xf) != 0xf)
5845 goto illegal_op;
5846 if (((insn >> 16) & 0xf) == 0) {
5847 gen_nop_hint(s, insn & 0xff);
5848 } else {
5849 /* CPSR = immediate */
5850 val = insn & 0xff;
5851 shift = ((insn >> 8) & 0xf) * 2;
5852 if (shift)
5853 val = (val >> shift) | (val << (32 - shift));
5854 gen_op_movl_T0_im(val);
5855 i = ((insn & (1 << 22)) != 0);
5856 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5857 goto illegal_op;
5858 }
5859 }
5860 } else if ((insn & 0x0f900000) == 0x01000000
5861 && (insn & 0x00000090) != 0x00000090) {
5862 /* miscellaneous instructions */
5863 op1 = (insn >> 21) & 3;
5864 sh = (insn >> 4) & 0xf;
5865 rm = insn & 0xf;
5866 switch (sh) {
5867 case 0x0: /* move program status register */
5868 if (op1 & 1) {
5869 /* PSR = reg */
5870 gen_movl_T0_reg(s, rm);
5871 i = ((op1 & 2) != 0);
5872 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5873 goto illegal_op;
5874 } else {
5875 /* reg = PSR */
5876 rd = (insn >> 12) & 0xf;
5877 if (op1 & 2) {
5878 if (IS_USER(s))
5879 goto illegal_op;
d9ba4830 5880 tmp = load_cpu_field(spsr);
9ee6e8bb 5881 } else {
d9ba4830
PB
5882 tmp = new_tmp();
5883 gen_helper_cpsr_read(tmp);
9ee6e8bb 5884 }
d9ba4830 5885 store_reg(s, rd, tmp);
9ee6e8bb
PB
5886 }
5887 break;
5888 case 0x1:
5889 if (op1 == 1) {
5890 /* branch/exchange thumb (bx). */
d9ba4830
PB
5891 tmp = load_reg(s, rm);
5892 gen_bx(s, tmp);
9ee6e8bb
PB
5893 } else if (op1 == 3) {
5894 /* clz */
5895 rd = (insn >> 12) & 0xf;
1497c961
PB
5896 tmp = load_reg(s, rm);
5897 gen_helper_clz(tmp, tmp);
5898 store_reg(s, rd, tmp);
9ee6e8bb
PB
5899 } else {
5900 goto illegal_op;
5901 }
5902 break;
5903 case 0x2:
5904 if (op1 == 1) {
5905 ARCH(5J); /* bxj */
5906 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
5907 tmp = load_reg(s, rm);
5908 gen_bx(s, tmp);
9ee6e8bb
PB
5909 } else {
5910 goto illegal_op;
5911 }
5912 break;
5913 case 0x3:
5914 if (op1 != 1)
5915 goto illegal_op;
5916
5917 /* branch link/exchange thumb (blx) */
d9ba4830
PB
5918 tmp = load_reg(s, rm);
5919 tmp2 = new_tmp();
5920 tcg_gen_movi_i32(tmp2, s->pc);
5921 store_reg(s, 14, tmp2);
5922 gen_bx(s, tmp);
9ee6e8bb
PB
5923 break;
5924 case 0x5: /* saturating add/subtract */
5925 rd = (insn >> 12) & 0xf;
5926 rn = (insn >> 16) & 0xf;
b40d0353 5927 tmp = load_reg(s, rm);
5e3f878a 5928 tmp2 = load_reg(s, rn);
9ee6e8bb 5929 if (op1 & 2)
5e3f878a 5930 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 5931 if (op1 & 1)
5e3f878a 5932 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 5933 else
5e3f878a
PB
5934 gen_helper_add_saturate(tmp, tmp, tmp2);
5935 dead_tmp(tmp2);
5936 store_reg(s, rd, tmp);
9ee6e8bb
PB
5937 break;
5938 case 7: /* bkpt */
5939 gen_set_condexec(s);
5e3f878a 5940 gen_set_pc_im(s->pc - 4);
d9ba4830 5941 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
5942 s->is_jmp = DISAS_JUMP;
5943 break;
5944 case 0x8: /* signed multiply */
5945 case 0xa:
5946 case 0xc:
5947 case 0xe:
5948 rs = (insn >> 8) & 0xf;
5949 rn = (insn >> 12) & 0xf;
5950 rd = (insn >> 16) & 0xf;
5951 if (op1 == 1) {
5952 /* (32 * 16) >> 16 */
5e3f878a
PB
5953 tmp = load_reg(s, rm);
5954 tmp2 = load_reg(s, rs);
9ee6e8bb 5955 if (sh & 4)
5e3f878a 5956 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 5957 else
5e3f878a 5958 gen_sxth(tmp2);
a7812ae4
PB
5959 tmp64 = gen_muls_i64_i32(tmp, tmp2);
5960 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 5961 tmp = new_tmp();
a7812ae4 5962 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 5963 if ((sh & 2) == 0) {
5e3f878a
PB
5964 tmp2 = load_reg(s, rn);
5965 gen_helper_add_setq(tmp, tmp, tmp2);
5966 dead_tmp(tmp2);
9ee6e8bb 5967 }
5e3f878a 5968 store_reg(s, rd, tmp);
9ee6e8bb
PB
5969 } else {
5970 /* 16 * 16 */
5e3f878a
PB
5971 tmp = load_reg(s, rm);
5972 tmp2 = load_reg(s, rs);
5973 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
5974 dead_tmp(tmp2);
9ee6e8bb 5975 if (op1 == 2) {
a7812ae4
PB
5976 tmp64 = tcg_temp_new_i64();
5977 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 5978 dead_tmp(tmp);
a7812ae4
PB
5979 gen_addq(s, tmp64, rn, rd);
5980 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
5981 } else {
5982 if (op1 == 0) {
5e3f878a
PB
5983 tmp2 = load_reg(s, rn);
5984 gen_helper_add_setq(tmp, tmp, tmp2);
5985 dead_tmp(tmp2);
9ee6e8bb 5986 }
5e3f878a 5987 store_reg(s, rd, tmp);
9ee6e8bb
PB
5988 }
5989 }
5990 break;
5991 default:
5992 goto illegal_op;
5993 }
5994 } else if (((insn & 0x0e000000) == 0 &&
5995 (insn & 0x00000090) != 0x90) ||
5996 ((insn & 0x0e000000) == (1 << 25))) {
5997 int set_cc, logic_cc, shiftop;
5998
5999 op1 = (insn >> 21) & 0xf;
6000 set_cc = (insn >> 20) & 1;
6001 logic_cc = table_logic_cc[op1] & set_cc;
6002
6003 /* data processing instruction */
6004 if (insn & (1 << 25)) {
6005 /* immediate operand */
6006 val = insn & 0xff;
6007 shift = ((insn >> 8) & 0xf) * 2;
6008 if (shift)
6009 val = (val >> shift) | (val << (32 - shift));
6010 gen_op_movl_T1_im(val);
6011 if (logic_cc && shift)
b26eefb6 6012 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6013 } else {
6014 /* register */
6015 rm = (insn) & 0xf;
6016 gen_movl_T1_reg(s, rm);
6017 shiftop = (insn >> 5) & 3;
6018 if (!(insn & (1 << 4))) {
6019 shift = (insn >> 7) & 0x1f;
9a119ff6 6020 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
6021 } else {
6022 rs = (insn >> 8) & 0xf;
8984bd2e
PB
6023 tmp = load_reg(s, rs);
6024 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
9ee6e8bb
PB
6025 }
6026 }
6027 if (op1 != 0x0f && op1 != 0x0d) {
6028 rn = (insn >> 16) & 0xf;
6029 gen_movl_T0_reg(s, rn);
6030 }
6031 rd = (insn >> 12) & 0xf;
6032 switch(op1) {
6033 case 0x00:
6034 gen_op_andl_T0_T1();
6035 gen_movl_reg_T0(s, rd);
6036 if (logic_cc)
6037 gen_op_logic_T0_cc();
6038 break;
6039 case 0x01:
6040 gen_op_xorl_T0_T1();
6041 gen_movl_reg_T0(s, rd);
6042 if (logic_cc)
6043 gen_op_logic_T0_cc();
6044 break;
6045 case 0x02:
6046 if (set_cc && rd == 15) {
6047 /* SUBS r15, ... is used for exception return. */
6048 if (IS_USER(s))
6049 goto illegal_op;
6050 gen_op_subl_T0_T1_cc();
6051 gen_exception_return(s);
6052 } else {
6053 if (set_cc)
6054 gen_op_subl_T0_T1_cc();
6055 else
6056 gen_op_subl_T0_T1();
6057 gen_movl_reg_T0(s, rd);
6058 }
6059 break;
6060 case 0x03:
6061 if (set_cc)
6062 gen_op_rsbl_T0_T1_cc();
6063 else
6064 gen_op_rsbl_T0_T1();
6065 gen_movl_reg_T0(s, rd);
6066 break;
6067 case 0x04:
6068 if (set_cc)
6069 gen_op_addl_T0_T1_cc();
6070 else
6071 gen_op_addl_T0_T1();
6072 gen_movl_reg_T0(s, rd);
6073 break;
6074 case 0x05:
6075 if (set_cc)
6076 gen_op_adcl_T0_T1_cc();
6077 else
b26eefb6 6078 gen_adc_T0_T1();
9ee6e8bb
PB
6079 gen_movl_reg_T0(s, rd);
6080 break;
6081 case 0x06:
6082 if (set_cc)
6083 gen_op_sbcl_T0_T1_cc();
6084 else
3670669c 6085 gen_sbc_T0_T1();
9ee6e8bb
PB
6086 gen_movl_reg_T0(s, rd);
6087 break;
6088 case 0x07:
6089 if (set_cc)
6090 gen_op_rscl_T0_T1_cc();
6091 else
3670669c 6092 gen_rsc_T0_T1();
9ee6e8bb
PB
6093 gen_movl_reg_T0(s, rd);
6094 break;
6095 case 0x08:
6096 if (set_cc) {
6097 gen_op_andl_T0_T1();
6098 gen_op_logic_T0_cc();
6099 }
6100 break;
6101 case 0x09:
6102 if (set_cc) {
6103 gen_op_xorl_T0_T1();
6104 gen_op_logic_T0_cc();
6105 }
6106 break;
6107 case 0x0a:
6108 if (set_cc) {
6109 gen_op_subl_T0_T1_cc();
6110 }
6111 break;
6112 case 0x0b:
6113 if (set_cc) {
6114 gen_op_addl_T0_T1_cc();
6115 }
6116 break;
6117 case 0x0c:
6118 gen_op_orl_T0_T1();
6119 gen_movl_reg_T0(s, rd);
6120 if (logic_cc)
6121 gen_op_logic_T0_cc();
6122 break;
6123 case 0x0d:
6124 if (logic_cc && rd == 15) {
6125 /* MOVS r15, ... is used for exception return. */
6126 if (IS_USER(s))
6127 goto illegal_op;
6128 gen_op_movl_T0_T1();
6129 gen_exception_return(s);
6130 } else {
6131 gen_movl_reg_T1(s, rd);
6132 if (logic_cc)
6133 gen_op_logic_T1_cc();
6134 }
6135 break;
6136 case 0x0e:
6137 gen_op_bicl_T0_T1();
6138 gen_movl_reg_T0(s, rd);
6139 if (logic_cc)
6140 gen_op_logic_T0_cc();
6141 break;
6142 default:
6143 case 0x0f:
6144 gen_op_notl_T1();
6145 gen_movl_reg_T1(s, rd);
6146 if (logic_cc)
6147 gen_op_logic_T1_cc();
6148 break;
6149 }
6150 } else {
6151 /* other instructions */
6152 op1 = (insn >> 24) & 0xf;
6153 switch(op1) {
6154 case 0x0:
6155 case 0x1:
6156 /* multiplies, extra load/stores */
6157 sh = (insn >> 5) & 3;
6158 if (sh == 0) {
6159 if (op1 == 0x0) {
6160 rd = (insn >> 16) & 0xf;
6161 rn = (insn >> 12) & 0xf;
6162 rs = (insn >> 8) & 0xf;
6163 rm = (insn) & 0xf;
6164 op1 = (insn >> 20) & 0xf;
6165 switch (op1) {
6166 case 0: case 1: case 2: case 3: case 6:
6167 /* 32 bit mul */
5e3f878a
PB
6168 tmp = load_reg(s, rs);
6169 tmp2 = load_reg(s, rm);
6170 tcg_gen_mul_i32(tmp, tmp, tmp2);
6171 dead_tmp(tmp2);
9ee6e8bb
PB
6172 if (insn & (1 << 22)) {
6173 /* Subtract (mls) */
6174 ARCH(6T2);
5e3f878a
PB
6175 tmp2 = load_reg(s, rn);
6176 tcg_gen_sub_i32(tmp, tmp2, tmp);
6177 dead_tmp(tmp2);
9ee6e8bb
PB
6178 } else if (insn & (1 << 21)) {
6179 /* Add */
5e3f878a
PB
6180 tmp2 = load_reg(s, rn);
6181 tcg_gen_add_i32(tmp, tmp, tmp2);
6182 dead_tmp(tmp2);
9ee6e8bb
PB
6183 }
6184 if (insn & (1 << 20))
5e3f878a
PB
6185 gen_logic_CC(tmp);
6186 store_reg(s, rd, tmp);
9ee6e8bb
PB
6187 break;
6188 default:
6189 /* 64 bit mul */
5e3f878a
PB
6190 tmp = load_reg(s, rs);
6191 tmp2 = load_reg(s, rm);
9ee6e8bb 6192 if (insn & (1 << 22))
a7812ae4 6193 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6194 else
a7812ae4 6195 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6196 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6197 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6198 if (!(insn & (1 << 23))) { /* double accumulate */
6199 ARCH(6);
a7812ae4
PB
6200 gen_addq_lo(s, tmp64, rn);
6201 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6202 }
6203 if (insn & (1 << 20))
a7812ae4
PB
6204 gen_logicq_cc(tmp64);
6205 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6206 break;
6207 }
6208 } else {
6209 rn = (insn >> 16) & 0xf;
6210 rd = (insn >> 12) & 0xf;
6211 if (insn & (1 << 23)) {
6212 /* load/store exclusive */
86753403
PB
6213 op1 = (insn >> 21) & 0x3;
6214 if (op1)
a47f43d2 6215 ARCH(6K);
86753403
PB
6216 else
6217 ARCH(6);
9ee6e8bb 6218 gen_movl_T1_reg(s, rn);
72f1c62f 6219 addr = cpu_T[1];
9ee6e8bb 6220 if (insn & (1 << 20)) {
8f8e3aa4 6221 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
86753403
PB
6222 switch (op1) {
6223 case 0: /* ldrex */
6224 tmp = gen_ld32(addr, IS_USER(s));
6225 break;
6226 case 1: /* ldrexd */
6227 tmp = gen_ld32(addr, IS_USER(s));
6228 store_reg(s, rd, tmp);
6229 tcg_gen_addi_i32(addr, addr, 4);
6230 tmp = gen_ld32(addr, IS_USER(s));
6231 rd++;
6232 break;
6233 case 2: /* ldrexb */
6234 tmp = gen_ld8u(addr, IS_USER(s));
6235 break;
6236 case 3: /* ldrexh */
6237 tmp = gen_ld16u(addr, IS_USER(s));
6238 break;
6239 default:
6240 abort();
6241 }
8f8e3aa4 6242 store_reg(s, rd, tmp);
9ee6e8bb 6243 } else {
8f8e3aa4 6244 int label = gen_new_label();
9ee6e8bb 6245 rm = insn & 0xf;
8f8e3aa4 6246 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
6247 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6248 0, label);
8f8e3aa4 6249 tmp = load_reg(s,rm);
86753403
PB
6250 switch (op1) {
6251 case 0: /* strex */
6252 gen_st32(tmp, addr, IS_USER(s));
6253 break;
6254 case 1: /* strexd */
6255 gen_st32(tmp, addr, IS_USER(s));
6256 tcg_gen_addi_i32(addr, addr, 4);
6257 tmp = load_reg(s, rm + 1);
6258 gen_st32(tmp, addr, IS_USER(s));
6259 break;
6260 case 2: /* strexb */
6261 gen_st8(tmp, addr, IS_USER(s));
6262 break;
6263 case 3: /* strexh */
6264 gen_st16(tmp, addr, IS_USER(s));
6265 break;
6266 default:
6267 abort();
6268 }
2637a3be 6269 gen_set_label(label);
8f8e3aa4 6270 gen_movl_reg_T0(s, rd);
9ee6e8bb 6271 }
9ee6e8bb
PB
6272 } else {
6273 /* SWP instruction */
6274 rm = (insn) & 0xf;
6275
8984bd2e
PB
6276 /* ??? This is not really atomic. However we know
6277 we never have multiple CPUs running in parallel,
6278 so it is good enough. */
6279 addr = load_reg(s, rn);
6280 tmp = load_reg(s, rm);
9ee6e8bb 6281 if (insn & (1 << 22)) {
8984bd2e
PB
6282 tmp2 = gen_ld8u(addr, IS_USER(s));
6283 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6284 } else {
8984bd2e
PB
6285 tmp2 = gen_ld32(addr, IS_USER(s));
6286 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6287 }
8984bd2e
PB
6288 dead_tmp(addr);
6289 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6290 }
6291 }
6292 } else {
6293 int address_offset;
6294 int load;
6295 /* Misc load/store */
6296 rn = (insn >> 16) & 0xf;
6297 rd = (insn >> 12) & 0xf;
b0109805 6298 addr = load_reg(s, rn);
9ee6e8bb 6299 if (insn & (1 << 24))
b0109805 6300 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6301 address_offset = 0;
6302 if (insn & (1 << 20)) {
6303 /* load */
6304 switch(sh) {
6305 case 1:
b0109805 6306 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6307 break;
6308 case 2:
b0109805 6309 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6310 break;
6311 default:
6312 case 3:
b0109805 6313 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6314 break;
6315 }
6316 load = 1;
6317 } else if (sh & 2) {
6318 /* doubleword */
6319 if (sh & 1) {
6320 /* store */
b0109805
PB
6321 tmp = load_reg(s, rd);
6322 gen_st32(tmp, addr, IS_USER(s));
6323 tcg_gen_addi_i32(addr, addr, 4);
6324 tmp = load_reg(s, rd + 1);
6325 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6326 load = 0;
6327 } else {
6328 /* load */
b0109805
PB
6329 tmp = gen_ld32(addr, IS_USER(s));
6330 store_reg(s, rd, tmp);
6331 tcg_gen_addi_i32(addr, addr, 4);
6332 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6333 rd++;
6334 load = 1;
6335 }
6336 address_offset = -4;
6337 } else {
6338 /* store */
b0109805
PB
6339 tmp = load_reg(s, rd);
6340 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6341 load = 0;
6342 }
6343 /* Perform base writeback before the loaded value to
6344 ensure correct behavior with overlapping index registers.
 6345 ldrd with base writeback is undefined if the
6346 destination and index registers overlap. */
6347 if (!(insn & (1 << 24))) {
b0109805
PB
6348 gen_add_datah_offset(s, insn, address_offset, addr);
6349 store_reg(s, rn, addr);
9ee6e8bb
PB
6350 } else if (insn & (1 << 21)) {
6351 if (address_offset)
b0109805
PB
6352 tcg_gen_addi_i32(addr, addr, address_offset);
6353 store_reg(s, rn, addr);
6354 } else {
6355 dead_tmp(addr);
9ee6e8bb
PB
6356 }
6357 if (load) {
6358 /* Complete the load. */
b0109805 6359 store_reg(s, rd, tmp);
9ee6e8bb
PB
6360 }
6361 }
6362 break;
6363 case 0x4:
6364 case 0x5:
6365 goto do_ldst;
6366 case 0x6:
6367 case 0x7:
6368 if (insn & (1 << 4)) {
6369 ARCH(6);
6370 /* Armv6 Media instructions. */
6371 rm = insn & 0xf;
6372 rn = (insn >> 16) & 0xf;
2c0262af 6373 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6374 rs = (insn >> 8) & 0xf;
6375 switch ((insn >> 23) & 3) {
6376 case 0: /* Parallel add/subtract. */
6377 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6378 tmp = load_reg(s, rn);
6379 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6380 sh = (insn >> 5) & 7;
6381 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6382 goto illegal_op;
6ddbc6e4
PB
6383 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6384 dead_tmp(tmp2);
6385 store_reg(s, rd, tmp);
9ee6e8bb
PB
6386 break;
6387 case 1:
6388 if ((insn & 0x00700020) == 0) {
6c95676b 6389 /* Halfword pack. */
3670669c
PB
6390 tmp = load_reg(s, rn);
6391 tmp2 = load_reg(s, rm);
9ee6e8bb 6392 shift = (insn >> 7) & 0x1f;
3670669c
PB
6393 if (insn & (1 << 6)) {
6394 /* pkhtb */
22478e79
AZ
6395 if (shift == 0)
6396 shift = 31;
6397 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6398 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6399 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6400 } else {
6401 /* pkhbt */
22478e79
AZ
6402 if (shift)
6403 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6404 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6405 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6406 }
6407 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6408 dead_tmp(tmp2);
3670669c 6409 store_reg(s, rd, tmp);
9ee6e8bb
PB
6410 } else if ((insn & 0x00200020) == 0x00200000) {
6411 /* [us]sat */
6ddbc6e4 6412 tmp = load_reg(s, rm);
9ee6e8bb
PB
6413 shift = (insn >> 7) & 0x1f;
6414 if (insn & (1 << 6)) {
6415 if (shift == 0)
6416 shift = 31;
6ddbc6e4 6417 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6418 } else {
6ddbc6e4 6419 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6420 }
6421 sh = (insn >> 16) & 0x1f;
6422 if (sh != 0) {
6423 if (insn & (1 << 22))
6ddbc6e4 6424 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6425 else
6ddbc6e4 6426 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6427 }
6ddbc6e4 6428 store_reg(s, rd, tmp);
9ee6e8bb
PB
6429 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6430 /* [us]sat16 */
6ddbc6e4 6431 tmp = load_reg(s, rm);
9ee6e8bb
PB
6432 sh = (insn >> 16) & 0x1f;
6433 if (sh != 0) {
6434 if (insn & (1 << 22))
6ddbc6e4 6435 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6436 else
6ddbc6e4 6437 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6438 }
6ddbc6e4 6439 store_reg(s, rd, tmp);
9ee6e8bb
PB
6440 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6441 /* Select bytes. */
6ddbc6e4
PB
6442 tmp = load_reg(s, rn);
6443 tmp2 = load_reg(s, rm);
6444 tmp3 = new_tmp();
6445 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6446 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6447 dead_tmp(tmp3);
6448 dead_tmp(tmp2);
6449 store_reg(s, rd, tmp);
9ee6e8bb 6450 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6451 tmp = load_reg(s, rm);
9ee6e8bb
PB
6452 shift = (insn >> 10) & 3;
 6453 /* ??? In many cases it's not necessary to do a
6454 rotate, a shift is sufficient. */
6455 if (shift != 0)
5e3f878a 6456 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6457 op1 = (insn >> 20) & 7;
6458 switch (op1) {
5e3f878a
PB
6459 case 0: gen_sxtb16(tmp); break;
6460 case 2: gen_sxtb(tmp); break;
6461 case 3: gen_sxth(tmp); break;
6462 case 4: gen_uxtb16(tmp); break;
6463 case 6: gen_uxtb(tmp); break;
6464 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6465 default: goto illegal_op;
6466 }
6467 if (rn != 15) {
5e3f878a 6468 tmp2 = load_reg(s, rn);
9ee6e8bb 6469 if ((op1 & 3) == 0) {
5e3f878a 6470 gen_add16(tmp, tmp2);
9ee6e8bb 6471 } else {
5e3f878a
PB
6472 tcg_gen_add_i32(tmp, tmp, tmp2);
6473 dead_tmp(tmp2);
9ee6e8bb
PB
6474 }
6475 }
6c95676b 6476 store_reg(s, rd, tmp);
9ee6e8bb
PB
6477 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6478 /* rev */
b0109805 6479 tmp = load_reg(s, rm);
9ee6e8bb
PB
6480 if (insn & (1 << 22)) {
6481 if (insn & (1 << 7)) {
b0109805 6482 gen_revsh(tmp);
9ee6e8bb
PB
6483 } else {
6484 ARCH(6T2);
b0109805 6485 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6486 }
6487 } else {
6488 if (insn & (1 << 7))
b0109805 6489 gen_rev16(tmp);
9ee6e8bb 6490 else
b0109805 6491 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb 6492 }
b0109805 6493 store_reg(s, rd, tmp);
9ee6e8bb
PB
6494 } else {
6495 goto illegal_op;
6496 }
6497 break;
6498 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6499 tmp = load_reg(s, rm);
6500 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6501 if (insn & (1 << 20)) {
6502 /* Signed multiply most significant [accumulate]. */
a7812ae4 6503 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6504 if (insn & (1 << 5))
a7812ae4
PB
6505 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6506 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6507 tmp = new_tmp();
a7812ae4 6508 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6509 if (rn != 15) {
5e3f878a 6510 tmp2 = load_reg(s, rn);
9ee6e8bb 6511 if (insn & (1 << 6)) {
5e3f878a 6512 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6513 } else {
5e3f878a 6514 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6515 }
5e3f878a 6516 dead_tmp(tmp2);
9ee6e8bb 6517 }
5e3f878a 6518 store_reg(s, rd, tmp);
9ee6e8bb
PB
6519 } else {
6520 if (insn & (1 << 5))
5e3f878a
PB
6521 gen_swap_half(tmp2);
6522 gen_smul_dual(tmp, tmp2);
6523 /* This addition cannot overflow. */
6524 if (insn & (1 << 6)) {
6525 tcg_gen_sub_i32(tmp, tmp, tmp2);
6526 } else {
6527 tcg_gen_add_i32(tmp, tmp, tmp2);
6528 }
6529 dead_tmp(tmp2);
9ee6e8bb 6530 if (insn & (1 << 22)) {
5e3f878a 6531 /* smlald, smlsld */
a7812ae4
PB
6532 tmp64 = tcg_temp_new_i64();
6533 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6534 dead_tmp(tmp);
a7812ae4
PB
6535 gen_addq(s, tmp64, rd, rn);
6536 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6537 } else {
5e3f878a 6538 /* smuad, smusd, smlad, smlsd */
22478e79 6539 if (rd != 15)
9ee6e8bb 6540 {
22478e79 6541 tmp2 = load_reg(s, rd);
5e3f878a
PB
6542 gen_helper_add_setq(tmp, tmp, tmp2);
6543 dead_tmp(tmp2);
9ee6e8bb 6544 }
22478e79 6545 store_reg(s, rn, tmp);
9ee6e8bb
PB
6546 }
6547 }
6548 break;
6549 case 3:
6550 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6551 switch (op1) {
6552 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6553 ARCH(6);
6554 tmp = load_reg(s, rm);
6555 tmp2 = load_reg(s, rs);
6556 gen_helper_usad8(tmp, tmp, tmp2);
6557 dead_tmp(tmp2);
9ee6e8bb 6558 if (rn != 15) {
6ddbc6e4
PB
6559 tmp2 = load_reg(s, rn);
6560 tcg_gen_add_i32(tmp, tmp, tmp2);
6561 dead_tmp(tmp2);
9ee6e8bb 6562 }
6ddbc6e4 6563 store_reg(s, rd, tmp);
9ee6e8bb
PB
6564 break;
6565 case 0x20: case 0x24: case 0x28: case 0x2c:
6566 /* Bitfield insert/clear. */
6567 ARCH(6T2);
6568 shift = (insn >> 7) & 0x1f;
6569 i = (insn >> 16) & 0x1f;
6570 i = i + 1 - shift;
6571 if (rm == 15) {
5e3f878a
PB
6572 tmp = new_tmp();
6573 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6574 } else {
5e3f878a 6575 tmp = load_reg(s, rm);
9ee6e8bb
PB
6576 }
6577 if (i != 32) {
5e3f878a 6578 tmp2 = load_reg(s, rd);
8f8e3aa4 6579 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6580 dead_tmp(tmp2);
9ee6e8bb 6581 }
5e3f878a 6582 store_reg(s, rd, tmp);
9ee6e8bb
PB
6583 break;
6584 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6585 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5e3f878a 6586 tmp = load_reg(s, rm);
9ee6e8bb
PB
6587 shift = (insn >> 7) & 0x1f;
6588 i = ((insn >> 16) & 0x1f) + 1;
6589 if (shift + i > 32)
6590 goto illegal_op;
6591 if (i < 32) {
6592 if (op1 & 0x20) {
5e3f878a 6593 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6594 } else {
5e3f878a 6595 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6596 }
6597 }
5e3f878a 6598 store_reg(s, rd, tmp);
9ee6e8bb
PB
6599 break;
6600 default:
6601 goto illegal_op;
6602 }
6603 break;
6604 }
6605 break;
6606 }
6607 do_ldst:
6608 /* Check for undefined extension instructions
6609 * per the ARM Bible IE:
6610 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6611 */
6612 sh = (0xf << 20) | (0xf << 4);
6613 if (op1 == 0x7 && ((insn & sh) == sh))
6614 {
6615 goto illegal_op;
6616 }
6617 /* load/store byte/word */
6618 rn = (insn >> 16) & 0xf;
6619 rd = (insn >> 12) & 0xf;
b0109805 6620 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6621 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6622 if (insn & (1 << 24))
b0109805 6623 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6624 if (insn & (1 << 20)) {
6625 /* load */
9ee6e8bb 6626 if (insn & (1 << 22)) {
b0109805 6627 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6628 } else {
b0109805 6629 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6630 }
9ee6e8bb
PB
6631 } else {
6632 /* store */
b0109805 6633 tmp = load_reg(s, rd);
9ee6e8bb 6634 if (insn & (1 << 22))
b0109805 6635 gen_st8(tmp, tmp2, i);
9ee6e8bb 6636 else
b0109805 6637 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6638 }
6639 if (!(insn & (1 << 24))) {
b0109805
PB
6640 gen_add_data_offset(s, insn, tmp2);
6641 store_reg(s, rn, tmp2);
6642 } else if (insn & (1 << 21)) {
6643 store_reg(s, rn, tmp2);
6644 } else {
6645 dead_tmp(tmp2);
9ee6e8bb
PB
6646 }
6647 if (insn & (1 << 20)) {
6648 /* Complete the load. */
6649 if (rd == 15)
b0109805 6650 gen_bx(s, tmp);
9ee6e8bb 6651 else
b0109805 6652 store_reg(s, rd, tmp);
9ee6e8bb
PB
6653 }
6654 break;
6655 case 0x08:
6656 case 0x09:
6657 {
6658 int j, n, user, loaded_base;
b0109805 6659 TCGv loaded_var;
9ee6e8bb
PB
6660 /* load/store multiple words */
6661 /* XXX: store correct base if write back */
6662 user = 0;
6663 if (insn & (1 << 22)) {
6664 if (IS_USER(s))
6665 goto illegal_op; /* only usable in supervisor mode */
6666
6667 if ((insn & (1 << 15)) == 0)
6668 user = 1;
6669 }
6670 rn = (insn >> 16) & 0xf;
b0109805 6671 addr = load_reg(s, rn);
9ee6e8bb
PB
6672
6673 /* compute total size */
6674 loaded_base = 0;
a50f5b91 6675 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6676 n = 0;
6677 for(i=0;i<16;i++) {
6678 if (insn & (1 << i))
6679 n++;
6680 }
6681 /* XXX: test invalid n == 0 case ? */
6682 if (insn & (1 << 23)) {
6683 if (insn & (1 << 24)) {
6684 /* pre increment */
b0109805 6685 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6686 } else {
6687 /* post increment */
6688 }
6689 } else {
6690 if (insn & (1 << 24)) {
6691 /* pre decrement */
b0109805 6692 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6693 } else {
6694 /* post decrement */
6695 if (n != 1)
b0109805 6696 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6697 }
6698 }
6699 j = 0;
6700 for(i=0;i<16;i++) {
6701 if (insn & (1 << i)) {
6702 if (insn & (1 << 20)) {
6703 /* load */
b0109805 6704 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6705 if (i == 15) {
b0109805 6706 gen_bx(s, tmp);
9ee6e8bb 6707 } else if (user) {
b0109805
PB
6708 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6709 dead_tmp(tmp);
9ee6e8bb 6710 } else if (i == rn) {
b0109805 6711 loaded_var = tmp;
9ee6e8bb
PB
6712 loaded_base = 1;
6713 } else {
b0109805 6714 store_reg(s, i, tmp);
9ee6e8bb
PB
6715 }
6716 } else {
6717 /* store */
6718 if (i == 15) {
6719 /* special case: r15 = PC + 8 */
6720 val = (long)s->pc + 4;
b0109805
PB
6721 tmp = new_tmp();
6722 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6723 } else if (user) {
b0109805
PB
6724 tmp = new_tmp();
6725 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6726 } else {
b0109805 6727 tmp = load_reg(s, i);
9ee6e8bb 6728 }
b0109805 6729 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6730 }
6731 j++;
6732 /* no need to add after the last transfer */
6733 if (j != n)
b0109805 6734 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6735 }
6736 }
6737 if (insn & (1 << 21)) {
6738 /* write back */
6739 if (insn & (1 << 23)) {
6740 if (insn & (1 << 24)) {
6741 /* pre increment */
6742 } else {
6743 /* post increment */
b0109805 6744 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6745 }
6746 } else {
6747 if (insn & (1 << 24)) {
6748 /* pre decrement */
6749 if (n != 1)
b0109805 6750 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6751 } else {
6752 /* post decrement */
b0109805 6753 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6754 }
6755 }
b0109805
PB
6756 store_reg(s, rn, addr);
6757 } else {
6758 dead_tmp(addr);
9ee6e8bb
PB
6759 }
6760 if (loaded_base) {
b0109805 6761 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6762 }
6763 if ((insn & (1 << 22)) && !user) {
6764 /* Restore CPSR from SPSR. */
d9ba4830
PB
6765 tmp = load_cpu_field(spsr);
6766 gen_set_cpsr(tmp, 0xffffffff);
6767 dead_tmp(tmp);
9ee6e8bb
PB
6768 s->is_jmp = DISAS_UPDATE;
6769 }
6770 }
6771 break;
6772 case 0xa:
6773 case 0xb:
6774 {
6775 int32_t offset;
6776
6777 /* branch (and link) */
6778 val = (int32_t)s->pc;
6779 if (insn & (1 << 24)) {
5e3f878a
PB
6780 tmp = new_tmp();
6781 tcg_gen_movi_i32(tmp, val);
6782 store_reg(s, 14, tmp);
9ee6e8bb
PB
6783 }
6784 offset = (((int32_t)insn << 8) >> 8);
6785 val += (offset << 2) + 4;
6786 gen_jmp(s, val);
6787 }
6788 break;
6789 case 0xc:
6790 case 0xd:
6791 case 0xe:
6792 /* Coprocessor. */
6793 if (disas_coproc_insn(env, s, insn))
6794 goto illegal_op;
6795 break;
6796 case 0xf:
6797 /* swi */
5e3f878a 6798 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6799 s->is_jmp = DISAS_SWI;
6800 break;
6801 default:
6802 illegal_op:
6803 gen_set_condexec(s);
5e3f878a 6804 gen_set_pc_im(s->pc - 4);
d9ba4830 6805 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6806 s->is_jmp = DISAS_JUMP;
6807 break;
6808 }
6809 }
6810}
6811
/* Return nonzero when OP encodes a Thumb-2 logical operation.
   The logical ops occupy the low half (0..7) of the 4-bit opcode space.  */
static int
thumb2_logic_op(int op)
{
    if (op >= 8) {
        return 0;
    }
    return 1;
}
6818
6819/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6820 then set condition code flags based on the result of the operation.
6821 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6822 to the high bit of T1.
6823 Returns zero if the opcode is valid. */
6824
6825static int
6826gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6827{
6828 int logic_cc;
6829
6830 logic_cc = 0;
6831 switch (op) {
6832 case 0: /* and */
6833 gen_op_andl_T0_T1();
6834 logic_cc = conds;
6835 break;
6836 case 1: /* bic */
6837 gen_op_bicl_T0_T1();
6838 logic_cc = conds;
6839 break;
6840 case 2: /* orr */
6841 gen_op_orl_T0_T1();
6842 logic_cc = conds;
6843 break;
6844 case 3: /* orn */
6845 gen_op_notl_T1();
6846 gen_op_orl_T0_T1();
6847 logic_cc = conds;
6848 break;
6849 case 4: /* eor */
6850 gen_op_xorl_T0_T1();
6851 logic_cc = conds;
6852 break;
6853 case 8: /* add */
6854 if (conds)
6855 gen_op_addl_T0_T1_cc();
6856 else
6857 gen_op_addl_T0_T1();
6858 break;
6859 case 10: /* adc */
6860 if (conds)
6861 gen_op_adcl_T0_T1_cc();
6862 else
b26eefb6 6863 gen_adc_T0_T1();
9ee6e8bb
PB
6864 break;
6865 case 11: /* sbc */
6866 if (conds)
6867 gen_op_sbcl_T0_T1_cc();
6868 else
3670669c 6869 gen_sbc_T0_T1();
9ee6e8bb
PB
6870 break;
6871 case 13: /* sub */
6872 if (conds)
6873 gen_op_subl_T0_T1_cc();
6874 else
6875 gen_op_subl_T0_T1();
6876 break;
6877 case 14: /* rsb */
6878 if (conds)
6879 gen_op_rsbl_T0_T1_cc();
6880 else
6881 gen_op_rsbl_T0_T1();
6882 break;
6883 default: /* 5, 6, 7, 9, 12, 15. */
6884 return 1;
6885 }
6886 if (logic_cc) {
6887 gen_op_logic_T0_cc();
6888 if (shifter_out)
b26eefb6 6889 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6890 }
6891 return 0;
6892}
6893
6894/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6895 is not legal. */
6896static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6897{
b0109805 6898 uint32_t insn, imm, shift, offset;
9ee6e8bb 6899 uint32_t rd, rn, rm, rs;
b26eefb6 6900 TCGv tmp;
6ddbc6e4
PB
6901 TCGv tmp2;
6902 TCGv tmp3;
b0109805 6903 TCGv addr;
a7812ae4 6904 TCGv_i64 tmp64;
9ee6e8bb
PB
6905 int op;
6906 int shiftop;
6907 int conds;
6908 int logic_cc;
6909
6910 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6911 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 6912 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
6913 16-bit instructions to get correct prefetch abort behavior. */
6914 insn = insn_hw1;
6915 if ((insn & (1 << 12)) == 0) {
6916 /* Second half of blx. */
6917 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
6918 tmp = load_reg(s, 14);
6919 tcg_gen_addi_i32(tmp, tmp, offset);
6920 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 6921
d9ba4830 6922 tmp2 = new_tmp();
b0109805 6923 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6924 store_reg(s, 14, tmp2);
6925 gen_bx(s, tmp);
9ee6e8bb
PB
6926 return 0;
6927 }
6928 if (insn & (1 << 11)) {
6929 /* Second half of bl. */
6930 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 6931 tmp = load_reg(s, 14);
6a0d8a1d 6932 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 6933
d9ba4830 6934 tmp2 = new_tmp();
b0109805 6935 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6936 store_reg(s, 14, tmp2);
6937 gen_bx(s, tmp);
9ee6e8bb
PB
6938 return 0;
6939 }
6940 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6941 /* Instruction spans a page boundary. Implement it as two
 6942 16-bit instructions in case the second half causes a
6943 prefetch abort. */
6944 offset = ((int32_t)insn << 21) >> 9;
b0109805 6945 gen_op_movl_T0_im(s->pc + 2 + offset);
9ee6e8bb
PB
6946 gen_movl_reg_T0(s, 14);
6947 return 0;
6948 }
6949 /* Fall through to 32-bit decode. */
6950 }
6951
6952 insn = lduw_code(s->pc);
6953 s->pc += 2;
6954 insn |= (uint32_t)insn_hw1 << 16;
6955
6956 if ((insn & 0xf800e800) != 0xf000e800) {
6957 ARCH(6T2);
6958 }
6959
6960 rn = (insn >> 16) & 0xf;
6961 rs = (insn >> 12) & 0xf;
6962 rd = (insn >> 8) & 0xf;
6963 rm = insn & 0xf;
6964 switch ((insn >> 25) & 0xf) {
6965 case 0: case 1: case 2: case 3:
6966 /* 16-bit instructions. Should never happen. */
6967 abort();
6968 case 4:
6969 if (insn & (1 << 22)) {
6970 /* Other load/store, table branch. */
6971 if (insn & 0x01200000) {
6972 /* Load/store doubleword. */
6973 if (rn == 15) {
b0109805
PB
6974 addr = new_tmp();
6975 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 6976 } else {
b0109805 6977 addr = load_reg(s, rn);
9ee6e8bb
PB
6978 }
6979 offset = (insn & 0xff) * 4;
6980 if ((insn & (1 << 23)) == 0)
6981 offset = -offset;
6982 if (insn & (1 << 24)) {
b0109805 6983 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
6984 offset = 0;
6985 }
6986 if (insn & (1 << 20)) {
6987 /* ldrd */
b0109805
PB
6988 tmp = gen_ld32(addr, IS_USER(s));
6989 store_reg(s, rs, tmp);
6990 tcg_gen_addi_i32(addr, addr, 4);
6991 tmp = gen_ld32(addr, IS_USER(s));
6992 store_reg(s, rd, tmp);
9ee6e8bb
PB
6993 } else {
6994 /* strd */
b0109805
PB
6995 tmp = load_reg(s, rs);
6996 gen_st32(tmp, addr, IS_USER(s));
6997 tcg_gen_addi_i32(addr, addr, 4);
6998 tmp = load_reg(s, rd);
6999 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7000 }
7001 if (insn & (1 << 21)) {
7002 /* Base writeback. */
7003 if (rn == 15)
7004 goto illegal_op;
b0109805
PB
7005 tcg_gen_addi_i32(addr, addr, offset - 4);
7006 store_reg(s, rn, addr);
7007 } else {
7008 dead_tmp(addr);
9ee6e8bb
PB
7009 }
7010 } else if ((insn & (1 << 23)) == 0) {
7011 /* Load/store exclusive word. */
2c0262af 7012 gen_movl_T1_reg(s, rn);
72f1c62f 7013 addr = cpu_T[1];
2c0262af 7014 if (insn & (1 << 20)) {
8f8e3aa4
PB
7015 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7016 tmp = gen_ld32(addr, IS_USER(s));
7017 store_reg(s, rd, tmp);
9ee6e8bb 7018 } else {
8f8e3aa4
PB
7019 int label = gen_new_label();
7020 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
7021 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7022 0, label);
8f8e3aa4
PB
7023 tmp = load_reg(s, rs);
7024 gen_st32(tmp, cpu_T[1], IS_USER(s));
7025 gen_set_label(label);
7026 gen_movl_reg_T0(s, rd);
9ee6e8bb 7027 }
9ee6e8bb
PB
7028 } else if ((insn & (1 << 6)) == 0) {
7029 /* Table Branch. */
7030 if (rn == 15) {
b0109805
PB
7031 addr = new_tmp();
7032 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7033 } else {
b0109805 7034 addr = load_reg(s, rn);
9ee6e8bb 7035 }
b26eefb6 7036 tmp = load_reg(s, rm);
b0109805 7037 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7038 if (insn & (1 << 4)) {
7039 /* tbh */
b0109805 7040 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7041 dead_tmp(tmp);
b0109805 7042 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7043 } else { /* tbb */
b26eefb6 7044 dead_tmp(tmp);
b0109805 7045 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7046 }
b0109805
PB
7047 dead_tmp(addr);
7048 tcg_gen_shli_i32(tmp, tmp, 1);
7049 tcg_gen_addi_i32(tmp, tmp, s->pc);
7050 store_reg(s, 15, tmp);
9ee6e8bb
PB
7051 } else {
7052 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7053 /* ??? These are not really atomic. However we know
7054 we never have multiple CPUs running in parallel,
7055 so it is good enough. */
9ee6e8bb 7056 op = (insn >> 4) & 0x3;
8f8e3aa4
PB
7057 /* Must use a global reg for the address because we have
7058 a conditional branch in the store instruction. */
9ee6e8bb 7059 gen_movl_T1_reg(s, rn);
8f8e3aa4 7060 addr = cpu_T[1];
9ee6e8bb 7061 if (insn & (1 << 20)) {
8f8e3aa4 7062 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7063 switch (op) {
7064 case 0:
8f8e3aa4 7065 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7066 break;
2c0262af 7067 case 1:
8f8e3aa4 7068 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7069 break;
9ee6e8bb 7070 case 3:
8f8e3aa4
PB
7071 tmp = gen_ld32(addr, IS_USER(s));
7072 tcg_gen_addi_i32(addr, addr, 4);
7073 tmp2 = gen_ld32(addr, IS_USER(s));
7074 store_reg(s, rd, tmp2);
2c0262af
FB
7075 break;
7076 default:
9ee6e8bb
PB
7077 goto illegal_op;
7078 }
8f8e3aa4 7079 store_reg(s, rs, tmp);
9ee6e8bb 7080 } else {
8f8e3aa4
PB
7081 int label = gen_new_label();
7082 /* Must use a global that is not killed by the branch. */
7083 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a 7084 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
8f8e3aa4 7085 tmp = load_reg(s, rs);
9ee6e8bb
PB
7086 switch (op) {
7087 case 0:
8f8e3aa4 7088 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7089 break;
7090 case 1:
8f8e3aa4 7091 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7092 break;
2c0262af 7093 case 3:
8f8e3aa4
PB
7094 gen_st32(tmp, addr, IS_USER(s));
7095 tcg_gen_addi_i32(addr, addr, 4);
7096 tmp = load_reg(s, rd);
7097 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7098 break;
9ee6e8bb
PB
7099 default:
7100 goto illegal_op;
2c0262af 7101 }
8f8e3aa4 7102 gen_set_label(label);
9ee6e8bb
PB
7103 gen_movl_reg_T0(s, rm);
7104 }
7105 }
7106 } else {
7107 /* Load/store multiple, RFE, SRS. */
7108 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7109 /* Not available in user mode. */
b0109805 7110 if (IS_USER(s))
9ee6e8bb
PB
7111 goto illegal_op;
7112 if (insn & (1 << 20)) {
7113 /* rfe */
b0109805
PB
7114 addr = load_reg(s, rn);
7115 if ((insn & (1 << 24)) == 0)
7116 tcg_gen_addi_i32(addr, addr, -8);
7117 /* Load PC into tmp and CPSR into tmp2. */
7118 tmp = gen_ld32(addr, 0);
7119 tcg_gen_addi_i32(addr, addr, 4);
7120 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7121 if (insn & (1 << 21)) {
7122 /* Base writeback. */
b0109805
PB
7123 if (insn & (1 << 24)) {
7124 tcg_gen_addi_i32(addr, addr, 4);
7125 } else {
7126 tcg_gen_addi_i32(addr, addr, -4);
7127 }
7128 store_reg(s, rn, addr);
7129 } else {
7130 dead_tmp(addr);
9ee6e8bb 7131 }
b0109805 7132 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7133 } else {
7134 /* srs */
7135 op = (insn & 0x1f);
7136 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7137 addr = load_reg(s, 13);
9ee6e8bb 7138 } else {
b0109805
PB
7139 addr = new_tmp();
7140 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7141 }
7142 if ((insn & (1 << 24)) == 0) {
b0109805 7143 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7144 }
b0109805
PB
7145 tmp = load_reg(s, 14);
7146 gen_st32(tmp, addr, 0);
7147 tcg_gen_addi_i32(addr, addr, 4);
7148 tmp = new_tmp();
7149 gen_helper_cpsr_read(tmp);
7150 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7151 if (insn & (1 << 21)) {
7152 if ((insn & (1 << 24)) == 0) {
b0109805 7153 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7154 } else {
b0109805 7155 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7156 }
7157 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7158 store_reg(s, 13, addr);
9ee6e8bb 7159 } else {
b0109805
PB
7160 gen_helper_set_r13_banked(cpu_env,
7161 tcg_const_i32(op), addr);
9ee6e8bb 7162 }
b0109805
PB
7163 } else {
7164 dead_tmp(addr);
9ee6e8bb
PB
7165 }
7166 }
7167 } else {
7168 int i;
7169 /* Load/store multiple. */
b0109805 7170 addr = load_reg(s, rn);
9ee6e8bb
PB
7171 offset = 0;
7172 for (i = 0; i < 16; i++) {
7173 if (insn & (1 << i))
7174 offset += 4;
7175 }
7176 if (insn & (1 << 24)) {
b0109805 7177 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7178 }
7179
7180 for (i = 0; i < 16; i++) {
7181 if ((insn & (1 << i)) == 0)
7182 continue;
7183 if (insn & (1 << 20)) {
7184 /* Load. */
b0109805 7185 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7186 if (i == 15) {
b0109805 7187 gen_bx(s, tmp);
9ee6e8bb 7188 } else {
b0109805 7189 store_reg(s, i, tmp);
9ee6e8bb
PB
7190 }
7191 } else {
7192 /* Store. */
b0109805
PB
7193 tmp = load_reg(s, i);
7194 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7195 }
b0109805 7196 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7197 }
7198 if (insn & (1 << 21)) {
7199 /* Base register writeback. */
7200 if (insn & (1 << 24)) {
b0109805 7201 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7202 }
7203 /* Fault if writeback register is in register list. */
7204 if (insn & (1 << rn))
7205 goto illegal_op;
b0109805
PB
7206 store_reg(s, rn, addr);
7207 } else {
7208 dead_tmp(addr);
9ee6e8bb
PB
7209 }
7210 }
7211 }
7212 break;
7213 case 5: /* Data processing register constant shift. */
7214 if (rn == 15)
7215 gen_op_movl_T0_im(0);
7216 else
7217 gen_movl_T0_reg(s, rn);
7218 gen_movl_T1_reg(s, rm);
7219 op = (insn >> 21) & 0xf;
7220 shiftop = (insn >> 4) & 3;
7221 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7222 conds = (insn & (1 << 20)) != 0;
7223 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7224 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
7225 if (gen_thumb2_data_op(s, op, conds, 0))
7226 goto illegal_op;
7227 if (rd != 15)
7228 gen_movl_reg_T0(s, rd);
7229 break;
7230 case 13: /* Misc data processing. */
7231 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7232 if (op < 4 && (insn & 0xf000) != 0xf000)
7233 goto illegal_op;
7234 switch (op) {
7235 case 0: /* Register controlled shift. */
8984bd2e
PB
7236 tmp = load_reg(s, rn);
7237 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7238 if ((insn & 0x70) != 0)
7239 goto illegal_op;
7240 op = (insn >> 21) & 3;
8984bd2e
PB
7241 logic_cc = (insn & (1 << 20)) != 0;
7242 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7243 if (logic_cc)
7244 gen_logic_CC(tmp);
7245 store_reg(s, rd, tmp);
9ee6e8bb
PB
7246 break;
7247 case 1: /* Sign/zero extend. */
5e3f878a 7248 tmp = load_reg(s, rm);
9ee6e8bb
PB
7249 shift = (insn >> 4) & 3;
 7250 /* ??? In many cases it's not necessary to do a
7251 rotate, a shift is sufficient. */
7252 if (shift != 0)
5e3f878a 7253 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7254 op = (insn >> 20) & 7;
7255 switch (op) {
5e3f878a
PB
7256 case 0: gen_sxth(tmp); break;
7257 case 1: gen_uxth(tmp); break;
7258 case 2: gen_sxtb16(tmp); break;
7259 case 3: gen_uxtb16(tmp); break;
7260 case 4: gen_sxtb(tmp); break;
7261 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7262 default: goto illegal_op;
7263 }
7264 if (rn != 15) {
5e3f878a 7265 tmp2 = load_reg(s, rn);
9ee6e8bb 7266 if ((op >> 1) == 1) {
5e3f878a 7267 gen_add16(tmp, tmp2);
9ee6e8bb 7268 } else {
5e3f878a
PB
7269 tcg_gen_add_i32(tmp, tmp, tmp2);
7270 dead_tmp(tmp2);
9ee6e8bb
PB
7271 }
7272 }
5e3f878a 7273 store_reg(s, rd, tmp);
9ee6e8bb
PB
7274 break;
7275 case 2: /* SIMD add/subtract. */
7276 op = (insn >> 20) & 7;
7277 shift = (insn >> 4) & 7;
7278 if ((op & 3) == 3 || (shift & 3) == 3)
7279 goto illegal_op;
6ddbc6e4
PB
7280 tmp = load_reg(s, rn);
7281 tmp2 = load_reg(s, rm);
7282 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7283 dead_tmp(tmp2);
7284 store_reg(s, rd, tmp);
9ee6e8bb
PB
7285 break;
7286 case 3: /* Other data processing. */
7287 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7288 if (op < 4) {
7289 /* Saturating add/subtract. */
d9ba4830
PB
7290 tmp = load_reg(s, rn);
7291 tmp2 = load_reg(s, rm);
9ee6e8bb 7292 if (op & 2)
d9ba4830 7293 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7294 if (op & 1)
d9ba4830 7295 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7296 else
d9ba4830
PB
7297 gen_helper_add_saturate(tmp, tmp, tmp2);
7298 dead_tmp(tmp2);
9ee6e8bb 7299 } else {
d9ba4830 7300 tmp = load_reg(s, rn);
9ee6e8bb
PB
7301 switch (op) {
7302 case 0x0a: /* rbit */
d9ba4830 7303 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7304 break;
7305 case 0x08: /* rev */
d9ba4830 7306 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb
PB
7307 break;
7308 case 0x09: /* rev16 */
d9ba4830 7309 gen_rev16(tmp);
9ee6e8bb
PB
7310 break;
7311 case 0x0b: /* revsh */
d9ba4830 7312 gen_revsh(tmp);
9ee6e8bb
PB
7313 break;
7314 case 0x10: /* sel */
d9ba4830 7315 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7316 tmp3 = new_tmp();
7317 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7318 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7319 dead_tmp(tmp3);
d9ba4830 7320 dead_tmp(tmp2);
9ee6e8bb
PB
7321 break;
7322 case 0x18: /* clz */
d9ba4830 7323 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7324 break;
7325 default:
7326 goto illegal_op;
7327 }
7328 }
d9ba4830 7329 store_reg(s, rd, tmp);
9ee6e8bb
PB
7330 break;
7331 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7332 op = (insn >> 4) & 0xf;
d9ba4830
PB
7333 tmp = load_reg(s, rn);
7334 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7335 switch ((insn >> 20) & 7) {
7336 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7337 tcg_gen_mul_i32(tmp, tmp, tmp2);
7338 dead_tmp(tmp2);
9ee6e8bb 7339 if (rs != 15) {
d9ba4830 7340 tmp2 = load_reg(s, rs);
9ee6e8bb 7341 if (op)
d9ba4830 7342 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7343 else
d9ba4830
PB
7344 tcg_gen_add_i32(tmp, tmp, tmp2);
7345 dead_tmp(tmp2);
9ee6e8bb 7346 }
9ee6e8bb
PB
7347 break;
7348 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7349 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7350 dead_tmp(tmp2);
9ee6e8bb 7351 if (rs != 15) {
d9ba4830
PB
7352 tmp2 = load_reg(s, rs);
7353 gen_helper_add_setq(tmp, tmp, tmp2);
7354 dead_tmp(tmp2);
9ee6e8bb 7355 }
9ee6e8bb
PB
7356 break;
7357 case 2: /* Dual multiply add. */
7358 case 4: /* Dual multiply subtract. */
7359 if (op)
d9ba4830
PB
7360 gen_swap_half(tmp2);
7361 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7362 /* This addition cannot overflow. */
7363 if (insn & (1 << 22)) {
d9ba4830 7364 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7365 } else {
d9ba4830 7366 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7367 }
d9ba4830 7368 dead_tmp(tmp2);
9ee6e8bb
PB
7369 if (rs != 15)
7370 {
d9ba4830
PB
7371 tmp2 = load_reg(s, rs);
7372 gen_helper_add_setq(tmp, tmp, tmp2);
7373 dead_tmp(tmp2);
9ee6e8bb 7374 }
9ee6e8bb
PB
7375 break;
7376 case 3: /* 32 * 16 -> 32msb */
7377 if (op)
d9ba4830 7378 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7379 else
d9ba4830 7380 gen_sxth(tmp2);
a7812ae4
PB
7381 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7382 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7383 tmp = new_tmp();
a7812ae4 7384 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb
PB
7385 if (rs != 15)
7386 {
d9ba4830
PB
7387 tmp2 = load_reg(s, rs);
7388 gen_helper_add_setq(tmp, tmp, tmp2);
7389 dead_tmp(tmp2);
9ee6e8bb 7390 }
9ee6e8bb
PB
7391 break;
7392 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7393 gen_imull(tmp, tmp2);
7394 if (insn & (1 << 5)) {
7395 gen_roundqd(tmp, tmp2);
7396 dead_tmp(tmp2);
7397 } else {
7398 dead_tmp(tmp);
7399 tmp = tmp2;
7400 }
9ee6e8bb 7401 if (rs != 15) {
d9ba4830 7402 tmp2 = load_reg(s, rs);
9ee6e8bb 7403 if (insn & (1 << 21)) {
d9ba4830 7404 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7405 } else {
d9ba4830 7406 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7407 }
d9ba4830 7408 dead_tmp(tmp2);
2c0262af 7409 }
9ee6e8bb
PB
7410 break;
7411 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7412 gen_helper_usad8(tmp, tmp, tmp2);
7413 dead_tmp(tmp2);
9ee6e8bb 7414 if (rs != 15) {
d9ba4830
PB
7415 tmp2 = load_reg(s, rs);
7416 tcg_gen_add_i32(tmp, tmp, tmp2);
7417 dead_tmp(tmp2);
5fd46862 7418 }
9ee6e8bb 7419 break;
2c0262af 7420 }
d9ba4830 7421 store_reg(s, rd, tmp);
2c0262af 7422 break;
9ee6e8bb
PB
7423 case 6: case 7: /* 64-bit multiply, Divide. */
7424 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7425 tmp = load_reg(s, rn);
7426 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7427 if ((op & 0x50) == 0x10) {
7428 /* sdiv, udiv */
7429 if (!arm_feature(env, ARM_FEATURE_DIV))
7430 goto illegal_op;
7431 if (op & 0x20)
5e3f878a 7432 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7433 else
5e3f878a
PB
7434 gen_helper_sdiv(tmp, tmp, tmp2);
7435 dead_tmp(tmp2);
7436 store_reg(s, rd, tmp);
9ee6e8bb
PB
7437 } else if ((op & 0xe) == 0xc) {
7438 /* Dual multiply accumulate long. */
7439 if (op & 1)
5e3f878a
PB
7440 gen_swap_half(tmp2);
7441 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7442 if (op & 0x10) {
5e3f878a 7443 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7444 } else {
5e3f878a 7445 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7446 }
5e3f878a 7447 dead_tmp(tmp2);
a7812ae4
PB
7448 /* BUGFIX */
7449 tmp64 = tcg_temp_new_i64();
7450 tcg_gen_ext_i32_i64(tmp64, tmp);
7451 dead_tmp(tmp);
7452 gen_addq(s, tmp64, rs, rd);
7453 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7454 } else {
9ee6e8bb
PB
7455 if (op & 0x20) {
7456 /* Unsigned 64-bit multiply */
a7812ae4 7457 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7458 } else {
9ee6e8bb
PB
7459 if (op & 8) {
7460 /* smlalxy */
5e3f878a
PB
7461 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7462 dead_tmp(tmp2);
a7812ae4
PB
7463 tmp64 = tcg_temp_new_i64();
7464 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7465 dead_tmp(tmp);
9ee6e8bb
PB
7466 } else {
7467 /* Signed 64-bit multiply */
a7812ae4 7468 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7469 }
b5ff1b31 7470 }
9ee6e8bb
PB
7471 if (op & 4) {
7472 /* umaal */
a7812ae4
PB
7473 gen_addq_lo(s, tmp64, rs);
7474 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7475 } else if (op & 0x40) {
7476 /* 64-bit accumulate. */
a7812ae4 7477 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7478 }
a7812ae4 7479 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7480 }
2c0262af 7481 break;
9ee6e8bb
PB
7482 }
7483 break;
7484 case 6: case 7: case 14: case 15:
7485 /* Coprocessor. */
7486 if (((insn >> 24) & 3) == 3) {
7487 /* Translate into the equivalent ARM encoding. */
7488 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7489 if (disas_neon_data_insn(env, s, insn))
7490 goto illegal_op;
7491 } else {
7492 if (insn & (1 << 28))
7493 goto illegal_op;
7494 if (disas_coproc_insn (env, s, insn))
7495 goto illegal_op;
7496 }
7497 break;
7498 case 8: case 9: case 10: case 11:
7499 if (insn & (1 << 15)) {
7500 /* Branches, misc control. */
7501 if (insn & 0x5000) {
7502 /* Unconditional branch. */
7503 /* signextend(hw1[10:0]) -> offset[:12]. */
7504 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7505 /* hw1[10:0] -> offset[11:1]. */
7506 offset |= (insn & 0x7ff) << 1;
7507 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7508 offset[24:22] already have the same value because of the
7509 sign extension above. */
7510 offset ^= ((~insn) & (1 << 13)) << 10;
7511 offset ^= ((~insn) & (1 << 11)) << 11;
7512
9ee6e8bb
PB
7513 if (insn & (1 << 14)) {
7514 /* Branch and link. */
b0109805 7515 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7516 gen_movl_reg_T1(s, 14);
b5ff1b31 7517 }
3b46e624 7518
b0109805 7519 offset += s->pc;
9ee6e8bb
PB
7520 if (insn & (1 << 12)) {
7521 /* b/bl */
b0109805 7522 gen_jmp(s, offset);
9ee6e8bb
PB
7523 } else {
7524 /* blx */
b0109805
PB
7525 offset &= ~(uint32_t)2;
7526 gen_bx_im(s, offset);
2c0262af 7527 }
9ee6e8bb
PB
7528 } else if (((insn >> 23) & 7) == 7) {
7529 /* Misc control */
7530 if (insn & (1 << 13))
7531 goto illegal_op;
7532
7533 if (insn & (1 << 26)) {
7534 /* Secure monitor call (v6Z) */
7535 goto illegal_op; /* not implemented. */
2c0262af 7536 } else {
9ee6e8bb
PB
7537 op = (insn >> 20) & 7;
7538 switch (op) {
7539 case 0: /* msr cpsr. */
7540 if (IS_M(env)) {
8984bd2e
PB
7541 tmp = load_reg(s, rn);
7542 addr = tcg_const_i32(insn & 0xff);
7543 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7544 gen_lookup_tb(s);
7545 break;
7546 }
7547 /* fall through */
7548 case 1: /* msr spsr. */
7549 if (IS_M(env))
7550 goto illegal_op;
7551 gen_movl_T0_reg(s, rn);
7552 if (gen_set_psr_T0(s,
7553 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7554 op == 1))
7555 goto illegal_op;
7556 break;
7557 case 2: /* cps, nop-hint. */
7558 if (((insn >> 8) & 7) == 0) {
7559 gen_nop_hint(s, insn & 0xff);
7560 }
7561 /* Implemented as NOP in user mode. */
7562 if (IS_USER(s))
7563 break;
7564 offset = 0;
7565 imm = 0;
7566 if (insn & (1 << 10)) {
7567 if (insn & (1 << 7))
7568 offset |= CPSR_A;
7569 if (insn & (1 << 6))
7570 offset |= CPSR_I;
7571 if (insn & (1 << 5))
7572 offset |= CPSR_F;
7573 if (insn & (1 << 9))
7574 imm = CPSR_A | CPSR_I | CPSR_F;
7575 }
7576 if (insn & (1 << 8)) {
7577 offset |= 0x1f;
7578 imm |= (insn & 0x1f);
7579 }
7580 if (offset) {
7581 gen_op_movl_T0_im(imm);
7582 gen_set_psr_T0(s, offset, 0);
7583 }
7584 break;
7585 case 3: /* Special control operations. */
7586 op = (insn >> 4) & 0xf;
7587 switch (op) {
7588 case 2: /* clrex */
8f8e3aa4 7589 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7590 break;
7591 case 4: /* dsb */
7592 case 5: /* dmb */
7593 case 6: /* isb */
7594 /* These execute as NOPs. */
7595 ARCH(7);
7596 break;
7597 default:
7598 goto illegal_op;
7599 }
7600 break;
7601 case 4: /* bxj */
7602 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7603 tmp = load_reg(s, rn);
7604 gen_bx(s, tmp);
9ee6e8bb
PB
7605 break;
7606 case 5: /* Exception return. */
7607 /* Unpredictable in user mode. */
7608 goto illegal_op;
7609 case 6: /* mrs cpsr. */
8984bd2e 7610 tmp = new_tmp();
9ee6e8bb 7611 if (IS_M(env)) {
8984bd2e
PB
7612 addr = tcg_const_i32(insn & 0xff);
7613 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7614 } else {
8984bd2e 7615 gen_helper_cpsr_read(tmp);
9ee6e8bb 7616 }
8984bd2e 7617 store_reg(s, rd, tmp);
9ee6e8bb
PB
7618 break;
7619 case 7: /* mrs spsr. */
7620 /* Not accessible in user mode. */
7621 if (IS_USER(s) || IS_M(env))
7622 goto illegal_op;
d9ba4830
PB
7623 tmp = load_cpu_field(spsr);
7624 store_reg(s, rd, tmp);
9ee6e8bb 7625 break;
2c0262af
FB
7626 }
7627 }
9ee6e8bb
PB
7628 } else {
7629 /* Conditional branch. */
7630 op = (insn >> 22) & 0xf;
7631 /* Generate a conditional jump to next instruction. */
7632 s->condlabel = gen_new_label();
d9ba4830 7633 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7634 s->condjmp = 1;
7635
7636 /* offset[11:1] = insn[10:0] */
7637 offset = (insn & 0x7ff) << 1;
7638 /* offset[17:12] = insn[21:16]. */
7639 offset |= (insn & 0x003f0000) >> 4;
7640 /* offset[31:20] = insn[26]. */
7641 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7642 /* offset[18] = insn[13]. */
7643 offset |= (insn & (1 << 13)) << 5;
7644 /* offset[19] = insn[11]. */
7645 offset |= (insn & (1 << 11)) << 8;
7646
7647 /* jump to the offset */
b0109805 7648 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7649 }
7650 } else {
7651 /* Data processing immediate. */
7652 if (insn & (1 << 25)) {
7653 if (insn & (1 << 24)) {
7654 if (insn & (1 << 20))
7655 goto illegal_op;
7656 /* Bitfield/Saturate. */
7657 op = (insn >> 21) & 7;
7658 imm = insn & 0x1f;
7659 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7660 if (rn == 15) {
7661 tmp = new_tmp();
7662 tcg_gen_movi_i32(tmp, 0);
7663 } else {
7664 tmp = load_reg(s, rn);
7665 }
9ee6e8bb
PB
7666 switch (op) {
7667 case 2: /* Signed bitfield extract. */
7668 imm++;
7669 if (shift + imm > 32)
7670 goto illegal_op;
7671 if (imm < 32)
6ddbc6e4 7672 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7673 break;
7674 case 6: /* Unsigned bitfield extract. */
7675 imm++;
7676 if (shift + imm > 32)
7677 goto illegal_op;
7678 if (imm < 32)
6ddbc6e4 7679 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7680 break;
7681 case 3: /* Bitfield insert/clear. */
7682 if (imm < shift)
7683 goto illegal_op;
7684 imm = imm + 1 - shift;
7685 if (imm != 32) {
6ddbc6e4 7686 tmp2 = load_reg(s, rd);
8f8e3aa4 7687 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7688 dead_tmp(tmp2);
9ee6e8bb
PB
7689 }
7690 break;
7691 case 7:
7692 goto illegal_op;
7693 default: /* Saturate. */
9ee6e8bb
PB
7694 if (shift) {
7695 if (op & 1)
6ddbc6e4 7696 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7697 else
6ddbc6e4 7698 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7699 }
6ddbc6e4 7700 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7701 if (op & 4) {
7702 /* Unsigned. */
9ee6e8bb 7703 if ((op & 1) && shift == 0)
6ddbc6e4 7704 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7705 else
6ddbc6e4 7706 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7707 } else {
9ee6e8bb 7708 /* Signed. */
9ee6e8bb 7709 if ((op & 1) && shift == 0)
6ddbc6e4 7710 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7711 else
6ddbc6e4 7712 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7713 }
9ee6e8bb 7714 break;
2c0262af 7715 }
6ddbc6e4 7716 store_reg(s, rd, tmp);
9ee6e8bb
PB
7717 } else {
7718 imm = ((insn & 0x04000000) >> 15)
7719 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7720 if (insn & (1 << 22)) {
7721 /* 16-bit immediate. */
7722 imm |= (insn >> 4) & 0xf000;
7723 if (insn & (1 << 23)) {
7724 /* movt */
5e3f878a 7725 tmp = load_reg(s, rd);
86831435 7726 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7727 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7728 } else {
9ee6e8bb 7729 /* movw */
5e3f878a
PB
7730 tmp = new_tmp();
7731 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7732 }
7733 } else {
9ee6e8bb
PB
7734 /* Add/sub 12-bit immediate. */
7735 if (rn == 15) {
b0109805 7736 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7737 if (insn & (1 << 23))
b0109805 7738 offset -= imm;
9ee6e8bb 7739 else
b0109805 7740 offset += imm;
5e3f878a
PB
7741 tmp = new_tmp();
7742 tcg_gen_movi_i32(tmp, offset);
2c0262af 7743 } else {
5e3f878a 7744 tmp = load_reg(s, rn);
9ee6e8bb 7745 if (insn & (1 << 23))
5e3f878a 7746 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7747 else
5e3f878a 7748 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7749 }
9ee6e8bb 7750 }
5e3f878a 7751 store_reg(s, rd, tmp);
191abaa2 7752 }
9ee6e8bb
PB
7753 } else {
7754 int shifter_out = 0;
7755 /* modified 12-bit immediate. */
7756 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7757 imm = (insn & 0xff);
7758 switch (shift) {
7759 case 0: /* XY */
7760 /* Nothing to do. */
7761 break;
7762 case 1: /* 00XY00XY */
7763 imm |= imm << 16;
7764 break;
7765 case 2: /* XY00XY00 */
7766 imm |= imm << 16;
7767 imm <<= 8;
7768 break;
7769 case 3: /* XYXYXYXY */
7770 imm |= imm << 16;
7771 imm |= imm << 8;
7772 break;
7773 default: /* Rotated constant. */
7774 shift = (shift << 1) | (imm >> 7);
7775 imm |= 0x80;
7776 imm = imm << (32 - shift);
7777 shifter_out = 1;
7778 break;
b5ff1b31 7779 }
9ee6e8bb
PB
7780 gen_op_movl_T1_im(imm);
7781 rn = (insn >> 16) & 0xf;
7782 if (rn == 15)
7783 gen_op_movl_T0_im(0);
7784 else
7785 gen_movl_T0_reg(s, rn);
7786 op = (insn >> 21) & 0xf;
7787 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7788 shifter_out))
7789 goto illegal_op;
7790 rd = (insn >> 8) & 0xf;
7791 if (rd != 15) {
7792 gen_movl_reg_T0(s, rd);
2c0262af 7793 }
2c0262af 7794 }
9ee6e8bb
PB
7795 }
7796 break;
7797 case 12: /* Load/store single data item. */
7798 {
7799 int postinc = 0;
7800 int writeback = 0;
b0109805 7801 int user;
9ee6e8bb
PB
7802 if ((insn & 0x01100000) == 0x01000000) {
7803 if (disas_neon_ls_insn(env, s, insn))
c1713132 7804 goto illegal_op;
9ee6e8bb
PB
7805 break;
7806 }
b0109805 7807 user = IS_USER(s);
9ee6e8bb 7808 if (rn == 15) {
b0109805 7809 addr = new_tmp();
9ee6e8bb
PB
7810 /* PC relative. */
7811 /* s->pc has already been incremented by 4. */
7812 imm = s->pc & 0xfffffffc;
7813 if (insn & (1 << 23))
7814 imm += insn & 0xfff;
7815 else
7816 imm -= insn & 0xfff;
b0109805 7817 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7818 } else {
b0109805 7819 addr = load_reg(s, rn);
9ee6e8bb
PB
7820 if (insn & (1 << 23)) {
7821 /* Positive offset. */
7822 imm = insn & 0xfff;
b0109805 7823 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7824 } else {
7825 op = (insn >> 8) & 7;
7826 imm = insn & 0xff;
7827 switch (op) {
7828 case 0: case 8: /* Shifted Register. */
7829 shift = (insn >> 4) & 0xf;
7830 if (shift > 3)
18c9b560 7831 goto illegal_op;
b26eefb6 7832 tmp = load_reg(s, rm);
9ee6e8bb 7833 if (shift)
b26eefb6 7834 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7835 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7836 dead_tmp(tmp);
9ee6e8bb
PB
7837 break;
7838 case 4: /* Negative offset. */
b0109805 7839 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7840 break;
7841 case 6: /* User privilege. */
b0109805
PB
7842 tcg_gen_addi_i32(addr, addr, imm);
7843 user = 1;
9ee6e8bb
PB
7844 break;
7845 case 1: /* Post-decrement. */
7846 imm = -imm;
7847 /* Fall through. */
7848 case 3: /* Post-increment. */
9ee6e8bb
PB
7849 postinc = 1;
7850 writeback = 1;
7851 break;
7852 case 5: /* Pre-decrement. */
7853 imm = -imm;
7854 /* Fall through. */
7855 case 7: /* Pre-increment. */
b0109805 7856 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7857 writeback = 1;
7858 break;
7859 default:
b7bcbe95 7860 goto illegal_op;
9ee6e8bb
PB
7861 }
7862 }
7863 }
7864 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7865 if (insn & (1 << 20)) {
7866 /* Load. */
7867 if (rs == 15 && op != 2) {
7868 if (op & 2)
b5ff1b31 7869 goto illegal_op;
9ee6e8bb
PB
7870 /* Memory hint. Implemented as NOP. */
7871 } else {
7872 switch (op) {
b0109805
PB
7873 case 0: tmp = gen_ld8u(addr, user); break;
7874 case 4: tmp = gen_ld8s(addr, user); break;
7875 case 1: tmp = gen_ld16u(addr, user); break;
7876 case 5: tmp = gen_ld16s(addr, user); break;
7877 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
7878 default: goto illegal_op;
7879 }
7880 if (rs == 15) {
b0109805 7881 gen_bx(s, tmp);
9ee6e8bb 7882 } else {
b0109805 7883 store_reg(s, rs, tmp);
9ee6e8bb
PB
7884 }
7885 }
7886 } else {
7887 /* Store. */
7888 if (rs == 15)
b7bcbe95 7889 goto illegal_op;
b0109805 7890 tmp = load_reg(s, rs);
9ee6e8bb 7891 switch (op) {
b0109805
PB
7892 case 0: gen_st8(tmp, addr, user); break;
7893 case 1: gen_st16(tmp, addr, user); break;
7894 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 7895 default: goto illegal_op;
b7bcbe95 7896 }
2c0262af 7897 }
9ee6e8bb 7898 if (postinc)
b0109805
PB
7899 tcg_gen_addi_i32(addr, addr, imm);
7900 if (writeback) {
7901 store_reg(s, rn, addr);
7902 } else {
7903 dead_tmp(addr);
7904 }
9ee6e8bb
PB
7905 }
7906 break;
7907 default:
7908 goto illegal_op;
2c0262af 7909 }
9ee6e8bb
PB
7910 return 0;
7911illegal_op:
7912 return 1;
2c0262af
FB
7913}
7914
9ee6e8bb 7915static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
7916{
7917 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7918 int32_t offset;
7919 int i;
b26eefb6 7920 TCGv tmp;
d9ba4830 7921 TCGv tmp2;
b0109805 7922 TCGv addr;
99c475ab 7923
9ee6e8bb
PB
7924 if (s->condexec_mask) {
7925 cond = s->condexec_cond;
7926 s->condlabel = gen_new_label();
d9ba4830 7927 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7928 s->condjmp = 1;
7929 }
7930
b5ff1b31 7931 insn = lduw_code(s->pc);
99c475ab 7932 s->pc += 2;
b5ff1b31 7933
99c475ab
FB
7934 switch (insn >> 12) {
7935 case 0: case 1:
7936 rd = insn & 7;
7937 op = (insn >> 11) & 3;
7938 if (op == 3) {
7939 /* add/subtract */
7940 rn = (insn >> 3) & 7;
7941 gen_movl_T0_reg(s, rn);
7942 if (insn & (1 << 10)) {
7943 /* immediate */
7944 gen_op_movl_T1_im((insn >> 6) & 7);
7945 } else {
7946 /* reg */
7947 rm = (insn >> 6) & 7;
7948 gen_movl_T1_reg(s, rm);
7949 }
9ee6e8bb
PB
7950 if (insn & (1 << 9)) {
7951 if (s->condexec_mask)
7952 gen_op_subl_T0_T1();
7953 else
7954 gen_op_subl_T0_T1_cc();
7955 } else {
7956 if (s->condexec_mask)
7957 gen_op_addl_T0_T1();
7958 else
7959 gen_op_addl_T0_T1_cc();
7960 }
99c475ab
FB
7961 gen_movl_reg_T0(s, rd);
7962 } else {
7963 /* shift immediate */
7964 rm = (insn >> 3) & 7;
7965 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
7966 tmp = load_reg(s, rm);
7967 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7968 if (!s->condexec_mask)
7969 gen_logic_CC(tmp);
7970 store_reg(s, rd, tmp);
99c475ab
FB
7971 }
7972 break;
7973 case 2: case 3:
7974 /* arithmetic large immediate */
7975 op = (insn >> 11) & 3;
7976 rd = (insn >> 8) & 0x7;
7977 if (op == 0) {
7978 gen_op_movl_T0_im(insn & 0xff);
7979 } else {
7980 gen_movl_T0_reg(s, rd);
7981 gen_op_movl_T1_im(insn & 0xff);
7982 }
7983 switch (op) {
7984 case 0: /* mov */
9ee6e8bb
PB
7985 if (!s->condexec_mask)
7986 gen_op_logic_T0_cc();
99c475ab
FB
7987 break;
7988 case 1: /* cmp */
7989 gen_op_subl_T0_T1_cc();
7990 break;
7991 case 2: /* add */
9ee6e8bb
PB
7992 if (s->condexec_mask)
7993 gen_op_addl_T0_T1();
7994 else
7995 gen_op_addl_T0_T1_cc();
99c475ab
FB
7996 break;
7997 case 3: /* sub */
9ee6e8bb
PB
7998 if (s->condexec_mask)
7999 gen_op_subl_T0_T1();
8000 else
8001 gen_op_subl_T0_T1_cc();
99c475ab
FB
8002 break;
8003 }
8004 if (op != 1)
8005 gen_movl_reg_T0(s, rd);
8006 break;
8007 case 4:
8008 if (insn & (1 << 11)) {
8009 rd = (insn >> 8) & 7;
5899f386
FB
8010 /* load pc-relative. Bit 1 of PC is ignored. */
8011 val = s->pc + 2 + ((insn & 0xff) * 4);
8012 val &= ~(uint32_t)2;
b0109805
PB
8013 addr = new_tmp();
8014 tcg_gen_movi_i32(addr, val);
8015 tmp = gen_ld32(addr, IS_USER(s));
8016 dead_tmp(addr);
8017 store_reg(s, rd, tmp);
99c475ab
FB
8018 break;
8019 }
8020 if (insn & (1 << 10)) {
8021 /* data processing extended or blx */
8022 rd = (insn & 7) | ((insn >> 4) & 8);
8023 rm = (insn >> 3) & 0xf;
8024 op = (insn >> 8) & 3;
8025 switch (op) {
8026 case 0: /* add */
8027 gen_movl_T0_reg(s, rd);
8028 gen_movl_T1_reg(s, rm);
8029 gen_op_addl_T0_T1();
8030 gen_movl_reg_T0(s, rd);
8031 break;
8032 case 1: /* cmp */
8033 gen_movl_T0_reg(s, rd);
8034 gen_movl_T1_reg(s, rm);
8035 gen_op_subl_T0_T1_cc();
8036 break;
8037 case 2: /* mov/cpy */
8038 gen_movl_T0_reg(s, rm);
8039 gen_movl_reg_T0(s, rd);
8040 break;
8041 case 3:/* branch [and link] exchange thumb register */
b0109805 8042 tmp = load_reg(s, rm);
99c475ab
FB
8043 if (insn & (1 << 7)) {
8044 val = (uint32_t)s->pc | 1;
b0109805
PB
8045 tmp2 = new_tmp();
8046 tcg_gen_movi_i32(tmp2, val);
8047 store_reg(s, 14, tmp2);
99c475ab 8048 }
d9ba4830 8049 gen_bx(s, tmp);
99c475ab
FB
8050 break;
8051 }
8052 break;
8053 }
8054
8055 /* data processing register */
8056 rd = insn & 7;
8057 rm = (insn >> 3) & 7;
8058 op = (insn >> 6) & 0xf;
8059 if (op == 2 || op == 3 || op == 4 || op == 7) {
8060 /* the shift/rotate ops want the operands backwards */
8061 val = rm;
8062 rm = rd;
8063 rd = val;
8064 val = 1;
8065 } else {
8066 val = 0;
8067 }
8068
8069 if (op == 9) /* neg */
8070 gen_op_movl_T0_im(0);
8071 else if (op != 0xf) /* mvn doesn't read its first operand */
8072 gen_movl_T0_reg(s, rd);
8073
8074 gen_movl_T1_reg(s, rm);
5899f386 8075 switch (op) {
99c475ab
FB
8076 case 0x0: /* and */
8077 gen_op_andl_T0_T1();
9ee6e8bb
PB
8078 if (!s->condexec_mask)
8079 gen_op_logic_T0_cc();
99c475ab
FB
8080 break;
8081 case 0x1: /* eor */
8082 gen_op_xorl_T0_T1();
9ee6e8bb
PB
8083 if (!s->condexec_mask)
8084 gen_op_logic_T0_cc();
99c475ab
FB
8085 break;
8086 case 0x2: /* lsl */
9ee6e8bb 8087 if (s->condexec_mask) {
8984bd2e 8088 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8089 } else {
8984bd2e 8090 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8091 gen_op_logic_T1_cc();
8092 }
99c475ab
FB
8093 break;
8094 case 0x3: /* lsr */
9ee6e8bb 8095 if (s->condexec_mask) {
8984bd2e 8096 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8097 } else {
8984bd2e 8098 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8099 gen_op_logic_T1_cc();
8100 }
99c475ab
FB
8101 break;
8102 case 0x4: /* asr */
9ee6e8bb 8103 if (s->condexec_mask) {
8984bd2e 8104 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8105 } else {
8984bd2e 8106 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8107 gen_op_logic_T1_cc();
8108 }
99c475ab
FB
8109 break;
8110 case 0x5: /* adc */
9ee6e8bb 8111 if (s->condexec_mask)
b26eefb6 8112 gen_adc_T0_T1();
9ee6e8bb
PB
8113 else
8114 gen_op_adcl_T0_T1_cc();
99c475ab
FB
8115 break;
8116 case 0x6: /* sbc */
9ee6e8bb 8117 if (s->condexec_mask)
3670669c 8118 gen_sbc_T0_T1();
9ee6e8bb
PB
8119 else
8120 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
8121 break;
8122 case 0x7: /* ror */
9ee6e8bb 8123 if (s->condexec_mask) {
8984bd2e 8124 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8125 } else {
8984bd2e 8126 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8127 gen_op_logic_T1_cc();
8128 }
99c475ab
FB
8129 break;
8130 case 0x8: /* tst */
8131 gen_op_andl_T0_T1();
8132 gen_op_logic_T0_cc();
8133 rd = 16;
5899f386 8134 break;
99c475ab 8135 case 0x9: /* neg */
9ee6e8bb 8136 if (s->condexec_mask)
390efc54 8137 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
8138 else
8139 gen_op_subl_T0_T1_cc();
99c475ab
FB
8140 break;
8141 case 0xa: /* cmp */
8142 gen_op_subl_T0_T1_cc();
8143 rd = 16;
8144 break;
8145 case 0xb: /* cmn */
8146 gen_op_addl_T0_T1_cc();
8147 rd = 16;
8148 break;
8149 case 0xc: /* orr */
8150 gen_op_orl_T0_T1();
9ee6e8bb
PB
8151 if (!s->condexec_mask)
8152 gen_op_logic_T0_cc();
99c475ab
FB
8153 break;
8154 case 0xd: /* mul */
8155 gen_op_mull_T0_T1();
9ee6e8bb
PB
8156 if (!s->condexec_mask)
8157 gen_op_logic_T0_cc();
99c475ab
FB
8158 break;
8159 case 0xe: /* bic */
8160 gen_op_bicl_T0_T1();
9ee6e8bb
PB
8161 if (!s->condexec_mask)
8162 gen_op_logic_T0_cc();
99c475ab
FB
8163 break;
8164 case 0xf: /* mvn */
8165 gen_op_notl_T1();
9ee6e8bb
PB
8166 if (!s->condexec_mask)
8167 gen_op_logic_T1_cc();
99c475ab 8168 val = 1;
5899f386 8169 rm = rd;
99c475ab
FB
8170 break;
8171 }
8172 if (rd != 16) {
8173 if (val)
5899f386 8174 gen_movl_reg_T1(s, rm);
99c475ab
FB
8175 else
8176 gen_movl_reg_T0(s, rd);
8177 }
8178 break;
8179
8180 case 5:
8181 /* load/store register offset. */
8182 rd = insn & 7;
8183 rn = (insn >> 3) & 7;
8184 rm = (insn >> 6) & 7;
8185 op = (insn >> 9) & 7;
b0109805 8186 addr = load_reg(s, rn);
b26eefb6 8187 tmp = load_reg(s, rm);
b0109805 8188 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8189 dead_tmp(tmp);
99c475ab
FB
8190
8191 if (op < 3) /* store */
b0109805 8192 tmp = load_reg(s, rd);
99c475ab
FB
8193
8194 switch (op) {
8195 case 0: /* str */
b0109805 8196 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8197 break;
8198 case 1: /* strh */
b0109805 8199 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8200 break;
8201 case 2: /* strb */
b0109805 8202 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8203 break;
8204 case 3: /* ldrsb */
b0109805 8205 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8206 break;
8207 case 4: /* ldr */
b0109805 8208 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8209 break;
8210 case 5: /* ldrh */
b0109805 8211 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8212 break;
8213 case 6: /* ldrb */
b0109805 8214 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8215 break;
8216 case 7: /* ldrsh */
b0109805 8217 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8218 break;
8219 }
8220 if (op >= 3) /* load */
b0109805
PB
8221 store_reg(s, rd, tmp);
8222 dead_tmp(addr);
99c475ab
FB
8223 break;
8224
8225 case 6:
8226 /* load/store word immediate offset */
8227 rd = insn & 7;
8228 rn = (insn >> 3) & 7;
b0109805 8229 addr = load_reg(s, rn);
99c475ab 8230 val = (insn >> 4) & 0x7c;
b0109805 8231 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8232
8233 if (insn & (1 << 11)) {
8234 /* load */
b0109805
PB
8235 tmp = gen_ld32(addr, IS_USER(s));
8236 store_reg(s, rd, tmp);
99c475ab
FB
8237 } else {
8238 /* store */
b0109805
PB
8239 tmp = load_reg(s, rd);
8240 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8241 }
b0109805 8242 dead_tmp(addr);
99c475ab
FB
8243 break;
8244
8245 case 7:
8246 /* load/store byte immediate offset */
8247 rd = insn & 7;
8248 rn = (insn >> 3) & 7;
b0109805 8249 addr = load_reg(s, rn);
99c475ab 8250 val = (insn >> 6) & 0x1f;
b0109805 8251 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8252
8253 if (insn & (1 << 11)) {
8254 /* load */
b0109805
PB
8255 tmp = gen_ld8u(addr, IS_USER(s));
8256 store_reg(s, rd, tmp);
99c475ab
FB
8257 } else {
8258 /* store */
b0109805
PB
8259 tmp = load_reg(s, rd);
8260 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8261 }
b0109805 8262 dead_tmp(addr);
99c475ab
FB
8263 break;
8264
8265 case 8:
8266 /* load/store halfword immediate offset */
8267 rd = insn & 7;
8268 rn = (insn >> 3) & 7;
b0109805 8269 addr = load_reg(s, rn);
99c475ab 8270 val = (insn >> 5) & 0x3e;
b0109805 8271 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8272
8273 if (insn & (1 << 11)) {
8274 /* load */
b0109805
PB
8275 tmp = gen_ld16u(addr, IS_USER(s));
8276 store_reg(s, rd, tmp);
99c475ab
FB
8277 } else {
8278 /* store */
b0109805
PB
8279 tmp = load_reg(s, rd);
8280 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8281 }
b0109805 8282 dead_tmp(addr);
99c475ab
FB
8283 break;
8284
8285 case 9:
8286 /* load/store from stack */
8287 rd = (insn >> 8) & 7;
b0109805 8288 addr = load_reg(s, 13);
99c475ab 8289 val = (insn & 0xff) * 4;
b0109805 8290 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8291
8292 if (insn & (1 << 11)) {
8293 /* load */
b0109805
PB
8294 tmp = gen_ld32(addr, IS_USER(s));
8295 store_reg(s, rd, tmp);
99c475ab
FB
8296 } else {
8297 /* store */
b0109805
PB
8298 tmp = load_reg(s, rd);
8299 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8300 }
b0109805 8301 dead_tmp(addr);
99c475ab
FB
8302 break;
8303
8304 case 10:
8305 /* add to high reg */
8306 rd = (insn >> 8) & 7;
5899f386
FB
8307 if (insn & (1 << 11)) {
8308 /* SP */
5e3f878a 8309 tmp = load_reg(s, 13);
5899f386
FB
8310 } else {
8311 /* PC. bit 1 is ignored. */
5e3f878a
PB
8312 tmp = new_tmp();
8313 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8314 }
99c475ab 8315 val = (insn & 0xff) * 4;
5e3f878a
PB
8316 tcg_gen_addi_i32(tmp, tmp, val);
8317 store_reg(s, rd, tmp);
99c475ab
FB
8318 break;
8319
8320 case 11:
8321 /* misc */
8322 op = (insn >> 8) & 0xf;
8323 switch (op) {
8324 case 0:
8325 /* adjust stack pointer */
b26eefb6 8326 tmp = load_reg(s, 13);
99c475ab
FB
8327 val = (insn & 0x7f) * 4;
8328 if (insn & (1 << 7))
6a0d8a1d 8329 val = -(int32_t)val;
b26eefb6
PB
8330 tcg_gen_addi_i32(tmp, tmp, val);
8331 store_reg(s, 13, tmp);
99c475ab
FB
8332 break;
8333
9ee6e8bb
PB
8334 case 2: /* sign/zero extend. */
8335 ARCH(6);
8336 rd = insn & 7;
8337 rm = (insn >> 3) & 7;
b0109805 8338 tmp = load_reg(s, rm);
9ee6e8bb 8339 switch ((insn >> 6) & 3) {
b0109805
PB
8340 case 0: gen_sxth(tmp); break;
8341 case 1: gen_sxtb(tmp); break;
8342 case 2: gen_uxth(tmp); break;
8343 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8344 }
b0109805 8345 store_reg(s, rd, tmp);
9ee6e8bb 8346 break;
99c475ab
FB
8347 case 4: case 5: case 0xc: case 0xd:
8348 /* push/pop */
b0109805 8349 addr = load_reg(s, 13);
5899f386
FB
8350 if (insn & (1 << 8))
8351 offset = 4;
99c475ab 8352 else
5899f386
FB
8353 offset = 0;
8354 for (i = 0; i < 8; i++) {
8355 if (insn & (1 << i))
8356 offset += 4;
8357 }
8358 if ((insn & (1 << 11)) == 0) {
b0109805 8359 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8360 }
99c475ab
FB
8361 for (i = 0; i < 8; i++) {
8362 if (insn & (1 << i)) {
8363 if (insn & (1 << 11)) {
8364 /* pop */
b0109805
PB
8365 tmp = gen_ld32(addr, IS_USER(s));
8366 store_reg(s, i, tmp);
99c475ab
FB
8367 } else {
8368 /* push */
b0109805
PB
8369 tmp = load_reg(s, i);
8370 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8371 }
5899f386 8372 /* advance to the next address. */
b0109805 8373 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8374 }
8375 }
a50f5b91 8376 TCGV_UNUSED(tmp);
99c475ab
FB
8377 if (insn & (1 << 8)) {
8378 if (insn & (1 << 11)) {
8379 /* pop pc */
b0109805 8380 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8381 /* don't set the pc until the rest of the instruction
8382 has completed */
8383 } else {
8384 /* push lr */
b0109805
PB
8385 tmp = load_reg(s, 14);
8386 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8387 }
b0109805 8388 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8389 }
5899f386 8390 if ((insn & (1 << 11)) == 0) {
b0109805 8391 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8392 }
99c475ab 8393 /* write back the new stack pointer */
b0109805 8394 store_reg(s, 13, addr);
99c475ab
FB
8395 /* set the new PC value */
8396 if ((insn & 0x0900) == 0x0900)
b0109805 8397 gen_bx(s, tmp);
99c475ab
FB
8398 break;
8399
9ee6e8bb
PB
8400 case 1: case 3: case 9: case 11: /* czb */
8401 rm = insn & 7;
d9ba4830 8402 tmp = load_reg(s, rm);
9ee6e8bb
PB
8403 s->condlabel = gen_new_label();
8404 s->condjmp = 1;
8405 if (insn & (1 << 11))
cb63669a 8406 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8407 else
cb63669a 8408 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8409 dead_tmp(tmp);
9ee6e8bb
PB
8410 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8411 val = (uint32_t)s->pc + 2;
8412 val += offset;
8413 gen_jmp(s, val);
8414 break;
8415
8416 case 15: /* IT, nop-hint. */
8417 if ((insn & 0xf) == 0) {
8418 gen_nop_hint(s, (insn >> 4) & 0xf);
8419 break;
8420 }
8421 /* If Then. */
8422 s->condexec_cond = (insn >> 4) & 0xe;
8423 s->condexec_mask = insn & 0x1f;
8424 /* No actual code generated for this insn, just setup state. */
8425 break;
8426
06c949e6 8427 case 0xe: /* bkpt */
9ee6e8bb 8428 gen_set_condexec(s);
5e3f878a 8429 gen_set_pc_im(s->pc - 2);
d9ba4830 8430 gen_exception(EXCP_BKPT);
06c949e6
PB
8431 s->is_jmp = DISAS_JUMP;
8432 break;
8433
9ee6e8bb
PB
8434 case 0xa: /* rev */
8435 ARCH(6);
8436 rn = (insn >> 3) & 0x7;
8437 rd = insn & 0x7;
b0109805 8438 tmp = load_reg(s, rn);
9ee6e8bb 8439 switch ((insn >> 6) & 3) {
b0109805
PB
8440 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8441 case 1: gen_rev16(tmp); break;
8442 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8443 default: goto illegal_op;
8444 }
b0109805 8445 store_reg(s, rd, tmp);
9ee6e8bb
PB
8446 break;
8447
8448 case 6: /* cps */
8449 ARCH(6);
8450 if (IS_USER(s))
8451 break;
8452 if (IS_M(env)) {
8984bd2e 8453 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8454 /* PRIMASK */
8984bd2e
PB
8455 if (insn & 1) {
8456 addr = tcg_const_i32(16);
8457 gen_helper_v7m_msr(cpu_env, addr, tmp);
8458 }
9ee6e8bb 8459 /* FAULTMASK */
8984bd2e
PB
8460 if (insn & 2) {
8461 addr = tcg_const_i32(17);
8462 gen_helper_v7m_msr(cpu_env, addr, tmp);
8463 }
9ee6e8bb
PB
8464 gen_lookup_tb(s);
8465 } else {
8466 if (insn & (1 << 4))
8467 shift = CPSR_A | CPSR_I | CPSR_F;
8468 else
8469 shift = 0;
8470
8471 val = ((insn & 7) << 6) & shift;
8472 gen_op_movl_T0_im(val);
8473 gen_set_psr_T0(s, shift, 0);
8474 }
8475 break;
8476
99c475ab
FB
8477 default:
8478 goto undef;
8479 }
8480 break;
8481
8482 case 12:
8483 /* load/store multiple */
8484 rn = (insn >> 8) & 0x7;
b0109805 8485 addr = load_reg(s, rn);
99c475ab
FB
8486 for (i = 0; i < 8; i++) {
8487 if (insn & (1 << i)) {
99c475ab
FB
8488 if (insn & (1 << 11)) {
8489 /* load */
b0109805
PB
8490 tmp = gen_ld32(addr, IS_USER(s));
8491 store_reg(s, i, tmp);
99c475ab
FB
8492 } else {
8493 /* store */
b0109805
PB
8494 tmp = load_reg(s, i);
8495 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8496 }
5899f386 8497 /* advance to the next address */
b0109805 8498 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8499 }
8500 }
5899f386 8501 /* Base register writeback. */
b0109805
PB
8502 if ((insn & (1 << rn)) == 0) {
8503 store_reg(s, rn, addr);
8504 } else {
8505 dead_tmp(addr);
8506 }
99c475ab
FB
8507 break;
8508
8509 case 13:
8510 /* conditional branch or swi */
8511 cond = (insn >> 8) & 0xf;
8512 if (cond == 0xe)
8513 goto undef;
8514
8515 if (cond == 0xf) {
8516 /* swi */
9ee6e8bb 8517 gen_set_condexec(s);
422ebf69 8518 gen_set_pc_im(s->pc);
9ee6e8bb 8519 s->is_jmp = DISAS_SWI;
99c475ab
FB
8520 break;
8521 }
8522 /* generate a conditional jump to next instruction */
e50e6a20 8523 s->condlabel = gen_new_label();
d9ba4830 8524 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8525 s->condjmp = 1;
99c475ab
FB
8526 gen_movl_T1_reg(s, 15);
8527
8528 /* jump to the offset */
5899f386 8529 val = (uint32_t)s->pc + 2;
99c475ab 8530 offset = ((int32_t)insn << 24) >> 24;
5899f386 8531 val += offset << 1;
8aaca4c0 8532 gen_jmp(s, val);
99c475ab
FB
8533 break;
8534
8535 case 14:
358bf29e 8536 if (insn & (1 << 11)) {
9ee6e8bb
PB
8537 if (disas_thumb2_insn(env, s, insn))
8538 goto undef32;
358bf29e
PB
8539 break;
8540 }
9ee6e8bb 8541 /* unconditional branch */
99c475ab
FB
8542 val = (uint32_t)s->pc;
8543 offset = ((int32_t)insn << 21) >> 21;
8544 val += (offset << 1) + 2;
8aaca4c0 8545 gen_jmp(s, val);
99c475ab
FB
8546 break;
8547
8548 case 15:
9ee6e8bb 8549 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8550 goto undef32;
9ee6e8bb 8551 break;
99c475ab
FB
8552 }
8553 return;
9ee6e8bb
PB
8554undef32:
8555 gen_set_condexec(s);
5e3f878a 8556 gen_set_pc_im(s->pc - 4);
d9ba4830 8557 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8558 s->is_jmp = DISAS_JUMP;
8559 return;
8560illegal_op:
99c475ab 8561undef:
9ee6e8bb 8562 gen_set_condexec(s);
5e3f878a 8563 gen_set_pc_im(s->pc - 2);
d9ba4830 8564 gen_exception(EXCP_UDEF);
99c475ab
FB
8565 s->is_jmp = DISAS_JUMP;
8566}
8567
2c0262af
FB
8568/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8569 basic block 'tb'. If search_pc is TRUE, also generate PC
8570 information for each intermediate instruction. */
2cfc5f17
TS
8571static inline void gen_intermediate_code_internal(CPUState *env,
8572 TranslationBlock *tb,
8573 int search_pc)
2c0262af
FB
8574{
8575 DisasContext dc1, *dc = &dc1;
a1d1bb31 8576 CPUBreakpoint *bp;
2c0262af
FB
8577 uint16_t *gen_opc_end;
8578 int j, lj;
0fa85d43 8579 target_ulong pc_start;
b5ff1b31 8580 uint32_t next_page_start;
2e70f6ef
PB
8581 int num_insns;
8582 int max_insns;
3b46e624 8583
2c0262af 8584 /* generate intermediate code */
b26eefb6
PB
8585 num_temps = 0;
8586 memset(temps, 0, sizeof(temps));
8587
0fa85d43 8588 pc_start = tb->pc;
3b46e624 8589
2c0262af
FB
8590 dc->tb = tb;
8591
2c0262af 8592 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8593
8594 dc->is_jmp = DISAS_NEXT;
8595 dc->pc = pc_start;
8aaca4c0 8596 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8597 dc->condjmp = 0;
5899f386 8598 dc->thumb = env->thumb;
9ee6e8bb
PB
8599 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8600 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 8601#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8602 if (IS_M(env)) {
8603 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8604 } else {
8605 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8606 }
b5ff1b31 8607#endif
a7812ae4
PB
8608 cpu_F0s = tcg_temp_new_i32();
8609 cpu_F1s = tcg_temp_new_i32();
8610 cpu_F0d = tcg_temp_new_i64();
8611 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8612 cpu_V0 = cpu_F0d;
8613 cpu_V1 = cpu_F1d;
e677137d 8614 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8615 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8616 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8617 lj = -1;
2e70f6ef
PB
8618 num_insns = 0;
8619 max_insns = tb->cflags & CF_COUNT_MASK;
8620 if (max_insns == 0)
8621 max_insns = CF_COUNT_MASK;
8622
8623 gen_icount_start();
9ee6e8bb
PB
8624 /* Reset the conditional execution bits immediately. This avoids
8625 complications trying to do it at the end of the block. */
8626 if (env->condexec_bits)
8f01245e
PB
8627 {
8628 TCGv tmp = new_tmp();
8629 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8630 store_cpu_field(tmp, condexec_bits);
8f01245e 8631 }
2c0262af 8632 do {
fbb4a2e3
PB
8633#ifdef CONFIG_USER_ONLY
8634 /* Intercept jump to the magic kernel page. */
8635 if (dc->pc >= 0xffff0000) {
8636 /* We always get here via a jump, so know we are not in a
8637 conditional execution block. */
8638 gen_exception(EXCP_KERNEL_TRAP);
8639 dc->is_jmp = DISAS_UPDATE;
8640 break;
8641 }
8642#else
9ee6e8bb
PB
8643 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8644 /* We always get here via a jump, so know we are not in a
8645 conditional execution block. */
d9ba4830 8646 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8647 dc->is_jmp = DISAS_UPDATE;
8648 break;
9ee6e8bb
PB
8649 }
8650#endif
8651
c0ce998e
AL
8652 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
8653 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8654 if (bp->pc == dc->pc) {
9ee6e8bb 8655 gen_set_condexec(dc);
5e3f878a 8656 gen_set_pc_im(dc->pc);
d9ba4830 8657 gen_exception(EXCP_DEBUG);
1fddef4b 8658 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8659 /* Advance PC so that clearing the breakpoint will
8660 invalidate this TB. */
8661 dc->pc += 2;
8662 goto done_generating;
1fddef4b
FB
8663 break;
8664 }
8665 }
8666 }
2c0262af
FB
8667 if (search_pc) {
8668 j = gen_opc_ptr - gen_opc_buf;
8669 if (lj < j) {
8670 lj++;
8671 while (lj < j)
8672 gen_opc_instr_start[lj++] = 0;
8673 }
0fa85d43 8674 gen_opc_pc[lj] = dc->pc;
2c0262af 8675 gen_opc_instr_start[lj] = 1;
2e70f6ef 8676 gen_opc_icount[lj] = num_insns;
2c0262af 8677 }
e50e6a20 8678
2e70f6ef
PB
8679 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8680 gen_io_start();
8681
9ee6e8bb
PB
8682 if (env->thumb) {
8683 disas_thumb_insn(env, dc);
8684 if (dc->condexec_mask) {
8685 dc->condexec_cond = (dc->condexec_cond & 0xe)
8686 | ((dc->condexec_mask >> 4) & 1);
8687 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8688 if (dc->condexec_mask == 0) {
8689 dc->condexec_cond = 0;
8690 }
8691 }
8692 } else {
8693 disas_arm_insn(env, dc);
8694 }
b26eefb6
PB
8695 if (num_temps) {
8696 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8697 num_temps = 0;
8698 }
e50e6a20
FB
8699
8700 if (dc->condjmp && !dc->is_jmp) {
8701 gen_set_label(dc->condlabel);
8702 dc->condjmp = 0;
8703 }
aaf2d97d 8704 /* Translation stops when a conditional branch is encountered.
e50e6a20 8705 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8706 * Also stop translation when a page boundary is reached. This
bf20dc07 8707 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8708 num_insns ++;
1fddef4b
FB
8709 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8710 !env->singlestep_enabled &&
2e70f6ef
PB
8711 dc->pc < next_page_start &&
8712 num_insns < max_insns);
8713
8714 if (tb->cflags & CF_LAST_IO) {
8715 if (dc->condjmp) {
8716 /* FIXME: This can theoretically happen with self-modifying
8717 code. */
8718 cpu_abort(env, "IO on conditional branch instruction");
8719 }
8720 gen_io_end();
8721 }
9ee6e8bb 8722
b5ff1b31 8723 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8724 instruction was a conditional branch or trap, and the PC has
8725 already been written. */
551bd27f 8726 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8727 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8728 if (dc->condjmp) {
9ee6e8bb
PB
8729 gen_set_condexec(dc);
8730 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8731 gen_exception(EXCP_SWI);
9ee6e8bb 8732 } else {
d9ba4830 8733 gen_exception(EXCP_DEBUG);
9ee6e8bb 8734 }
e50e6a20
FB
8735 gen_set_label(dc->condlabel);
8736 }
8737 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8738 gen_set_pc_im(dc->pc);
e50e6a20 8739 dc->condjmp = 0;
8aaca4c0 8740 }
9ee6e8bb
PB
8741 gen_set_condexec(dc);
8742 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8743 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8744 } else {
8745 /* FIXME: Single stepping a WFI insn will not halt
8746 the CPU. */
d9ba4830 8747 gen_exception(EXCP_DEBUG);
9ee6e8bb 8748 }
8aaca4c0 8749 } else {
9ee6e8bb
PB
8750 /* While branches must always occur at the end of an IT block,
8751 there are a few other things that can cause us to terminate
8752 the TB in the middel of an IT block:
8753 - Exception generating instructions (bkpt, swi, undefined).
8754 - Page boundaries.
8755 - Hardware watchpoints.
8756 Hardware breakpoints have already been handled and skip this code.
8757 */
8758 gen_set_condexec(dc);
8aaca4c0 8759 switch(dc->is_jmp) {
8aaca4c0 8760 case DISAS_NEXT:
6e256c93 8761 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8762 break;
8763 default:
8764 case DISAS_JUMP:
8765 case DISAS_UPDATE:
8766 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8767 tcg_gen_exit_tb(0);
8aaca4c0
FB
8768 break;
8769 case DISAS_TB_JUMP:
8770 /* nothing more to generate */
8771 break;
9ee6e8bb 8772 case DISAS_WFI:
d9ba4830 8773 gen_helper_wfi();
9ee6e8bb
PB
8774 break;
8775 case DISAS_SWI:
d9ba4830 8776 gen_exception(EXCP_SWI);
9ee6e8bb 8777 break;
8aaca4c0 8778 }
e50e6a20
FB
8779 if (dc->condjmp) {
8780 gen_set_label(dc->condlabel);
9ee6e8bb 8781 gen_set_condexec(dc);
6e256c93 8782 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8783 dc->condjmp = 0;
8784 }
2c0262af 8785 }
2e70f6ef 8786
9ee6e8bb 8787done_generating:
2e70f6ef 8788 gen_icount_end(tb, num_insns);
2c0262af
FB
8789 *gen_opc_ptr = INDEX_op_end;
8790
8791#ifdef DEBUG_DISAS
e19e89a5 8792 if (loglevel & CPU_LOG_TB_IN_ASM) {
2c0262af
FB
8793 fprintf(logfile, "----------------\n");
8794 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 8795 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2c0262af
FB
8796 fprintf(logfile, "\n");
8797 }
8798#endif
b5ff1b31
FB
8799 if (search_pc) {
8800 j = gen_opc_ptr - gen_opc_buf;
8801 lj++;
8802 while (lj <= j)
8803 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8804 } else {
2c0262af 8805 tb->size = dc->pc - pc_start;
2e70f6ef 8806 tb->icount = num_insns;
b5ff1b31 8807 }
2c0262af
FB
8808}
8809
/* Translate guest code for TB into TCG ops (the normal translation
   path: search_pc == 0, so no opcode/PC mapping is recorded).  */
void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
8814
/* Re-translate TB while recording the guest PC for each generated op
   (search_pc == 1), used when restoring CPU state after a fault.  */
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
8819
/* Printable names for the ARM processor modes, indexed by the low
   four bits of the PSR mode field.  "???" marks encodings that are
   reserved/unnamed.  Used by cpu_dump_state() below.  */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 8824
5fafdf24 8825void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8826 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8827 int flags)
2c0262af
FB
8828{
8829 int i;
06e80fc9 8830#if 0
bc380d17 8831 union {
b7bcbe95
FB
8832 uint32_t i;
8833 float s;
8834 } s0, s1;
8835 CPU_DoubleU d;
a94a6abf
PB
8836 /* ??? This assumes float64 and double have the same layout.
8837 Oh well, it's only debug dumps. */
8838 union {
8839 float64 f64;
8840 double d;
8841 } d0;
06e80fc9 8842#endif
b5ff1b31 8843 uint32_t psr;
2c0262af
FB
8844
8845 for(i=0;i<16;i++) {
7fe48483 8846 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8847 if ((i % 4) == 3)
7fe48483 8848 cpu_fprintf(f, "\n");
2c0262af 8849 else
7fe48483 8850 cpu_fprintf(f, " ");
2c0262af 8851 }
b5ff1b31 8852 psr = cpsr_read(env);
687fa640
TS
8853 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8854 psr,
b5ff1b31
FB
8855 psr & (1 << 31) ? 'N' : '-',
8856 psr & (1 << 30) ? 'Z' : '-',
8857 psr & (1 << 29) ? 'C' : '-',
8858 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8859 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8860 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 8861
5e3f878a 8862#if 0
b7bcbe95 8863 for (i = 0; i < 16; i++) {
8e96005d
FB
8864 d.d = env->vfp.regs[i];
8865 s0.i = d.l.lower;
8866 s1.i = d.l.upper;
a94a6abf
PB
8867 d0.f64 = d.d;
8868 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8869 i * 2, (int)s0.i, s0.s,
a94a6abf 8870 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8871 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8872 d0.d);
b7bcbe95 8873 }
40f137e1 8874 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 8875#endif
2c0262af 8876}
a6b025d3 8877
/* Restore the guest PC after an exception inside TB: copy the PC
   recorded for opcode index PC_POS into r15 (the ARM PC).
   SEARCHED_PC and PUC are unused on this target.  */
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}