]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
Fix smlald, smlsld, pkhtp, pkhbt, ssat, usat, umul, smul... (Laurent Desnogues).
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
1497c961
PB
32
33#define GEN_HELPER 1
b26eefb6 34#include "helpers.h"
2c0262af 35
9ee6e8bb
PB
36#define ENABLE_ARCH_5J 0
37#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
38#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
39#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
40#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31
FB
41
42#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
43
2c0262af
FB
44/* internal defines */
45typedef struct DisasContext {
0fa85d43 46 target_ulong pc;
2c0262af 47 int is_jmp;
e50e6a20
FB
48 /* Nonzero if this instruction has been conditionally skipped. */
49 int condjmp;
50 /* The label that will be jumped to when the instruction is skipped. */
51 int condlabel;
9ee6e8bb
PB
52 /* Thumb-2 condtional execution bits. */
53 int condexec_mask;
54 int condexec_cond;
2c0262af 55 struct TranslationBlock *tb;
8aaca4c0 56 int singlestep_enabled;
5899f386 57 int thumb;
6658ffb8 58 int is_mem;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
b5ff1b31
FB
64#if defined(CONFIG_USER_ONLY)
65#define IS_USER(s) 1
66#else
67#define IS_USER(s) (s->user)
68#endif
69
9ee6e8bb
PB
70/* These instructions trap after executing, so defer them until after the
71 conditional executions state has been updated. */
72#define DISAS_WFI 4
73#define DISAS_SWI 5
2c0262af
FB
74
75/* XXX: move that elsewhere */
2c0262af
FB
76extern FILE *logfile;
77extern int loglevel;
78
b26eefb6 79static TCGv cpu_env;
ad69471c 80/* We reuse the same 64-bit temporaries for efficiency. */
e677137d 81static TCGv cpu_V0, cpu_V1, cpu_M0;
ad69471c 82
b26eefb6 83/* FIXME: These should be removed. */
8f8e3aa4 84static TCGv cpu_T[2];
4373f3ce 85static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
b26eefb6 86
2e70f6ef
PB
87#define ICOUNT_TEMP cpu_T[0]
88#include "gen-icount.h"
89
b26eefb6
PB
90/* initialize TCG globals. */
91void arm_translate_init(void)
92{
93 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
94
95 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
96 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
b26eefb6
PB
97}
98
99/* The code generator doesn't like lots of temporaries, so maintain our own
100 cache for reuse within a function. */
101#define MAX_TEMPS 8
102static int num_temps;
103static TCGv temps[MAX_TEMPS];
104
105/* Allocate a temporary variable. */
106static TCGv new_tmp(void)
107{
108 TCGv tmp;
109 if (num_temps == MAX_TEMPS)
110 abort();
111
112 if (GET_TCGV(temps[num_temps]))
113 return temps[num_temps++];
114
115 tmp = tcg_temp_new(TCG_TYPE_I32);
116 temps[num_temps++] = tmp;
117 return tmp;
118}
119
120/* Release a temporary variable. */
121static void dead_tmp(TCGv tmp)
122{
123 int i;
124 num_temps--;
125 i = num_temps;
126 if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
127 return;
128
129 /* Shuffle this temp to the last slot. */
130 while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
131 i--;
132 while (i < num_temps) {
133 temps[i] = temps[i + 1];
134 i++;
135 }
136 temps[i] = tmp;
137}
138
d9ba4830
PB
139static inline TCGv load_cpu_offset(int offset)
140{
141 TCGv tmp = new_tmp();
142 tcg_gen_ld_i32(tmp, cpu_env, offset);
143 return tmp;
144}
145
146#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
147
148static inline void store_cpu_offset(TCGv var, int offset)
149{
150 tcg_gen_st_i32(var, cpu_env, offset);
151 dead_tmp(var);
152}
153
154#define store_cpu_field(var, name) \
155 store_cpu_offset(var, offsetof(CPUState, name))
156
b26eefb6
PB
157/* Set a variable to the value of a CPU register. */
158static void load_reg_var(DisasContext *s, TCGv var, int reg)
159{
160 if (reg == 15) {
161 uint32_t addr;
162 /* normaly, since we updated PC, we need only to add one insn */
163 if (s->thumb)
164 addr = (long)s->pc + 2;
165 else
166 addr = (long)s->pc + 4;
167 tcg_gen_movi_i32(var, addr);
168 } else {
169 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
170 }
171}
172
173/* Create a new temporary and set it to the value of a CPU register. */
174static inline TCGv load_reg(DisasContext *s, int reg)
175{
176 TCGv tmp = new_tmp();
177 load_reg_var(s, tmp, reg);
178 return tmp;
179}
180
181/* Set a CPU register. The source must be a temporary and will be
182 marked as dead. */
183static void store_reg(DisasContext *s, int reg, TCGv var)
184{
185 if (reg == 15) {
186 tcg_gen_andi_i32(var, var, ~1);
187 s->is_jmp = DISAS_JUMP;
188 }
189 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
190 dead_tmp(var);
191}
192
193
194/* Basic operations. */
195#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
b26eefb6 196#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
b26eefb6
PB
197#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
198#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
b26eefb6
PB
199
200#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
201#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
202#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
203#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
204
8984bd2e
PB
205#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
206#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
207#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
208#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
209#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
210#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
211
b26eefb6
PB
212#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
213#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
214#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
215#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
216#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
217#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
218#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
219
220#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
221#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
222#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
223#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
224#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
225
226/* Value extensions. */
86831435
PB
227#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
228#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
229#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
230#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
231
1497c961
PB
232#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
233#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
234
235#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 236
d9ba4830
PB
237#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
238/* Set NZCV flags from the high 4 bits of var. */
239#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
240
241static void gen_exception(int excp)
242{
243 TCGv tmp = new_tmp();
244 tcg_gen_movi_i32(tmp, excp);
245 gen_helper_exception(tmp);
246 dead_tmp(tmp);
247}
248
3670669c
PB
249static void gen_smul_dual(TCGv a, TCGv b)
250{
251 TCGv tmp1 = new_tmp();
252 TCGv tmp2 = new_tmp();
22478e79
AZ
253 tcg_gen_ext16s_i32(tmp1, a);
254 tcg_gen_ext16s_i32(tmp2, b);
3670669c
PB
255 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
256 dead_tmp(tmp2);
257 tcg_gen_sari_i32(a, a, 16);
258 tcg_gen_sari_i32(b, b, 16);
259 tcg_gen_mul_i32(b, b, a);
260 tcg_gen_mov_i32(a, tmp1);
261 dead_tmp(tmp1);
262}
263
264/* Byteswap each halfword. */
265static void gen_rev16(TCGv var)
266{
267 TCGv tmp = new_tmp();
268 tcg_gen_shri_i32(tmp, var, 8);
269 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
270 tcg_gen_shli_i32(var, var, 8);
271 tcg_gen_andi_i32(var, var, 0xff00ff00);
272 tcg_gen_or_i32(var, var, tmp);
273 dead_tmp(tmp);
274}
275
276/* Byteswap low halfword and sign extend. */
277static void gen_revsh(TCGv var)
278{
279 TCGv tmp = new_tmp();
280 tcg_gen_shri_i32(tmp, var, 8);
281 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
282 tcg_gen_shli_i32(var, var, 8);
283 tcg_gen_ext8s_i32(var, var);
284 tcg_gen_or_i32(var, var, tmp);
285 dead_tmp(tmp);
286}
287
288/* Unsigned bitfield extract. */
289static void gen_ubfx(TCGv var, int shift, uint32_t mask)
290{
291 if (shift)
292 tcg_gen_shri_i32(var, var, shift);
293 tcg_gen_andi_i32(var, var, mask);
294}
295
296/* Signed bitfield extract. */
297static void gen_sbfx(TCGv var, int shift, int width)
298{
299 uint32_t signbit;
300
301 if (shift)
302 tcg_gen_sari_i32(var, var, shift);
303 if (shift + width < 32) {
304 signbit = 1u << (width - 1);
305 tcg_gen_andi_i32(var, var, (1u << width) - 1);
306 tcg_gen_xori_i32(var, var, signbit);
307 tcg_gen_subi_i32(var, var, signbit);
308 }
309}
310
311/* Bitfield insertion. Insert val into base. Clobbers base and val. */
312static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
313{
3670669c 314 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
315 tcg_gen_shli_i32(val, val, shift);
316 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
317 tcg_gen_or_i32(dest, base, val);
318}
319
d9ba4830
PB
320/* Round the top 32 bits of a 64-bit value. */
321static void gen_roundqd(TCGv a, TCGv b)
3670669c 322{
d9ba4830
PB
323 tcg_gen_shri_i32(a, a, 31);
324 tcg_gen_add_i32(a, a, b);
3670669c
PB
325}
326
8f01245e
PB
327/* FIXME: Most targets have native widening multiplication.
328 It would be good to use that instead of a full wide multiply. */
5e3f878a
PB
329/* 32x32->64 multiply. Marks inputs as dead. */
330static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
331{
332 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
333 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
334
335 tcg_gen_extu_i32_i64(tmp1, a);
336 dead_tmp(a);
337 tcg_gen_extu_i32_i64(tmp2, b);
338 dead_tmp(b);
339 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
340 return tmp1;
341}
342
343static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
344{
345 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
346 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
347
348 tcg_gen_ext_i32_i64(tmp1, a);
349 dead_tmp(a);
350 tcg_gen_ext_i32_i64(tmp2, b);
351 dead_tmp(b);
352 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
353 return tmp1;
354}
355
8f01245e
PB
356/* Unsigned 32x32->64 multiply. */
357static void gen_op_mull_T0_T1(void)
358{
359 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
360 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
361
362 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
363 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
364 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
365 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
366 tcg_gen_shri_i64(tmp1, tmp1, 32);
367 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
368}
369
370/* Signed 32x32->64 multiply. */
d9ba4830 371static void gen_imull(TCGv a, TCGv b)
8f01245e
PB
372{
373 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
374 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
375
d9ba4830
PB
376 tcg_gen_ext_i32_i64(tmp1, a);
377 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 378 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 379 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 380 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
381 tcg_gen_trunc_i64_i32(b, tmp1);
382}
383#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
384
8f01245e
PB
385/* Swap low and high halfwords. */
386static void gen_swap_half(TCGv var)
387{
388 TCGv tmp = new_tmp();
389 tcg_gen_shri_i32(tmp, var, 16);
390 tcg_gen_shli_i32(var, var, 16);
391 tcg_gen_or_i32(var, var, tmp);
3670669c 392 dead_tmp(tmp);
8f01245e
PB
393}
394
b26eefb6
PB
395/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
396 tmp = (t0 ^ t1) & 0x8000;
397 t0 &= ~0x8000;
398 t1 &= ~0x8000;
399 t0 = (t0 + t1) ^ tmp;
400 */
401
402static void gen_add16(TCGv t0, TCGv t1)
403{
404 TCGv tmp = new_tmp();
405 tcg_gen_xor_i32(tmp, t0, t1);
406 tcg_gen_andi_i32(tmp, tmp, 0x8000);
407 tcg_gen_andi_i32(t0, t0, ~0x8000);
408 tcg_gen_andi_i32(t1, t1, ~0x8000);
409 tcg_gen_add_i32(t0, t0, t1);
410 tcg_gen_xor_i32(t0, t0, tmp);
411 dead_tmp(tmp);
412 dead_tmp(t1);
413}
414
9a119ff6
PB
415#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
416
b26eefb6
PB
417/* Set CF to the top bit of var. */
418static void gen_set_CF_bit31(TCGv var)
419{
420 TCGv tmp = new_tmp();
421 tcg_gen_shri_i32(tmp, var, 31);
9a119ff6 422 gen_set_CF(var);
b26eefb6
PB
423 dead_tmp(tmp);
424}
425
426/* Set N and Z flags from var. */
427static inline void gen_logic_CC(TCGv var)
428{
6fbe23d5
PB
429 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
430 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
431}
432
433/* T0 += T1 + CF. */
434static void gen_adc_T0_T1(void)
435{
d9ba4830 436 TCGv tmp;
b26eefb6 437 gen_op_addl_T0_T1();
d9ba4830 438 tmp = load_cpu_field(CF);
b26eefb6
PB
439 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
440 dead_tmp(tmp);
441}
442
3670669c
PB
443/* dest = T0 - T1 + CF - 1. */
444static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
445{
d9ba4830 446 TCGv tmp;
3670669c 447 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 448 tmp = load_cpu_field(CF);
3670669c
PB
449 tcg_gen_add_i32(dest, dest, tmp);
450 tcg_gen_subi_i32(dest, dest, 1);
451 dead_tmp(tmp);
452}
453
454#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
455#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
456
b26eefb6
PB
457/* T0 &= ~T1. Clobbers T1. */
458/* FIXME: Implement bic natively. */
8f8e3aa4
PB
459static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
460{
461 TCGv tmp = new_tmp();
462 tcg_gen_not_i32(tmp, t1);
463 tcg_gen_and_i32(dest, t0, tmp);
464 dead_tmp(tmp);
465}
b26eefb6
PB
466static inline void gen_op_bicl_T0_T1(void)
467{
468 gen_op_notl_T1();
469 gen_op_andl_T0_T1();
470}
471
ad69471c
PB
472/* FIXME: Implement this natively. */
473#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
474
b26eefb6
PB
475/* FIXME: Implement this natively. */
476static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
477{
478 TCGv tmp;
479
480 if (i == 0)
481 return;
482
483 tmp = new_tmp();
484 tcg_gen_shri_i32(tmp, t1, i);
485 tcg_gen_shli_i32(t1, t1, 32 - i);
486 tcg_gen_or_i32(t0, t1, tmp);
487 dead_tmp(tmp);
488}
489
9a119ff6 490static void shifter_out_im(TCGv var, int shift)
b26eefb6 491{
9a119ff6
PB
492 TCGv tmp = new_tmp();
493 if (shift == 0) {
494 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 495 } else {
9a119ff6
PB
496 tcg_gen_shri_i32(tmp, var, shift);
497 if (shift != 31);
498 tcg_gen_andi_i32(tmp, tmp, 1);
499 }
500 gen_set_CF(tmp);
501 dead_tmp(tmp);
502}
b26eefb6 503
9a119ff6
PB
504/* Shift by immediate. Includes special handling for shift == 0. */
505static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
506{
507 switch (shiftop) {
508 case 0: /* LSL */
509 if (shift != 0) {
510 if (flags)
511 shifter_out_im(var, 32 - shift);
512 tcg_gen_shli_i32(var, var, shift);
513 }
514 break;
515 case 1: /* LSR */
516 if (shift == 0) {
517 if (flags) {
518 tcg_gen_shri_i32(var, var, 31);
519 gen_set_CF(var);
520 }
521 tcg_gen_movi_i32(var, 0);
522 } else {
523 if (flags)
524 shifter_out_im(var, shift - 1);
525 tcg_gen_shri_i32(var, var, shift);
526 }
527 break;
528 case 2: /* ASR */
529 if (shift == 0)
530 shift = 32;
531 if (flags)
532 shifter_out_im(var, shift - 1);
533 if (shift == 32)
534 shift = 31;
535 tcg_gen_sari_i32(var, var, shift);
536 break;
537 case 3: /* ROR/RRX */
538 if (shift != 0) {
539 if (flags)
540 shifter_out_im(var, shift - 1);
541 tcg_gen_rori_i32(var, var, shift); break;
542 } else {
d9ba4830 543 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
544 if (flags)
545 shifter_out_im(var, 0);
546 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
547 tcg_gen_shli_i32(tmp, tmp, 31);
548 tcg_gen_or_i32(var, var, tmp);
549 dead_tmp(tmp);
b26eefb6
PB
550 }
551 }
552};
553
8984bd2e
PB
554static inline void gen_arm_shift_reg(TCGv var, int shiftop,
555 TCGv shift, int flags)
556{
557 if (flags) {
558 switch (shiftop) {
559 case 0: gen_helper_shl_cc(var, var, shift); break;
560 case 1: gen_helper_shr_cc(var, var, shift); break;
561 case 2: gen_helper_sar_cc(var, var, shift); break;
562 case 3: gen_helper_ror_cc(var, var, shift); break;
563 }
564 } else {
565 switch (shiftop) {
566 case 0: gen_helper_shl(var, var, shift); break;
567 case 1: gen_helper_shr(var, var, shift); break;
568 case 2: gen_helper_sar(var, var, shift); break;
569 case 3: gen_helper_ror(var, var, shift); break;
570 }
571 }
572 dead_tmp(shift);
573}
574
6ddbc6e4
PB
575#define PAS_OP(pfx) \
576 switch (op2) { \
577 case 0: gen_pas_helper(glue(pfx,add16)); break; \
578 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
579 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
580 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
581 case 4: gen_pas_helper(glue(pfx,add8)); break; \
582 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
583 }
d9ba4830 584static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
585{
586 TCGv tmp;
587
588 switch (op1) {
589#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
590 case 1:
591 tmp = tcg_temp_new(TCG_TYPE_PTR);
592 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
593 PAS_OP(s)
594 break;
595 case 5:
596 tmp = tcg_temp_new(TCG_TYPE_PTR);
597 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
598 PAS_OP(u)
599 break;
600#undef gen_pas_helper
601#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
602 case 2:
603 PAS_OP(q);
604 break;
605 case 3:
606 PAS_OP(sh);
607 break;
608 case 6:
609 PAS_OP(uq);
610 break;
611 case 7:
612 PAS_OP(uh);
613 break;
614#undef gen_pas_helper
615 }
616}
9ee6e8bb
PB
617#undef PAS_OP
618
6ddbc6e4
PB
619/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
620#define PAS_OP(pfx) \
621 switch (op2) { \
622 case 0: gen_pas_helper(glue(pfx,add8)); break; \
623 case 1: gen_pas_helper(glue(pfx,add16)); break; \
624 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
625 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
626 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
627 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
628 }
d9ba4830 629static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
630{
631 TCGv tmp;
632
633 switch (op1) {
634#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
635 case 0:
636 tmp = tcg_temp_new(TCG_TYPE_PTR);
637 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
638 PAS_OP(s)
639 break;
640 case 4:
641 tmp = tcg_temp_new(TCG_TYPE_PTR);
642 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
643 PAS_OP(u)
644 break;
645#undef gen_pas_helper
646#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
647 case 1:
648 PAS_OP(q);
649 break;
650 case 2:
651 PAS_OP(sh);
652 break;
653 case 5:
654 PAS_OP(uq);
655 break;
656 case 6:
657 PAS_OP(uh);
658 break;
659#undef gen_pas_helper
660 }
661}
9ee6e8bb
PB
662#undef PAS_OP
663
d9ba4830
PB
664static void gen_test_cc(int cc, int label)
665{
666 TCGv tmp;
667 TCGv tmp2;
d9ba4830
PB
668 int inv;
669
d9ba4830
PB
670 switch (cc) {
671 case 0: /* eq: Z */
6fbe23d5 672 tmp = load_cpu_field(ZF);
cb63669a 673 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
674 break;
675 case 1: /* ne: !Z */
6fbe23d5 676 tmp = load_cpu_field(ZF);
cb63669a 677 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
678 break;
679 case 2: /* cs: C */
680 tmp = load_cpu_field(CF);
cb63669a 681 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
682 break;
683 case 3: /* cc: !C */
684 tmp = load_cpu_field(CF);
cb63669a 685 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
686 break;
687 case 4: /* mi: N */
6fbe23d5 688 tmp = load_cpu_field(NF);
cb63669a 689 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
690 break;
691 case 5: /* pl: !N */
6fbe23d5 692 tmp = load_cpu_field(NF);
cb63669a 693 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
694 break;
695 case 6: /* vs: V */
696 tmp = load_cpu_field(VF);
cb63669a 697 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
698 break;
699 case 7: /* vc: !V */
700 tmp = load_cpu_field(VF);
cb63669a 701 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
702 break;
703 case 8: /* hi: C && !Z */
704 inv = gen_new_label();
705 tmp = load_cpu_field(CF);
cb63669a 706 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 707 dead_tmp(tmp);
6fbe23d5 708 tmp = load_cpu_field(ZF);
cb63669a 709 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
710 gen_set_label(inv);
711 break;
712 case 9: /* ls: !C || Z */
713 tmp = load_cpu_field(CF);
cb63669a 714 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 715 dead_tmp(tmp);
6fbe23d5 716 tmp = load_cpu_field(ZF);
cb63669a 717 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
718 break;
719 case 10: /* ge: N == V -> N ^ V == 0 */
720 tmp = load_cpu_field(VF);
6fbe23d5 721 tmp2 = load_cpu_field(NF);
d9ba4830
PB
722 tcg_gen_xor_i32(tmp, tmp, tmp2);
723 dead_tmp(tmp2);
cb63669a 724 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
725 break;
726 case 11: /* lt: N != V -> N ^ V != 0 */
727 tmp = load_cpu_field(VF);
6fbe23d5 728 tmp2 = load_cpu_field(NF);
d9ba4830
PB
729 tcg_gen_xor_i32(tmp, tmp, tmp2);
730 dead_tmp(tmp2);
cb63669a 731 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
732 break;
733 case 12: /* gt: !Z && N == V */
734 inv = gen_new_label();
6fbe23d5 735 tmp = load_cpu_field(ZF);
cb63669a 736 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
737 dead_tmp(tmp);
738 tmp = load_cpu_field(VF);
6fbe23d5 739 tmp2 = load_cpu_field(NF);
d9ba4830
PB
740 tcg_gen_xor_i32(tmp, tmp, tmp2);
741 dead_tmp(tmp2);
cb63669a 742 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
743 gen_set_label(inv);
744 break;
745 case 13: /* le: Z || N != V */
6fbe23d5 746 tmp = load_cpu_field(ZF);
cb63669a 747 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
748 dead_tmp(tmp);
749 tmp = load_cpu_field(VF);
6fbe23d5 750 tmp2 = load_cpu_field(NF);
d9ba4830
PB
751 tcg_gen_xor_i32(tmp, tmp, tmp2);
752 dead_tmp(tmp2);
cb63669a 753 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
754 break;
755 default:
756 fprintf(stderr, "Bad condition code 0x%x\n", cc);
757 abort();
758 }
759 dead_tmp(tmp);
760}
2c0262af
FB
761
762const uint8_t table_logic_cc[16] = {
763 1, /* and */
764 1, /* xor */
765 0, /* sub */
766 0, /* rsb */
767 0, /* add */
768 0, /* adc */
769 0, /* sbc */
770 0, /* rsc */
771 1, /* andl */
772 1, /* xorl */
773 0, /* cmp */
774 0, /* cmn */
775 1, /* orr */
776 1, /* mov */
777 1, /* bic */
778 1, /* mvn */
779};
3b46e624 780
d9ba4830
PB
781/* Set PC and Thumb state from an immediate address. */
782static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 783{
b26eefb6 784 TCGv tmp;
99c475ab 785
b26eefb6
PB
786 s->is_jmp = DISAS_UPDATE;
787 tmp = new_tmp();
d9ba4830
PB
788 if (s->thumb != (addr & 1)) {
789 tcg_gen_movi_i32(tmp, addr & 1);
790 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
791 }
792 tcg_gen_movi_i32(tmp, addr & ~1);
793 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
b26eefb6 794 dead_tmp(tmp);
d9ba4830
PB
795}
796
797/* Set PC and Thumb state from var. var is marked as dead. */
798static inline void gen_bx(DisasContext *s, TCGv var)
799{
800 TCGv tmp;
801
802 s->is_jmp = DISAS_UPDATE;
803 tmp = new_tmp();
804 tcg_gen_andi_i32(tmp, var, 1);
805 store_cpu_field(tmp, thumb);
806 tcg_gen_andi_i32(var, var, ~1);
807 store_cpu_field(var, regs[15]);
808}
809
810/* TODO: This should be removed. Use gen_bx instead. */
811static inline void gen_bx_T0(DisasContext *s)
812{
813 TCGv tmp = new_tmp();
814 tcg_gen_mov_i32(tmp, cpu_T[0]);
815 gen_bx(s, tmp);
b26eefb6 816}
b5ff1b31
FB
817
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
/* System mode: pick the user/kernel access op and note that this TB
   performs a memory access.  */
#define gen_ldst(name, s) do { \
    s->is_mem = 1; \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
b0109805
PB
829static inline TCGv gen_ld8s(TCGv addr, int index)
830{
831 TCGv tmp = new_tmp();
832 tcg_gen_qemu_ld8s(tmp, addr, index);
833 return tmp;
834}
835static inline TCGv gen_ld8u(TCGv addr, int index)
836{
837 TCGv tmp = new_tmp();
838 tcg_gen_qemu_ld8u(tmp, addr, index);
839 return tmp;
840}
841static inline TCGv gen_ld16s(TCGv addr, int index)
842{
843 TCGv tmp = new_tmp();
844 tcg_gen_qemu_ld16s(tmp, addr, index);
845 return tmp;
846}
847static inline TCGv gen_ld16u(TCGv addr, int index)
848{
849 TCGv tmp = new_tmp();
850 tcg_gen_qemu_ld16u(tmp, addr, index);
851 return tmp;
852}
853static inline TCGv gen_ld32(TCGv addr, int index)
854{
855 TCGv tmp = new_tmp();
856 tcg_gen_qemu_ld32u(tmp, addr, index);
857 return tmp;
858}
859static inline void gen_st8(TCGv val, TCGv addr, int index)
860{
861 tcg_gen_qemu_st8(val, addr, index);
862 dead_tmp(val);
863}
864static inline void gen_st16(TCGv val, TCGv addr, int index)
865{
866 tcg_gen_qemu_st16(val, addr, index);
867 dead_tmp(val);
868}
869static inline void gen_st32(TCGv val, TCGv addr, int index)
870{
871 tcg_gen_qemu_st32(val, addr, index);
872 dead_tmp(val);
873}
b5ff1b31 874
2c0262af
FB
875static inline void gen_movl_T0_reg(DisasContext *s, int reg)
876{
b26eefb6 877 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
878}
879
880static inline void gen_movl_T1_reg(DisasContext *s, int reg)
881{
b26eefb6 882 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
883}
884
885static inline void gen_movl_T2_reg(DisasContext *s, int reg)
886{
b26eefb6
PB
887 load_reg_var(s, cpu_T[2], reg);
888}
889
5e3f878a
PB
890static inline void gen_set_pc_im(uint32_t val)
891{
892 TCGv tmp = new_tmp();
893 tcg_gen_movi_i32(tmp, val);
894 store_cpu_field(tmp, regs[15]);
895}
896
2c0262af
FB
897static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
898{
b26eefb6
PB
899 TCGv tmp;
900 if (reg == 15) {
901 tmp = new_tmp();
902 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
903 } else {
904 tmp = cpu_T[t];
905 }
906 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
2c0262af 907 if (reg == 15) {
b26eefb6 908 dead_tmp(tmp);
2c0262af
FB
909 s->is_jmp = DISAS_JUMP;
910 }
911}
912
913static inline void gen_movl_reg_T0(DisasContext *s, int reg)
914{
915 gen_movl_reg_TN(s, reg, 0);
916}
917
918static inline void gen_movl_reg_T1(DisasContext *s, int reg)
919{
920 gen_movl_reg_TN(s, reg, 1);
921}
922
b5ff1b31
FB
923/* Force a TB lookup after an instruction that changes the CPU state. */
924static inline void gen_lookup_tb(DisasContext *s)
925{
926 gen_op_movl_T0_im(s->pc);
927 gen_movl_reg_T0(s, 15);
928 s->is_jmp = DISAS_UPDATE;
929}
930
b0109805
PB
931static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
932 TCGv var)
2c0262af 933{
1e8d4eec 934 int val, rm, shift, shiftop;
b26eefb6 935 TCGv offset;
2c0262af
FB
936
937 if (!(insn & (1 << 25))) {
938 /* immediate */
939 val = insn & 0xfff;
940 if (!(insn & (1 << 23)))
941 val = -val;
537730b9 942 if (val != 0)
b0109805 943 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
944 } else {
945 /* shift/register */
946 rm = (insn) & 0xf;
947 shift = (insn >> 7) & 0x1f;
1e8d4eec 948 shiftop = (insn >> 5) & 3;
b26eefb6 949 offset = load_reg(s, rm);
9a119ff6 950 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 951 if (!(insn & (1 << 23)))
b0109805 952 tcg_gen_sub_i32(var, var, offset);
2c0262af 953 else
b0109805 954 tcg_gen_add_i32(var, var, offset);
b26eefb6 955 dead_tmp(offset);
2c0262af
FB
956 }
957}
958
191f9a93 959static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 960 int extra, TCGv var)
2c0262af
FB
961{
962 int val, rm;
b26eefb6 963 TCGv offset;
3b46e624 964
2c0262af
FB
965 if (insn & (1 << 22)) {
966 /* immediate */
967 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
968 if (!(insn & (1 << 23)))
969 val = -val;
18acad92 970 val += extra;
537730b9 971 if (val != 0)
b0109805 972 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
973 } else {
974 /* register */
191f9a93 975 if (extra)
b0109805 976 tcg_gen_addi_i32(var, var, extra);
2c0262af 977 rm = (insn) & 0xf;
b26eefb6 978 offset = load_reg(s, rm);
2c0262af 979 if (!(insn & (1 << 23)))
b0109805 980 tcg_gen_sub_i32(var, var, offset);
2c0262af 981 else
b0109805 982 tcg_gen_add_i32(var, var, offset);
b26eefb6 983 dead_tmp(offset);
2c0262af
FB
984 }
985}
986
4373f3ce
PB
987#define VFP_OP2(name) \
988static inline void gen_vfp_##name(int dp) \
989{ \
990 if (dp) \
991 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
992 else \
993 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
994}
995
5b340b51 996#define VFP_OP1(name) \
9ee6e8bb
PB
997static inline void gen_vfp_##name(int dp, int arg) \
998{ \
999 if (dp) \
1000 gen_op_vfp_##name##d(arg); \
1001 else \
1002 gen_op_vfp_##name##s(arg); \
1003}
1004
4373f3ce
PB
1005VFP_OP2(add)
1006VFP_OP2(sub)
1007VFP_OP2(mul)
1008VFP_OP2(div)
1009
1010#undef VFP_OP2
1011
1012static inline void gen_vfp_abs(int dp)
1013{
1014 if (dp)
1015 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1016 else
1017 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1018}
1019
1020static inline void gen_vfp_neg(int dp)
1021{
1022 if (dp)
1023 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1024 else
1025 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1026}
1027
1028static inline void gen_vfp_sqrt(int dp)
1029{
1030 if (dp)
1031 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1032 else
1033 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1034}
1035
1036static inline void gen_vfp_cmp(int dp)
1037{
1038 if (dp)
1039 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1040 else
1041 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1042}
1043
1044static inline void gen_vfp_cmpe(int dp)
1045{
1046 if (dp)
1047 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1048 else
1049 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1050}
1051
1052static inline void gen_vfp_F1_ld0(int dp)
1053{
1054 if (dp)
5b340b51 1055 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1056 else
5b340b51 1057 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1058}
1059
1060static inline void gen_vfp_uito(int dp)
1061{
1062 if (dp)
1063 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1064 else
1065 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1066}
1067
1068static inline void gen_vfp_sito(int dp)
1069{
1070 if (dp)
66230e0d 1071 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 1072 else
66230e0d 1073 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
1074}
1075
1076static inline void gen_vfp_toui(int dp)
1077{
1078 if (dp)
1079 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1080 else
1081 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1082}
1083
1084static inline void gen_vfp_touiz(int dp)
1085{
1086 if (dp)
1087 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1088 else
1089 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1090}
1091
1092static inline void gen_vfp_tosi(int dp)
1093{
1094 if (dp)
1095 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1096 else
1097 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1098}
1099
1100static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1101{
1102 if (dp)
4373f3ce 1103 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1104 else
4373f3ce
PB
1105 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1106}
1107
1108#define VFP_GEN_FIX(name) \
1109static inline void gen_vfp_##name(int dp, int shift) \
1110{ \
1111 if (dp) \
1112 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1113 else \
1114 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
9ee6e8bb 1115}
4373f3ce
PB
1116VFP_GEN_FIX(tosh)
1117VFP_GEN_FIX(tosl)
1118VFP_GEN_FIX(touh)
1119VFP_GEN_FIX(toul)
1120VFP_GEN_FIX(shto)
1121VFP_GEN_FIX(slto)
1122VFP_GEN_FIX(uhto)
1123VFP_GEN_FIX(ulto)
1124#undef VFP_GEN_FIX
9ee6e8bb 1125
b5ff1b31
FB
1126static inline void gen_vfp_ld(DisasContext *s, int dp)
1127{
1128 if (dp)
4373f3ce 1129 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1130 else
4373f3ce 1131 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1132}
1133
1134static inline void gen_vfp_st(DisasContext *s, int dp)
1135{
1136 if (dp)
4373f3ce 1137 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1138 else
4373f3ce 1139 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1140}
1141
8e96005d
FB
1142static inline long
1143vfp_reg_offset (int dp, int reg)
1144{
1145 if (dp)
1146 return offsetof(CPUARMState, vfp.regs[reg]);
1147 else if (reg & 1) {
1148 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1149 + offsetof(CPU_DoubleU, l.upper);
1150 } else {
1151 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1152 + offsetof(CPU_DoubleU, l.lower);
1153 }
1154}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1165
/* FIXME: Remove these. */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
/* Load/store one 32-bit lane of a NEON register into/from a T register. */
#define NEON_GET_REG(T, reg, n) \
    tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
    tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
8f8e3aa4
PB
1174static TCGv neon_load_reg(int reg, int pass)
1175{
1176 TCGv tmp = new_tmp();
1177 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1178 return tmp;
1179}
1180
1181static void neon_store_reg(int reg, int pass, TCGv var)
1182{
1183 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1184 dead_tmp(var);
1185}
1186
ad69471c
PB
1187static inline void neon_load_reg64(TCGv var, int reg)
1188{
1189 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1190}
1191
1192static inline void neon_store_reg64(TCGv var, int reg)
1193{
1194 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1195}
1196
/* On this target, float loads/stores are plain i32/i64 TCG accesses. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
b7bcbe95
FB
1202static inline void gen_mov_F0_vreg(int dp, int reg)
1203{
1204 if (dp)
4373f3ce 1205 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1206 else
4373f3ce 1207 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1208}
1209
1210static inline void gen_mov_F1_vreg(int dp, int reg)
1211{
1212 if (dp)
4373f3ce 1213 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1214 else
4373f3ce 1215 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1216}
1217
1218static inline void gen_mov_vreg_F0(int dp, int reg)
1219{
1220 if (dp)
4373f3ce 1221 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1222 else
4373f3ce 1223 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1224}
1225
18c9b560
AZ
1226#define ARM_CP_RW_BIT (1 << 20)
1227
e677137d
PB
1228static inline void iwmmxt_load_reg(TCGv var, int reg)
1229{
1230 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1231}
1232
1233static inline void iwmmxt_store_reg(TCGv var, int reg)
1234{
1235 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1236}
1237
1238static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1239{
1240 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1241}
1242
1243static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1244{
1245 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1246}
1247
1248static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1249{
1250 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1251}
1252
1253static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1254{
1255 iwmmxt_store_reg(cpu_M0, rn);
1256}
1257
1258static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1259{
1260 iwmmxt_load_reg(cpu_M0, rn);
1261}
1262
1263static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1264{
1265 iwmmxt_load_reg(cpu_V1, rn);
1266 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1267}
1268
1269static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1270{
1271 iwmmxt_load_reg(cpu_V1, rn);
1272 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1273}
1274
1275static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1276{
1277 iwmmxt_load_reg(cpu_V1, rn);
1278 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1279}
1280
/* Emit gen_op_iwmmxt_<name>_M0_wRn(): M0 = helper(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper additionally takes cpu_env (it reads or
 * writes CPU state; see the helper definitions).
 */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate byte/word/long variants of an env-taking op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand env-taking op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1305
1306IWMMXT_OP(maddsq)
1307IWMMXT_OP(madduq)
1308IWMMXT_OP(sadb)
1309IWMMXT_OP(sadw)
1310IWMMXT_OP(mulslw)
1311IWMMXT_OP(mulshw)
1312IWMMXT_OP(mululw)
1313IWMMXT_OP(muluhw)
1314IWMMXT_OP(macsw)
1315IWMMXT_OP(macuw)
1316
1317IWMMXT_OP_ENV_SIZE(unpackl)
1318IWMMXT_OP_ENV_SIZE(unpackh)
1319
1320IWMMXT_OP_ENV1(unpacklub)
1321IWMMXT_OP_ENV1(unpackluw)
1322IWMMXT_OP_ENV1(unpacklul)
1323IWMMXT_OP_ENV1(unpackhub)
1324IWMMXT_OP_ENV1(unpackhuw)
1325IWMMXT_OP_ENV1(unpackhul)
1326IWMMXT_OP_ENV1(unpacklsb)
1327IWMMXT_OP_ENV1(unpacklsw)
1328IWMMXT_OP_ENV1(unpacklsl)
1329IWMMXT_OP_ENV1(unpackhsb)
1330IWMMXT_OP_ENV1(unpackhsw)
1331IWMMXT_OP_ENV1(unpackhsl)
1332
1333IWMMXT_OP_ENV_SIZE(cmpeq)
1334IWMMXT_OP_ENV_SIZE(cmpgtu)
1335IWMMXT_OP_ENV_SIZE(cmpgts)
1336
1337IWMMXT_OP_ENV_SIZE(mins)
1338IWMMXT_OP_ENV_SIZE(minu)
1339IWMMXT_OP_ENV_SIZE(maxs)
1340IWMMXT_OP_ENV_SIZE(maxu)
1341
1342IWMMXT_OP_ENV_SIZE(subn)
1343IWMMXT_OP_ENV_SIZE(addn)
1344IWMMXT_OP_ENV_SIZE(subu)
1345IWMMXT_OP_ENV_SIZE(addu)
1346IWMMXT_OP_ENV_SIZE(subs)
1347IWMMXT_OP_ENV_SIZE(adds)
1348
1349IWMMXT_OP_ENV(avgb0)
1350IWMMXT_OP_ENV(avgb1)
1351IWMMXT_OP_ENV(avgw0)
1352IWMMXT_OP_ENV(avgw1)
1353
1354IWMMXT_OP(msadb)
1355
1356IWMMXT_OP_ENV(packuw)
1357IWMMXT_OP_ENV(packul)
1358IWMMXT_OP_ENV(packuq)
1359IWMMXT_OP_ENV(packsw)
1360IWMMXT_OP_ENV(packsl)
1361IWMMXT_OP_ENV(packsq)
1362
1363static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1364{
1365 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1366}
1367
1368static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1369{
1370 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1371}
1372
1373static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1374{
1375 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1376}
1377
1378static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1379{
1380 iwmmxt_load_reg(cpu_V1, rn);
1381 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1382}
1383
1384static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1385{
1386 TCGv tmp = tcg_const_i32(shift);
1387 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1388}
1389
1390static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1391{
1392 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1393 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1394 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1395}
1396
1397static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1398{
1399 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1400 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1401 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1402}
1403
1404static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1405{
1406 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1407 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1408 if (mask != ~0u)
1409 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
1410}
1411
1412static void gen_op_iwmmxt_set_mup(void)
1413{
1414 TCGv tmp;
1415 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1416 tcg_gen_ori_i32(tmp, tmp, 2);
1417 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1418}
1419
1420static void gen_op_iwmmxt_set_cup(void)
1421{
1422 TCGv tmp;
1423 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1424 tcg_gen_ori_i32(tmp, tmp, 1);
1425 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1426}
1427
1428static void gen_op_iwmmxt_setpsr_nz(void)
1429{
1430 TCGv tmp = new_tmp();
1431 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1432 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1433}
1434
1435static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1436{
1437 iwmmxt_load_reg(cpu_V1, rn);
86831435 1438 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1439 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1440}
1441
1442
1443static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1444{
1445 iwmmxt_load_reg(cpu_V0, rn);
1446 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1447 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1448 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1449}
1450
1451static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1452{
1453 tcg_gen_extu_i32_i64(cpu_V0, cpu_T[0]);
1454 tcg_gen_extu_i32_i64(cpu_V1, cpu_T[0]);
1455 tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
1456 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
1457 iwmmxt_store_reg(cpu_V0, rn);
1458}
1459
18c9b560
AZ
1460static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1461{
1462 int rd;
1463 uint32_t offset;
1464
1465 rd = (insn >> 16) & 0xf;
1466 gen_movl_T1_reg(s, rd);
1467
1468 offset = (insn & 0xff) << ((insn >> 7) & 2);
1469 if (insn & (1 << 24)) {
1470 /* Pre indexed */
1471 if (insn & (1 << 23))
1472 gen_op_addl_T1_im(offset);
1473 else
1474 gen_op_addl_T1_im(-offset);
1475
1476 if (insn & (1 << 21))
1477 gen_movl_reg_T1(s, rd);
1478 } else if (insn & (1 << 21)) {
1479 /* Post indexed */
1480 if (insn & (1 << 23))
1481 gen_op_movl_T0_im(offset);
1482 else
1483 gen_op_movl_T0_im(- offset);
1484 gen_op_addl_T0_T1();
1485 gen_movl_reg_T0(s, rd);
1486 } else if (!(insn & (1 << 23)))
1487 return 1;
1488 return 0;
1489}
1490
1491static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1492{
1493 int rd = (insn >> 0) & 0xf;
1494
1495 if (insn & (1 << 8))
1496 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1497 return 1;
1498 else
1499 gen_op_iwmmxt_movl_T0_wCx(rd);
1500 else
e677137d 1501 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1502
1503 gen_op_movl_T1_im(mask);
1504 gen_op_andl_T0_T1();
1505 return 0;
1506}
1507
1508/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1509 (ie. an undefined instruction). */
1510static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1511{
1512 int rd, wrd;
1513 int rdhi, rdlo, rd0, rd1, i;
b0109805 1514 TCGv tmp;
18c9b560
AZ
1515
1516 if ((insn & 0x0e000e00) == 0x0c000000) {
1517 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1518 wrd = insn & 0xf;
1519 rdlo = (insn >> 12) & 0xf;
1520 rdhi = (insn >> 16) & 0xf;
1521 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1522 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1523 gen_movl_reg_T0(s, rdlo);
1524 gen_movl_reg_T1(s, rdhi);
1525 } else { /* TMCRR */
1526 gen_movl_T0_reg(s, rdlo);
1527 gen_movl_T1_reg(s, rdhi);
e677137d 1528 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1529 gen_op_iwmmxt_set_mup();
1530 }
1531 return 0;
1532 }
1533
1534 wrd = (insn >> 12) & 0xf;
1535 if (gen_iwmmxt_address(s, insn))
1536 return 1;
1537 if (insn & ARM_CP_RW_BIT) {
1538 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1539 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1540 tcg_gen_mov_i32(cpu_T[0], tmp);
1541 dead_tmp(tmp);
18c9b560
AZ
1542 gen_op_iwmmxt_movl_wCx_T0(wrd);
1543 } else {
e677137d
PB
1544 i = 1;
1545 if (insn & (1 << 8)) {
1546 if (insn & (1 << 22)) { /* WLDRD */
1547 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1548 i = 0;
1549 } else { /* WLDRW wRd */
1550 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1551 }
1552 } else {
1553 if (insn & (1 << 22)) { /* WLDRH */
1554 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1555 } else { /* WLDRB */
1556 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1557 }
1558 }
1559 if (i) {
1560 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1561 dead_tmp(tmp);
1562 }
18c9b560
AZ
1563 gen_op_iwmmxt_movq_wRn_M0(wrd);
1564 }
1565 } else {
1566 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1567 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1568 tmp = new_tmp();
1569 tcg_gen_mov_i32(tmp, cpu_T[0]);
1570 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1571 } else {
1572 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1573 tmp = new_tmp();
1574 if (insn & (1 << 8)) {
1575 if (insn & (1 << 22)) { /* WSTRD */
1576 dead_tmp(tmp);
1577 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1578 } else { /* WSTRW wRd */
1579 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1580 gen_st32(tmp, cpu_T[1], IS_USER(s));
1581 }
1582 } else {
1583 if (insn & (1 << 22)) { /* WSTRH */
1584 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1585 gen_st16(tmp, cpu_T[1], IS_USER(s));
1586 } else { /* WSTRB */
1587 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1588 gen_st8(tmp, cpu_T[1], IS_USER(s));
1589 }
1590 }
18c9b560
AZ
1591 }
1592 }
1593 return 0;
1594 }
1595
1596 if ((insn & 0x0f000000) != 0x0e000000)
1597 return 1;
1598
1599 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1600 case 0x000: /* WOR */
1601 wrd = (insn >> 12) & 0xf;
1602 rd0 = (insn >> 0) & 0xf;
1603 rd1 = (insn >> 16) & 0xf;
1604 gen_op_iwmmxt_movq_M0_wRn(rd0);
1605 gen_op_iwmmxt_orq_M0_wRn(rd1);
1606 gen_op_iwmmxt_setpsr_nz();
1607 gen_op_iwmmxt_movq_wRn_M0(wrd);
1608 gen_op_iwmmxt_set_mup();
1609 gen_op_iwmmxt_set_cup();
1610 break;
1611 case 0x011: /* TMCR */
1612 if (insn & 0xf)
1613 return 1;
1614 rd = (insn >> 12) & 0xf;
1615 wrd = (insn >> 16) & 0xf;
1616 switch (wrd) {
1617 case ARM_IWMMXT_wCID:
1618 case ARM_IWMMXT_wCASF:
1619 break;
1620 case ARM_IWMMXT_wCon:
1621 gen_op_iwmmxt_set_cup();
1622 /* Fall through. */
1623 case ARM_IWMMXT_wCSSF:
1624 gen_op_iwmmxt_movl_T0_wCx(wrd);
1625 gen_movl_T1_reg(s, rd);
1626 gen_op_bicl_T0_T1();
1627 gen_op_iwmmxt_movl_wCx_T0(wrd);
1628 break;
1629 case ARM_IWMMXT_wCGR0:
1630 case ARM_IWMMXT_wCGR1:
1631 case ARM_IWMMXT_wCGR2:
1632 case ARM_IWMMXT_wCGR3:
1633 gen_op_iwmmxt_set_cup();
1634 gen_movl_reg_T0(s, rd);
1635 gen_op_iwmmxt_movl_wCx_T0(wrd);
1636 break;
1637 default:
1638 return 1;
1639 }
1640 break;
1641 case 0x100: /* WXOR */
1642 wrd = (insn >> 12) & 0xf;
1643 rd0 = (insn >> 0) & 0xf;
1644 rd1 = (insn >> 16) & 0xf;
1645 gen_op_iwmmxt_movq_M0_wRn(rd0);
1646 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1647 gen_op_iwmmxt_setpsr_nz();
1648 gen_op_iwmmxt_movq_wRn_M0(wrd);
1649 gen_op_iwmmxt_set_mup();
1650 gen_op_iwmmxt_set_cup();
1651 break;
1652 case 0x111: /* TMRC */
1653 if (insn & 0xf)
1654 return 1;
1655 rd = (insn >> 12) & 0xf;
1656 wrd = (insn >> 16) & 0xf;
1657 gen_op_iwmmxt_movl_T0_wCx(wrd);
1658 gen_movl_reg_T0(s, rd);
1659 break;
1660 case 0x300: /* WANDN */
1661 wrd = (insn >> 12) & 0xf;
1662 rd0 = (insn >> 0) & 0xf;
1663 rd1 = (insn >> 16) & 0xf;
1664 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1665 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1666 gen_op_iwmmxt_andq_M0_wRn(rd1);
1667 gen_op_iwmmxt_setpsr_nz();
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 gen_op_iwmmxt_set_cup();
1671 break;
1672 case 0x200: /* WAND */
1673 wrd = (insn >> 12) & 0xf;
1674 rd0 = (insn >> 0) & 0xf;
1675 rd1 = (insn >> 16) & 0xf;
1676 gen_op_iwmmxt_movq_M0_wRn(rd0);
1677 gen_op_iwmmxt_andq_M0_wRn(rd1);
1678 gen_op_iwmmxt_setpsr_nz();
1679 gen_op_iwmmxt_movq_wRn_M0(wrd);
1680 gen_op_iwmmxt_set_mup();
1681 gen_op_iwmmxt_set_cup();
1682 break;
1683 case 0x810: case 0xa10: /* WMADD */
1684 wrd = (insn >> 12) & 0xf;
1685 rd0 = (insn >> 0) & 0xf;
1686 rd1 = (insn >> 16) & 0xf;
1687 gen_op_iwmmxt_movq_M0_wRn(rd0);
1688 if (insn & (1 << 21))
1689 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1690 else
1691 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1692 gen_op_iwmmxt_movq_wRn_M0(wrd);
1693 gen_op_iwmmxt_set_mup();
1694 break;
1695 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1696 wrd = (insn >> 12) & 0xf;
1697 rd0 = (insn >> 16) & 0xf;
1698 rd1 = (insn >> 0) & 0xf;
1699 gen_op_iwmmxt_movq_M0_wRn(rd0);
1700 switch ((insn >> 22) & 3) {
1701 case 0:
1702 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1703 break;
1704 case 1:
1705 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1706 break;
1707 case 2:
1708 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1709 break;
1710 case 3:
1711 return 1;
1712 }
1713 gen_op_iwmmxt_movq_wRn_M0(wrd);
1714 gen_op_iwmmxt_set_mup();
1715 gen_op_iwmmxt_set_cup();
1716 break;
1717 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1718 wrd = (insn >> 12) & 0xf;
1719 rd0 = (insn >> 16) & 0xf;
1720 rd1 = (insn >> 0) & 0xf;
1721 gen_op_iwmmxt_movq_M0_wRn(rd0);
1722 switch ((insn >> 22) & 3) {
1723 case 0:
1724 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1725 break;
1726 case 1:
1727 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1728 break;
1729 case 2:
1730 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1731 break;
1732 case 3:
1733 return 1;
1734 }
1735 gen_op_iwmmxt_movq_wRn_M0(wrd);
1736 gen_op_iwmmxt_set_mup();
1737 gen_op_iwmmxt_set_cup();
1738 break;
1739 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1740 wrd = (insn >> 12) & 0xf;
1741 rd0 = (insn >> 16) & 0xf;
1742 rd1 = (insn >> 0) & 0xf;
1743 gen_op_iwmmxt_movq_M0_wRn(rd0);
1744 if (insn & (1 << 22))
1745 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1746 else
1747 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1748 if (!(insn & (1 << 20)))
1749 gen_op_iwmmxt_addl_M0_wRn(wrd);
1750 gen_op_iwmmxt_movq_wRn_M0(wrd);
1751 gen_op_iwmmxt_set_mup();
1752 break;
1753 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1754 wrd = (insn >> 12) & 0xf;
1755 rd0 = (insn >> 16) & 0xf;
1756 rd1 = (insn >> 0) & 0xf;
1757 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1758 if (insn & (1 << 21)) {
1759 if (insn & (1 << 20))
1760 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1761 else
1762 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1763 } else {
1764 if (insn & (1 << 20))
1765 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1766 else
1767 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1768 }
18c9b560
AZ
1769 gen_op_iwmmxt_movq_wRn_M0(wrd);
1770 gen_op_iwmmxt_set_mup();
1771 break;
1772 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1773 wrd = (insn >> 12) & 0xf;
1774 rd0 = (insn >> 16) & 0xf;
1775 rd1 = (insn >> 0) & 0xf;
1776 gen_op_iwmmxt_movq_M0_wRn(rd0);
1777 if (insn & (1 << 21))
1778 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1779 else
1780 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1781 if (!(insn & (1 << 20))) {
e677137d
PB
1782 iwmmxt_load_reg(cpu_V1, wrd);
1783 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1784 }
1785 gen_op_iwmmxt_movq_wRn_M0(wrd);
1786 gen_op_iwmmxt_set_mup();
1787 break;
1788 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1789 wrd = (insn >> 12) & 0xf;
1790 rd0 = (insn >> 16) & 0xf;
1791 rd1 = (insn >> 0) & 0xf;
1792 gen_op_iwmmxt_movq_M0_wRn(rd0);
1793 switch ((insn >> 22) & 3) {
1794 case 0:
1795 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1796 break;
1797 case 1:
1798 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1799 break;
1800 case 2:
1801 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1802 break;
1803 case 3:
1804 return 1;
1805 }
1806 gen_op_iwmmxt_movq_wRn_M0(wrd);
1807 gen_op_iwmmxt_set_mup();
1808 gen_op_iwmmxt_set_cup();
1809 break;
1810 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1811 wrd = (insn >> 12) & 0xf;
1812 rd0 = (insn >> 16) & 0xf;
1813 rd1 = (insn >> 0) & 0xf;
1814 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1815 if (insn & (1 << 22)) {
1816 if (insn & (1 << 20))
1817 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1818 else
1819 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1820 } else {
1821 if (insn & (1 << 20))
1822 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1823 else
1824 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1825 }
18c9b560
AZ
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 gen_op_iwmmxt_set_cup();
1829 break;
1830 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1831 wrd = (insn >> 12) & 0xf;
1832 rd0 = (insn >> 16) & 0xf;
1833 rd1 = (insn >> 0) & 0xf;
1834 gen_op_iwmmxt_movq_M0_wRn(rd0);
1835 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1836 gen_op_movl_T1_im(7);
1837 gen_op_andl_T0_T1();
1838 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1839 gen_op_iwmmxt_movq_wRn_M0(wrd);
1840 gen_op_iwmmxt_set_mup();
1841 break;
1842 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1843 rd = (insn >> 12) & 0xf;
1844 wrd = (insn >> 16) & 0xf;
1845 gen_movl_T0_reg(s, rd);
1846 gen_op_iwmmxt_movq_M0_wRn(wrd);
1847 switch ((insn >> 6) & 3) {
1848 case 0:
1849 gen_op_movl_T1_im(0xff);
1850 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1851 break;
1852 case 1:
1853 gen_op_movl_T1_im(0xffff);
1854 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1855 break;
1856 case 2:
1857 gen_op_movl_T1_im(0xffffffff);
1858 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1859 break;
1860 case 3:
1861 return 1;
1862 }
1863 gen_op_iwmmxt_movq_wRn_M0(wrd);
1864 gen_op_iwmmxt_set_mup();
1865 break;
1866 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1867 rd = (insn >> 12) & 0xf;
1868 wrd = (insn >> 16) & 0xf;
1869 if (rd == 15)
1870 return 1;
1871 gen_op_iwmmxt_movq_M0_wRn(wrd);
1872 switch ((insn >> 22) & 3) {
1873 case 0:
1874 if (insn & 8)
1875 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1876 else {
e677137d 1877 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1878 }
1879 break;
1880 case 1:
1881 if (insn & 8)
1882 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1883 else {
e677137d 1884 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1885 }
1886 break;
1887 case 2:
e677137d 1888 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1889 break;
1890 case 3:
1891 return 1;
1892 }
b26eefb6 1893 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1894 break;
1895 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1896 if ((insn & 0x000ff008) != 0x0003f000)
1897 return 1;
1898 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1899 switch ((insn >> 22) & 3) {
1900 case 0:
1901 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1902 break;
1903 case 1:
1904 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1905 break;
1906 case 2:
1907 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1908 break;
1909 case 3:
1910 return 1;
1911 }
1912 gen_op_shll_T1_im(28);
d9ba4830 1913 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1914 break;
1915 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1916 rd = (insn >> 12) & 0xf;
1917 wrd = (insn >> 16) & 0xf;
1918 gen_movl_T0_reg(s, rd);
1919 switch ((insn >> 6) & 3) {
1920 case 0:
e677137d 1921 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1922 break;
1923 case 1:
e677137d 1924 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1925 break;
1926 case 2:
e677137d 1927 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1928 break;
1929 case 3:
1930 return 1;
1931 }
1932 gen_op_iwmmxt_movq_wRn_M0(wrd);
1933 gen_op_iwmmxt_set_mup();
1934 break;
1935 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1936 if ((insn & 0x000ff00f) != 0x0003f000)
1937 return 1;
1938 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1939 switch ((insn >> 22) & 3) {
1940 case 0:
1941 for (i = 0; i < 7; i ++) {
1942 gen_op_shll_T1_im(4);
1943 gen_op_andl_T0_T1();
1944 }
1945 break;
1946 case 1:
1947 for (i = 0; i < 3; i ++) {
1948 gen_op_shll_T1_im(8);
1949 gen_op_andl_T0_T1();
1950 }
1951 break;
1952 case 2:
1953 gen_op_shll_T1_im(16);
1954 gen_op_andl_T0_T1();
1955 break;
1956 case 3:
1957 return 1;
1958 }
d9ba4830 1959 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1960 break;
1961 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1962 wrd = (insn >> 12) & 0xf;
1963 rd0 = (insn >> 16) & 0xf;
1964 gen_op_iwmmxt_movq_M0_wRn(rd0);
1965 switch ((insn >> 22) & 3) {
1966 case 0:
e677137d 1967 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1968 break;
1969 case 1:
e677137d 1970 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1971 break;
1972 case 2:
e677137d 1973 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1974 break;
1975 case 3:
1976 return 1;
1977 }
1978 gen_op_iwmmxt_movq_wRn_M0(wrd);
1979 gen_op_iwmmxt_set_mup();
1980 break;
1981 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1982 if ((insn & 0x000ff00f) != 0x0003f000)
1983 return 1;
1984 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1985 switch ((insn >> 22) & 3) {
1986 case 0:
1987 for (i = 0; i < 7; i ++) {
1988 gen_op_shll_T1_im(4);
1989 gen_op_orl_T0_T1();
1990 }
1991 break;
1992 case 1:
1993 for (i = 0; i < 3; i ++) {
1994 gen_op_shll_T1_im(8);
1995 gen_op_orl_T0_T1();
1996 }
1997 break;
1998 case 2:
1999 gen_op_shll_T1_im(16);
2000 gen_op_orl_T0_T1();
2001 break;
2002 case 3:
2003 return 1;
2004 }
d9ba4830 2005 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
2006 break;
2007 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2008 rd = (insn >> 12) & 0xf;
2009 rd0 = (insn >> 16) & 0xf;
2010 if ((insn & 0xf) != 0)
2011 return 1;
2012 gen_op_iwmmxt_movq_M0_wRn(rd0);
2013 switch ((insn >> 22) & 3) {
2014 case 0:
e677137d 2015 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
2016 break;
2017 case 1:
e677137d 2018 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
2019 break;
2020 case 2:
e677137d 2021 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
2022 break;
2023 case 3:
2024 return 1;
2025 }
2026 gen_movl_reg_T0(s, rd);
2027 break;
2028 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2029 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2030 wrd = (insn >> 12) & 0xf;
2031 rd0 = (insn >> 16) & 0xf;
2032 rd1 = (insn >> 0) & 0xf;
2033 gen_op_iwmmxt_movq_M0_wRn(rd0);
2034 switch ((insn >> 22) & 3) {
2035 case 0:
2036 if (insn & (1 << 21))
2037 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2038 else
2039 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2040 break;
2041 case 1:
2042 if (insn & (1 << 21))
2043 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2044 else
2045 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2046 break;
2047 case 2:
2048 if (insn & (1 << 21))
2049 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2050 else
2051 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2052 break;
2053 case 3:
2054 return 1;
2055 }
2056 gen_op_iwmmxt_movq_wRn_M0(wrd);
2057 gen_op_iwmmxt_set_mup();
2058 gen_op_iwmmxt_set_cup();
2059 break;
2060 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2061 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2062 wrd = (insn >> 12) & 0xf;
2063 rd0 = (insn >> 16) & 0xf;
2064 gen_op_iwmmxt_movq_M0_wRn(rd0);
2065 switch ((insn >> 22) & 3) {
2066 case 0:
2067 if (insn & (1 << 21))
2068 gen_op_iwmmxt_unpacklsb_M0();
2069 else
2070 gen_op_iwmmxt_unpacklub_M0();
2071 break;
2072 case 1:
2073 if (insn & (1 << 21))
2074 gen_op_iwmmxt_unpacklsw_M0();
2075 else
2076 gen_op_iwmmxt_unpackluw_M0();
2077 break;
2078 case 2:
2079 if (insn & (1 << 21))
2080 gen_op_iwmmxt_unpacklsl_M0();
2081 else
2082 gen_op_iwmmxt_unpacklul_M0();
2083 break;
2084 case 3:
2085 return 1;
2086 }
2087 gen_op_iwmmxt_movq_wRn_M0(wrd);
2088 gen_op_iwmmxt_set_mup();
2089 gen_op_iwmmxt_set_cup();
2090 break;
2091 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2092 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2093 wrd = (insn >> 12) & 0xf;
2094 rd0 = (insn >> 16) & 0xf;
2095 gen_op_iwmmxt_movq_M0_wRn(rd0);
2096 switch ((insn >> 22) & 3) {
2097 case 0:
2098 if (insn & (1 << 21))
2099 gen_op_iwmmxt_unpackhsb_M0();
2100 else
2101 gen_op_iwmmxt_unpackhub_M0();
2102 break;
2103 case 1:
2104 if (insn & (1 << 21))
2105 gen_op_iwmmxt_unpackhsw_M0();
2106 else
2107 gen_op_iwmmxt_unpackhuw_M0();
2108 break;
2109 case 2:
2110 if (insn & (1 << 21))
2111 gen_op_iwmmxt_unpackhsl_M0();
2112 else
2113 gen_op_iwmmxt_unpackhul_M0();
2114 break;
2115 case 3:
2116 return 1;
2117 }
2118 gen_op_iwmmxt_movq_wRn_M0(wrd);
2119 gen_op_iwmmxt_set_mup();
2120 gen_op_iwmmxt_set_cup();
2121 break;
2122 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2123 case 0x214: case 0x614: case 0xa14: case 0xe14:
2124 wrd = (insn >> 12) & 0xf;
2125 rd0 = (insn >> 16) & 0xf;
2126 gen_op_iwmmxt_movq_M0_wRn(rd0);
2127 if (gen_iwmmxt_shift(insn, 0xff))
2128 return 1;
2129 switch ((insn >> 22) & 3) {
2130 case 0:
2131 return 1;
2132 case 1:
e677137d 2133 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2134 break;
2135 case 2:
e677137d 2136 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2137 break;
2138 case 3:
e677137d 2139 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2140 break;
2141 }
2142 gen_op_iwmmxt_movq_wRn_M0(wrd);
2143 gen_op_iwmmxt_set_mup();
2144 gen_op_iwmmxt_set_cup();
2145 break;
2146 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2147 case 0x014: case 0x414: case 0x814: case 0xc14:
2148 wrd = (insn >> 12) & 0xf;
2149 rd0 = (insn >> 16) & 0xf;
2150 gen_op_iwmmxt_movq_M0_wRn(rd0);
2151 if (gen_iwmmxt_shift(insn, 0xff))
2152 return 1;
2153 switch ((insn >> 22) & 3) {
2154 case 0:
2155 return 1;
2156 case 1:
e677137d 2157 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2158 break;
2159 case 2:
e677137d 2160 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2161 break;
2162 case 3:
e677137d 2163 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2164 break;
2165 }
2166 gen_op_iwmmxt_movq_wRn_M0(wrd);
2167 gen_op_iwmmxt_set_mup();
2168 gen_op_iwmmxt_set_cup();
2169 break;
2170 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2171 case 0x114: case 0x514: case 0x914: case 0xd14:
2172 wrd = (insn >> 12) & 0xf;
2173 rd0 = (insn >> 16) & 0xf;
2174 gen_op_iwmmxt_movq_M0_wRn(rd0);
2175 if (gen_iwmmxt_shift(insn, 0xff))
2176 return 1;
2177 switch ((insn >> 22) & 3) {
2178 case 0:
2179 return 1;
2180 case 1:
e677137d 2181 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2182 break;
2183 case 2:
e677137d 2184 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2185 break;
2186 case 3:
e677137d 2187 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2188 break;
2189 }
2190 gen_op_iwmmxt_movq_wRn_M0(wrd);
2191 gen_op_iwmmxt_set_mup();
2192 gen_op_iwmmxt_set_cup();
2193 break;
2194 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2195 case 0x314: case 0x714: case 0xb14: case 0xf14:
2196 wrd = (insn >> 12) & 0xf;
2197 rd0 = (insn >> 16) & 0xf;
2198 gen_op_iwmmxt_movq_M0_wRn(rd0);
2199 switch ((insn >> 22) & 3) {
2200 case 0:
2201 return 1;
2202 case 1:
2203 if (gen_iwmmxt_shift(insn, 0xf))
2204 return 1;
e677137d 2205 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2206 break;
2207 case 2:
2208 if (gen_iwmmxt_shift(insn, 0x1f))
2209 return 1;
e677137d 2210 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2211 break;
2212 case 3:
2213 if (gen_iwmmxt_shift(insn, 0x3f))
2214 return 1;
e677137d 2215 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2216 break;
2217 }
2218 gen_op_iwmmxt_movq_wRn_M0(wrd);
2219 gen_op_iwmmxt_set_mup();
2220 gen_op_iwmmxt_set_cup();
2221 break;
2222 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2223 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2224 wrd = (insn >> 12) & 0xf;
2225 rd0 = (insn >> 16) & 0xf;
2226 rd1 = (insn >> 0) & 0xf;
2227 gen_op_iwmmxt_movq_M0_wRn(rd0);
2228 switch ((insn >> 22) & 3) {
2229 case 0:
2230 if (insn & (1 << 21))
2231 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2232 else
2233 gen_op_iwmmxt_minub_M0_wRn(rd1);
2234 break;
2235 case 1:
2236 if (insn & (1 << 21))
2237 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2238 else
2239 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2240 break;
2241 case 2:
2242 if (insn & (1 << 21))
2243 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2244 else
2245 gen_op_iwmmxt_minul_M0_wRn(rd1);
2246 break;
2247 case 3:
2248 return 1;
2249 }
2250 gen_op_iwmmxt_movq_wRn_M0(wrd);
2251 gen_op_iwmmxt_set_mup();
2252 break;
2253 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2254 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2255 wrd = (insn >> 12) & 0xf;
2256 rd0 = (insn >> 16) & 0xf;
2257 rd1 = (insn >> 0) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
2259 switch ((insn >> 22) & 3) {
2260 case 0:
2261 if (insn & (1 << 21))
2262 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2263 else
2264 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2265 break;
2266 case 1:
2267 if (insn & (1 << 21))
2268 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2269 else
2270 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2271 break;
2272 case 2:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2277 break;
2278 case 3:
2279 return 1;
2280 }
2281 gen_op_iwmmxt_movq_wRn_M0(wrd);
2282 gen_op_iwmmxt_set_mup();
2283 break;
2284 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2285 case 0x402: case 0x502: case 0x602: case 0x702:
2286 wrd = (insn >> 12) & 0xf;
2287 rd0 = (insn >> 16) & 0xf;
2288 rd1 = (insn >> 0) & 0xf;
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
2290 gen_op_movl_T0_im((insn >> 20) & 3);
2291 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2292 gen_op_iwmmxt_movq_wRn_M0(wrd);
2293 gen_op_iwmmxt_set_mup();
2294 break;
2295 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2296 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2297 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2298 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2299 wrd = (insn >> 12) & 0xf;
2300 rd0 = (insn >> 16) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 gen_op_iwmmxt_movq_M0_wRn(rd0);
2303 switch ((insn >> 20) & 0xf) {
2304 case 0x0:
2305 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2306 break;
2307 case 0x1:
2308 gen_op_iwmmxt_subub_M0_wRn(rd1);
2309 break;
2310 case 0x3:
2311 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2312 break;
2313 case 0x4:
2314 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2315 break;
2316 case 0x5:
2317 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2318 break;
2319 case 0x7:
2320 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2321 break;
2322 case 0x8:
2323 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2324 break;
2325 case 0x9:
2326 gen_op_iwmmxt_subul_M0_wRn(rd1);
2327 break;
2328 case 0xb:
2329 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2330 break;
2331 default:
2332 return 1;
2333 }
2334 gen_op_iwmmxt_movq_wRn_M0(wrd);
2335 gen_op_iwmmxt_set_mup();
2336 gen_op_iwmmxt_set_cup();
2337 break;
2338 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2339 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2340 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2341 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2342 wrd = (insn >> 12) & 0xf;
2343 rd0 = (insn >> 16) & 0xf;
2344 gen_op_iwmmxt_movq_M0_wRn(rd0);
2345 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2346 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2347 gen_op_iwmmxt_movq_wRn_M0(wrd);
2348 gen_op_iwmmxt_set_mup();
2349 gen_op_iwmmxt_set_cup();
2350 break;
2351 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2352 case 0x418: case 0x518: case 0x618: case 0x718:
2353 case 0x818: case 0x918: case 0xa18: case 0xb18:
2354 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2355 wrd = (insn >> 12) & 0xf;
2356 rd0 = (insn >> 16) & 0xf;
2357 rd1 = (insn >> 0) & 0xf;
2358 gen_op_iwmmxt_movq_M0_wRn(rd0);
2359 switch ((insn >> 20) & 0xf) {
2360 case 0x0:
2361 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2362 break;
2363 case 0x1:
2364 gen_op_iwmmxt_addub_M0_wRn(rd1);
2365 break;
2366 case 0x3:
2367 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2368 break;
2369 case 0x4:
2370 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2371 break;
2372 case 0x5:
2373 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2374 break;
2375 case 0x7:
2376 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2377 break;
2378 case 0x8:
2379 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2380 break;
2381 case 0x9:
2382 gen_op_iwmmxt_addul_M0_wRn(rd1);
2383 break;
2384 case 0xb:
2385 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2386 break;
2387 default:
2388 return 1;
2389 }
2390 gen_op_iwmmxt_movq_wRn_M0(wrd);
2391 gen_op_iwmmxt_set_mup();
2392 gen_op_iwmmxt_set_cup();
2393 break;
2394 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2395 case 0x408: case 0x508: case 0x608: case 0x708:
2396 case 0x808: case 0x908: case 0xa08: case 0xb08:
2397 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2398 wrd = (insn >> 12) & 0xf;
2399 rd0 = (insn >> 16) & 0xf;
2400 rd1 = (insn >> 0) & 0xf;
2401 gen_op_iwmmxt_movq_M0_wRn(rd0);
2402 if (!(insn & (1 << 20)))
2403 return 1;
2404 switch ((insn >> 22) & 3) {
2405 case 0:
2406 return 1;
2407 case 1:
2408 if (insn & (1 << 21))
2409 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2410 else
2411 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2412 break;
2413 case 2:
2414 if (insn & (1 << 21))
2415 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2416 else
2417 gen_op_iwmmxt_packul_M0_wRn(rd1);
2418 break;
2419 case 3:
2420 if (insn & (1 << 21))
2421 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2422 else
2423 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2424 break;
2425 }
2426 gen_op_iwmmxt_movq_wRn_M0(wrd);
2427 gen_op_iwmmxt_set_mup();
2428 gen_op_iwmmxt_set_cup();
2429 break;
2430 case 0x201: case 0x203: case 0x205: case 0x207:
2431 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2432 case 0x211: case 0x213: case 0x215: case 0x217:
2433 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2434 wrd = (insn >> 5) & 0xf;
2435 rd0 = (insn >> 12) & 0xf;
2436 rd1 = (insn >> 0) & 0xf;
2437 if (rd0 == 0xf || rd1 == 0xf)
2438 return 1;
2439 gen_op_iwmmxt_movq_M0_wRn(wrd);
2440 switch ((insn >> 16) & 0xf) {
2441 case 0x0: /* TMIA */
b26eefb6
PB
2442 gen_movl_T0_reg(s, rd0);
2443 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2444 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2445 break;
2446 case 0x8: /* TMIAPH */
b26eefb6
PB
2447 gen_movl_T0_reg(s, rd0);
2448 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2449 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2450 break;
2451 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2452 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2453 if (insn & (1 << 16))
2454 gen_op_shrl_T1_im(16);
2455 gen_op_movl_T0_T1();
b26eefb6 2456 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2457 if (insn & (1 << 17))
2458 gen_op_shrl_T1_im(16);
2459 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2460 break;
2461 default:
2462 return 1;
2463 }
2464 gen_op_iwmmxt_movq_wRn_M0(wrd);
2465 gen_op_iwmmxt_set_mup();
2466 break;
2467 default:
2468 return 1;
2469 }
2470
2471 return 0;
2472}
2473
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;       /* first source core register */
        rd1 = insn & 0xf;               /* second source core register */
        acc = (insn >> 5) & 7;          /* accumulator number */

        /* Only accumulator 0 exists on XScale.  */
        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            /* acc0 += (s32)rd0 * (s32)rd1 */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8:                                       /* MIAPH */
            /* Two packed signed 16x16 multiplies, both accumulated.  */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            /* Select top (bit set) or bottom halfword of each operand,
               then accumulate the signed 16x16 product.  */
            gen_movl_T1_reg(s, rd0);
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_movl_T1_reg(s, rd1);
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;      /* core reg for acc bits [39:32] */
        rdlo = (insn >> 12) & 0xf;      /* core reg for acc bits [31:0] */
        acc = insn & 7;

        /* Only accumulator 0 exists on XScale.  */
        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            /* Read the 64-bit accumulator into T0 (low) / T1 (high);
               only the low 8 bits of the high word are architected.  */
            gen_iwmmxt_movl_T0_T1_wRn(acc);
            gen_movl_reg_T0(s, rdlo);
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_movl_reg_T0(s, rdhi);
        } else {                                        /* MAR */
            gen_movl_T0_reg(s, rdlo);
            gen_movl_T1_reg(s, rdhi);
            gen_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
2546
c1713132
AZ
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    uint32_t rd = (insn >> 12) & 0xf;   /* core register operand */
    uint32_t cp = (insn >> 8) & 0xf;    /* coprocessor number */

    /* Generic coprocessor accesses are privileged.  */
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        /* MRC: coprocessor -> core register, via the board-registered
           read hook.  Undefined if no hook is installed for this cp.  */
        if (!env->cp[cp].cp_read)
            return 1;
        /* The helper may raise an exception, so the PC must be valid.  */
        gen_set_pc_im(s->pc);
        tmp = new_tmp();
        gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
        store_reg(s, rd, tmp);
    } else {
        /* MCR: core register -> coprocessor, via the write hook.  */
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
    }
    return 0;
}
2575
9ee6e8bb
PB
2576static int cp15_user_ok(uint32_t insn)
2577{
2578 int cpn = (insn >> 16) & 0xf;
2579 int cpm = insn & 0xf;
2580 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2581
2582 if (cpn == 13 && cpm == 0) {
2583 /* TLS register. */
2584 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2585 return 1;
2586 }
2587 if (cpn == 7) {
2588 /* ISB, DSB, DMB. */
2589 if ((cpm == 5 && op == 4)
2590 || (cpm == 10 && (op == 4 || op == 5)))
2591 return 1;
2592 }
2593 return 0;
2594}
2595
b5ff1b31
FB
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc: not implemented.  */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp: no cp15 data-processing ops exist.  */
        return 1;
    }
    /* User mode may only touch the whitelisted registers.  */
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt: stop translation and halt the CPU.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        /* MRC: cp15 register -> core register (or flags).  */
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
        /* If the destination register is r15 then sets condition codes.
           NOTE(review): the rd == 15 path discards the value without
           updating CPSR flags here — flag update presumably happens
           elsewhere or is unimplemented; confirm against callers.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        /* MCR: core register -> cp15 register.  */
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour. */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
            (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
2651
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction word.
   VFP_REG_SHR shifts right for positive n and left for negative n, so a
   single macro handles fields on either side of the target bit position.
   VFP_SREG builds a single-precision register number from a 4-bit field
   plus one extra low bit.  VFP_DREG builds a double-precision register
   number: with VFP3 the extra bit becomes bit 4 (D0-D31); without VFP3
   that bit must be zero (D0-D15) and the macro makes the enclosing
   function return 1 (undefined instruction) otherwise — so these macros
   may only be used inside a decoder returning int.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* D (destination), N and M (source) register fields of a VFP insn.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2671
4373f3ce
PB
2672/* Move between integer and VFP cores. */
2673static TCGv gen_vfp_mrs(void)
2674{
2675 TCGv tmp = new_tmp();
2676 tcg_gen_mov_i32(tmp, cpu_F0s);
2677 return tmp;
2678}
2679
/* Move a core temporary into the VFP scratch register F0s and release
   the temporary.  Counterpart of gen_vfp_mrs().  */
static void gen_vfp_msr(TCGv tmp)
{
    tcg_gen_mov_i32(cpu_F0s, tmp);
    dead_tmp(tmp);
}
2685
9ee6e8bb
PB
2686static inline int
2687vfp_enabled(CPUState * env)
2688{
2689 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2690}
2691
ad69471c
PB
2692static void gen_neon_dup_u8(TCGv var, int shift)
2693{
2694 TCGv tmp = new_tmp();
2695 if (shift)
2696 tcg_gen_shri_i32(var, var, shift);
86831435 2697 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2698 tcg_gen_shli_i32(tmp, var, 8);
2699 tcg_gen_or_i32(var, var, tmp);
2700 tcg_gen_shli_i32(tmp, var, 16);
2701 tcg_gen_or_i32(var, var, tmp);
2702 dead_tmp(tmp);
2703}
2704
2705static void gen_neon_dup_low16(TCGv var)
2706{
2707 TCGv tmp = new_tmp();
86831435 2708 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2709 tcg_gen_shli_i32(tmp, var, 16);
2710 tcg_gen_or_i32(var, var, tmp);
2711 dead_tmp(tmp);
2712}
2713
2714static void gen_neon_dup_high16(TCGv var)
2715{
2716 TCGv tmp = new_tmp();
2717 tcg_gen_andi_i32(var, var, 0xffff0000);
2718 tcg_gen_shri_i32(tmp, var, 16);
2719 tcg_gen_or_i32(var, var, tmp);
2720 dead_tmp(tmp);
2721}
2722
b7bcbe95
FB
2723/* Disassemble a VFP instruction. Returns nonzero if an error occured
2724 (ie. an undefined instruction). */
2725static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2726{
2727 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2728 int dp, veclen;
4373f3ce 2729 TCGv tmp;
ad69471c 2730 TCGv tmp2;
b7bcbe95 2731
40f137e1
PB
2732 if (!arm_feature(env, ARM_FEATURE_VFP))
2733 return 1;
2734
9ee6e8bb
PB
2735 if (!vfp_enabled(env)) {
2736 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2737 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2738 return 1;
2739 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2740 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2741 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2742 return 1;
2743 }
b7bcbe95
FB
2744 dp = ((insn & 0xf00) == 0xb00);
2745 switch ((insn >> 24) & 0xf) {
2746 case 0xe:
2747 if (insn & (1 << 4)) {
2748 /* single register transfer */
b7bcbe95
FB
2749 rd = (insn >> 12) & 0xf;
2750 if (dp) {
9ee6e8bb
PB
2751 int size;
2752 int pass;
2753
2754 VFP_DREG_N(rn, insn);
2755 if (insn & 0xf)
b7bcbe95 2756 return 1;
9ee6e8bb
PB
2757 if (insn & 0x00c00060
2758 && !arm_feature(env, ARM_FEATURE_NEON))
2759 return 1;
2760
2761 pass = (insn >> 21) & 1;
2762 if (insn & (1 << 22)) {
2763 size = 0;
2764 offset = ((insn >> 5) & 3) * 8;
2765 } else if (insn & (1 << 5)) {
2766 size = 1;
2767 offset = (insn & (1 << 6)) ? 16 : 0;
2768 } else {
2769 size = 2;
2770 offset = 0;
2771 }
18c9b560 2772 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2773 /* vfp->arm */
ad69471c 2774 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2775 switch (size) {
2776 case 0:
9ee6e8bb 2777 if (offset)
ad69471c 2778 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2779 if (insn & (1 << 23))
ad69471c 2780 gen_uxtb(tmp);
9ee6e8bb 2781 else
ad69471c 2782 gen_sxtb(tmp);
9ee6e8bb
PB
2783 break;
2784 case 1:
9ee6e8bb
PB
2785 if (insn & (1 << 23)) {
2786 if (offset) {
ad69471c 2787 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2788 } else {
ad69471c 2789 gen_uxth(tmp);
9ee6e8bb
PB
2790 }
2791 } else {
2792 if (offset) {
ad69471c 2793 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2794 } else {
ad69471c 2795 gen_sxth(tmp);
9ee6e8bb
PB
2796 }
2797 }
2798 break;
2799 case 2:
9ee6e8bb
PB
2800 break;
2801 }
ad69471c 2802 store_reg(s, rd, tmp);
b7bcbe95
FB
2803 } else {
2804 /* arm->vfp */
ad69471c 2805 tmp = load_reg(s, rd);
9ee6e8bb
PB
2806 if (insn & (1 << 23)) {
2807 /* VDUP */
2808 if (size == 0) {
ad69471c 2809 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2810 } else if (size == 1) {
ad69471c 2811 gen_neon_dup_low16(tmp);
9ee6e8bb 2812 }
ad69471c
PB
2813 tmp2 = new_tmp();
2814 tcg_gen_mov_i32(tmp2, tmp);
2815 neon_store_reg(rn, 0, tmp2);
2816 neon_store_reg(rn, 0, tmp);
9ee6e8bb
PB
2817 } else {
2818 /* VMOV */
2819 switch (size) {
2820 case 0:
ad69471c
PB
2821 tmp2 = neon_load_reg(rn, pass);
2822 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2823 dead_tmp(tmp2);
9ee6e8bb
PB
2824 break;
2825 case 1:
ad69471c
PB
2826 tmp2 = neon_load_reg(rn, pass);
2827 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2828 dead_tmp(tmp2);
9ee6e8bb
PB
2829 break;
2830 case 2:
9ee6e8bb
PB
2831 break;
2832 }
ad69471c 2833 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2834 }
b7bcbe95 2835 }
9ee6e8bb
PB
2836 } else { /* !dp */
2837 if ((insn & 0x6f) != 0x00)
2838 return 1;
2839 rn = VFP_SREG_N(insn);
18c9b560 2840 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2841 /* vfp->arm */
2842 if (insn & (1 << 21)) {
2843 /* system register */
40f137e1 2844 rn >>= 1;
9ee6e8bb 2845
b7bcbe95 2846 switch (rn) {
40f137e1 2847 case ARM_VFP_FPSID:
4373f3ce 2848 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2849 VFP3 restricts all id registers to privileged
2850 accesses. */
2851 if (IS_USER(s)
2852 && arm_feature(env, ARM_FEATURE_VFP3))
2853 return 1;
4373f3ce 2854 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2855 break;
40f137e1 2856 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2857 if (IS_USER(s))
2858 return 1;
4373f3ce 2859 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2860 break;
40f137e1
PB
2861 case ARM_VFP_FPINST:
2862 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2863 /* Not present in VFP3. */
2864 if (IS_USER(s)
2865 || arm_feature(env, ARM_FEATURE_VFP3))
2866 return 1;
4373f3ce 2867 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2868 break;
40f137e1 2869 case ARM_VFP_FPSCR:
601d70b9 2870 if (rd == 15) {
4373f3ce
PB
2871 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2872 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2873 } else {
2874 tmp = new_tmp();
2875 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2876 }
b7bcbe95 2877 break;
9ee6e8bb
PB
2878 case ARM_VFP_MVFR0:
2879 case ARM_VFP_MVFR1:
2880 if (IS_USER(s)
2881 || !arm_feature(env, ARM_FEATURE_VFP3))
2882 return 1;
4373f3ce 2883 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2884 break;
b7bcbe95
FB
2885 default:
2886 return 1;
2887 }
2888 } else {
2889 gen_mov_F0_vreg(0, rn);
4373f3ce 2890 tmp = gen_vfp_mrs();
b7bcbe95
FB
2891 }
2892 if (rd == 15) {
b5ff1b31 2893 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2894 gen_set_nzcv(tmp);
2895 dead_tmp(tmp);
2896 } else {
2897 store_reg(s, rd, tmp);
2898 }
b7bcbe95
FB
2899 } else {
2900 /* arm->vfp */
4373f3ce 2901 tmp = load_reg(s, rd);
b7bcbe95 2902 if (insn & (1 << 21)) {
40f137e1 2903 rn >>= 1;
b7bcbe95
FB
2904 /* system register */
2905 switch (rn) {
40f137e1 2906 case ARM_VFP_FPSID:
9ee6e8bb
PB
2907 case ARM_VFP_MVFR0:
2908 case ARM_VFP_MVFR1:
b7bcbe95
FB
2909 /* Writes are ignored. */
2910 break;
40f137e1 2911 case ARM_VFP_FPSCR:
4373f3ce
PB
2912 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2913 dead_tmp(tmp);
b5ff1b31 2914 gen_lookup_tb(s);
b7bcbe95 2915 break;
40f137e1 2916 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2917 if (IS_USER(s))
2918 return 1;
4373f3ce 2919 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2920 gen_lookup_tb(s);
2921 break;
2922 case ARM_VFP_FPINST:
2923 case ARM_VFP_FPINST2:
4373f3ce 2924 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2925 break;
b7bcbe95
FB
2926 default:
2927 return 1;
2928 }
2929 } else {
4373f3ce 2930 gen_vfp_msr(tmp);
b7bcbe95
FB
2931 gen_mov_vreg_F0(0, rn);
2932 }
2933 }
2934 }
2935 } else {
2936 /* data processing */
2937 /* The opcode is in bits 23, 21, 20 and 6. */
2938 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2939 if (dp) {
2940 if (op == 15) {
2941 /* rn is opcode */
2942 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2943 } else {
2944 /* rn is register number */
9ee6e8bb 2945 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2946 }
2947
2948 if (op == 15 && (rn == 15 || rn > 17)) {
2949 /* Integer or single precision destination. */
9ee6e8bb 2950 rd = VFP_SREG_D(insn);
b7bcbe95 2951 } else {
9ee6e8bb 2952 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2953 }
2954
2955 if (op == 15 && (rn == 16 || rn == 17)) {
2956 /* Integer source. */
2957 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2958 } else {
9ee6e8bb 2959 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2960 }
2961 } else {
9ee6e8bb 2962 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2963 if (op == 15 && rn == 15) {
2964 /* Double precision destination. */
9ee6e8bb
PB
2965 VFP_DREG_D(rd, insn);
2966 } else {
2967 rd = VFP_SREG_D(insn);
2968 }
2969 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2970 }
2971
2972 veclen = env->vfp.vec_len;
2973 if (op == 15 && rn > 3)
2974 veclen = 0;
2975
2976 /* Shut up compiler warnings. */
2977 delta_m = 0;
2978 delta_d = 0;
2979 bank_mask = 0;
3b46e624 2980
b7bcbe95
FB
2981 if (veclen > 0) {
2982 if (dp)
2983 bank_mask = 0xc;
2984 else
2985 bank_mask = 0x18;
2986
2987 /* Figure out what type of vector operation this is. */
2988 if ((rd & bank_mask) == 0) {
2989 /* scalar */
2990 veclen = 0;
2991 } else {
2992 if (dp)
2993 delta_d = (env->vfp.vec_stride >> 1) + 1;
2994 else
2995 delta_d = env->vfp.vec_stride + 1;
2996
2997 if ((rm & bank_mask) == 0) {
2998 /* mixed scalar/vector */
2999 delta_m = 0;
3000 } else {
3001 /* vector */
3002 delta_m = delta_d;
3003 }
3004 }
3005 }
3006
3007 /* Load the initial operands. */
3008 if (op == 15) {
3009 switch (rn) {
3010 case 16:
3011 case 17:
3012 /* Integer source */
3013 gen_mov_F0_vreg(0, rm);
3014 break;
3015 case 8:
3016 case 9:
3017 /* Compare */
3018 gen_mov_F0_vreg(dp, rd);
3019 gen_mov_F1_vreg(dp, rm);
3020 break;
3021 case 10:
3022 case 11:
3023 /* Compare with zero */
3024 gen_mov_F0_vreg(dp, rd);
3025 gen_vfp_F1_ld0(dp);
3026 break;
9ee6e8bb
PB
3027 case 20:
3028 case 21:
3029 case 22:
3030 case 23:
3031 /* Source and destination the same. */
3032 gen_mov_F0_vreg(dp, rd);
3033 break;
b7bcbe95
FB
3034 default:
3035 /* One source operand. */
3036 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3037 break;
b7bcbe95
FB
3038 }
3039 } else {
3040 /* Two source operands. */
3041 gen_mov_F0_vreg(dp, rn);
3042 gen_mov_F1_vreg(dp, rm);
3043 }
3044
3045 for (;;) {
3046 /* Perform the calculation. */
3047 switch (op) {
3048 case 0: /* mac: fd + (fn * fm) */
3049 gen_vfp_mul(dp);
3050 gen_mov_F1_vreg(dp, rd);
3051 gen_vfp_add(dp);
3052 break;
3053 case 1: /* nmac: fd - (fn * fm) */
3054 gen_vfp_mul(dp);
3055 gen_vfp_neg(dp);
3056 gen_mov_F1_vreg(dp, rd);
3057 gen_vfp_add(dp);
3058 break;
3059 case 2: /* msc: -fd + (fn * fm) */
3060 gen_vfp_mul(dp);
3061 gen_mov_F1_vreg(dp, rd);
3062 gen_vfp_sub(dp);
3063 break;
3064 case 3: /* nmsc: -fd - (fn * fm) */
3065 gen_vfp_mul(dp);
3066 gen_mov_F1_vreg(dp, rd);
3067 gen_vfp_add(dp);
3068 gen_vfp_neg(dp);
3069 break;
3070 case 4: /* mul: fn * fm */
3071 gen_vfp_mul(dp);
3072 break;
3073 case 5: /* nmul: -(fn * fm) */
3074 gen_vfp_mul(dp);
3075 gen_vfp_neg(dp);
3076 break;
3077 case 6: /* add: fn + fm */
3078 gen_vfp_add(dp);
3079 break;
3080 case 7: /* sub: fn - fm */
3081 gen_vfp_sub(dp);
3082 break;
3083 case 8: /* div: fn / fm */
3084 gen_vfp_div(dp);
3085 break;
9ee6e8bb
PB
3086 case 14: /* fconst */
3087 if (!arm_feature(env, ARM_FEATURE_VFP3))
3088 return 1;
3089
3090 n = (insn << 12) & 0x80000000;
3091 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3092 if (dp) {
3093 if (i & 0x40)
3094 i |= 0x3f80;
3095 else
3096 i |= 0x4000;
3097 n |= i << 16;
4373f3ce 3098 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3099 } else {
3100 if (i & 0x40)
3101 i |= 0x780;
3102 else
3103 i |= 0x800;
3104 n |= i << 19;
5b340b51 3105 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3106 }
9ee6e8bb 3107 break;
b7bcbe95
FB
3108 case 15: /* extension space */
3109 switch (rn) {
3110 case 0: /* cpy */
3111 /* no-op */
3112 break;
3113 case 1: /* abs */
3114 gen_vfp_abs(dp);
3115 break;
3116 case 2: /* neg */
3117 gen_vfp_neg(dp);
3118 break;
3119 case 3: /* sqrt */
3120 gen_vfp_sqrt(dp);
3121 break;
3122 case 8: /* cmp */
3123 gen_vfp_cmp(dp);
3124 break;
3125 case 9: /* cmpe */
3126 gen_vfp_cmpe(dp);
3127 break;
3128 case 10: /* cmpz */
3129 gen_vfp_cmp(dp);
3130 break;
3131 case 11: /* cmpez */
3132 gen_vfp_F1_ld0(dp);
3133 gen_vfp_cmpe(dp);
3134 break;
3135 case 15: /* single<->double conversion */
3136 if (dp)
4373f3ce 3137 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3138 else
4373f3ce 3139 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3140 break;
3141 case 16: /* fuito */
3142 gen_vfp_uito(dp);
3143 break;
3144 case 17: /* fsito */
3145 gen_vfp_sito(dp);
3146 break;
9ee6e8bb
PB
3147 case 20: /* fshto */
3148 if (!arm_feature(env, ARM_FEATURE_VFP3))
3149 return 1;
3150 gen_vfp_shto(dp, rm);
3151 break;
3152 case 21: /* fslto */
3153 if (!arm_feature(env, ARM_FEATURE_VFP3))
3154 return 1;
3155 gen_vfp_slto(dp, rm);
3156 break;
3157 case 22: /* fuhto */
3158 if (!arm_feature(env, ARM_FEATURE_VFP3))
3159 return 1;
3160 gen_vfp_uhto(dp, rm);
3161 break;
3162 case 23: /* fulto */
3163 if (!arm_feature(env, ARM_FEATURE_VFP3))
3164 return 1;
3165 gen_vfp_ulto(dp, rm);
3166 break;
b7bcbe95
FB
3167 case 24: /* ftoui */
3168 gen_vfp_toui(dp);
3169 break;
3170 case 25: /* ftouiz */
3171 gen_vfp_touiz(dp);
3172 break;
3173 case 26: /* ftosi */
3174 gen_vfp_tosi(dp);
3175 break;
3176 case 27: /* ftosiz */
3177 gen_vfp_tosiz(dp);
3178 break;
9ee6e8bb
PB
3179 case 28: /* ftosh */
3180 if (!arm_feature(env, ARM_FEATURE_VFP3))
3181 return 1;
3182 gen_vfp_tosh(dp, rm);
3183 break;
3184 case 29: /* ftosl */
3185 if (!arm_feature(env, ARM_FEATURE_VFP3))
3186 return 1;
3187 gen_vfp_tosl(dp, rm);
3188 break;
3189 case 30: /* ftouh */
3190 if (!arm_feature(env, ARM_FEATURE_VFP3))
3191 return 1;
3192 gen_vfp_touh(dp, rm);
3193 break;
3194 case 31: /* ftoul */
3195 if (!arm_feature(env, ARM_FEATURE_VFP3))
3196 return 1;
3197 gen_vfp_toul(dp, rm);
3198 break;
b7bcbe95
FB
3199 default: /* undefined */
3200 printf ("rn:%d\n", rn);
3201 return 1;
3202 }
3203 break;
3204 default: /* undefined */
3205 printf ("op:%d\n", op);
3206 return 1;
3207 }
3208
3209 /* Write back the result. */
3210 if (op == 15 && (rn >= 8 && rn <= 11))
3211 ; /* Comparison, do nothing. */
3212 else if (op == 15 && rn > 17)
3213 /* Integer result. */
3214 gen_mov_vreg_F0(0, rd);
3215 else if (op == 15 && rn == 15)
3216 /* conversion */
3217 gen_mov_vreg_F0(!dp, rd);
3218 else
3219 gen_mov_vreg_F0(dp, rd);
3220
3221 /* break out of the loop if we have finished */
3222 if (veclen == 0)
3223 break;
3224
3225 if (op == 15 && delta_m == 0) {
3226 /* single source one-many */
3227 while (veclen--) {
3228 rd = ((rd + delta_d) & (bank_mask - 1))
3229 | (rd & bank_mask);
3230 gen_mov_vreg_F0(dp, rd);
3231 }
3232 break;
3233 }
3234 /* Setup the next operands. */
3235 veclen--;
3236 rd = ((rd + delta_d) & (bank_mask - 1))
3237 | (rd & bank_mask);
3238
3239 if (op == 15) {
3240 /* One source operand. */
3241 rm = ((rm + delta_m) & (bank_mask - 1))
3242 | (rm & bank_mask);
3243 gen_mov_F0_vreg(dp, rm);
3244 } else {
3245 /* Two source operands. */
3246 rn = ((rn + delta_d) & (bank_mask - 1))
3247 | (rn & bank_mask);
3248 gen_mov_F0_vreg(dp, rn);
3249 if (delta_m) {
3250 rm = ((rm + delta_m) & (bank_mask - 1))
3251 | (rm & bank_mask);
3252 gen_mov_F1_vreg(dp, rm);
3253 }
3254 }
3255 }
3256 }
3257 break;
3258 case 0xc:
3259 case 0xd:
9ee6e8bb 3260 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3261 /* two-register transfer */
3262 rn = (insn >> 16) & 0xf;
3263 rd = (insn >> 12) & 0xf;
3264 if (dp) {
9ee6e8bb
PB
3265 VFP_DREG_M(rm, insn);
3266 } else {
3267 rm = VFP_SREG_M(insn);
3268 }
b7bcbe95 3269
18c9b560 3270 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3271 /* vfp->arm */
3272 if (dp) {
4373f3ce
PB
3273 gen_mov_F0_vreg(0, rm * 2);
3274 tmp = gen_vfp_mrs();
3275 store_reg(s, rd, tmp);
3276 gen_mov_F0_vreg(0, rm * 2 + 1);
3277 tmp = gen_vfp_mrs();
3278 store_reg(s, rn, tmp);
b7bcbe95
FB
3279 } else {
3280 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3281 tmp = gen_vfp_mrs();
3282 store_reg(s, rn, tmp);
b7bcbe95 3283 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3284 tmp = gen_vfp_mrs();
3285 store_reg(s, rd, tmp);
b7bcbe95
FB
3286 }
3287 } else {
3288 /* arm->vfp */
3289 if (dp) {
4373f3ce
PB
3290 tmp = load_reg(s, rd);
3291 gen_vfp_msr(tmp);
3292 gen_mov_vreg_F0(0, rm * 2);
3293 tmp = load_reg(s, rn);
3294 gen_vfp_msr(tmp);
3295 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3296 } else {
4373f3ce
PB
3297 tmp = load_reg(s, rn);
3298 gen_vfp_msr(tmp);
b7bcbe95 3299 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3300 tmp = load_reg(s, rd);
3301 gen_vfp_msr(tmp);
b7bcbe95
FB
3302 gen_mov_vreg_F0(0, rm + 1);
3303 }
3304 }
3305 } else {
3306 /* Load/store */
3307 rn = (insn >> 16) & 0xf;
3308 if (dp)
9ee6e8bb 3309 VFP_DREG_D(rd, insn);
b7bcbe95 3310 else
9ee6e8bb
PB
3311 rd = VFP_SREG_D(insn);
3312 if (s->thumb && rn == 15) {
3313 gen_op_movl_T1_im(s->pc & ~2);
3314 } else {
3315 gen_movl_T1_reg(s, rn);
3316 }
b7bcbe95
FB
3317 if ((insn & 0x01200000) == 0x01000000) {
3318 /* Single load/store */
3319 offset = (insn & 0xff) << 2;
3320 if ((insn & (1 << 23)) == 0)
3321 offset = -offset;
3322 gen_op_addl_T1_im(offset);
3323 if (insn & (1 << 20)) {
b5ff1b31 3324 gen_vfp_ld(s, dp);
b7bcbe95
FB
3325 gen_mov_vreg_F0(dp, rd);
3326 } else {
3327 gen_mov_F0_vreg(dp, rd);
b5ff1b31 3328 gen_vfp_st(s, dp);
b7bcbe95
FB
3329 }
3330 } else {
3331 /* load/store multiple */
3332 if (dp)
3333 n = (insn >> 1) & 0x7f;
3334 else
3335 n = insn & 0xff;
3336
3337 if (insn & (1 << 24)) /* pre-decrement */
3338 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3339
3340 if (dp)
3341 offset = 8;
3342 else
3343 offset = 4;
3344 for (i = 0; i < n; i++) {
18c9b560 3345 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3346 /* load */
b5ff1b31 3347 gen_vfp_ld(s, dp);
b7bcbe95
FB
3348 gen_mov_vreg_F0(dp, rd + i);
3349 } else {
3350 /* store */
3351 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3352 gen_vfp_st(s, dp);
b7bcbe95
FB
3353 }
3354 gen_op_addl_T1_im(offset);
3355 }
3356 if (insn & (1 << 21)) {
3357 /* writeback */
3358 if (insn & (1 << 24))
3359 offset = -offset * n;
3360 else if (dp && (insn & 1))
3361 offset = 4;
3362 else
3363 offset = 0;
3364
3365 if (offset != 0)
3366 gen_op_addl_T1_im(offset);
3367 gen_movl_reg_T1(s, rn);
3368 }
3369 }
3370 }
3371 break;
3372 default:
3373 /* Should never happen. */
3374 return 1;
3375 }
3376 return 0;
3377}
3378
/* Emit a jump to 'dest'.  When the destination lies in the same guest
   page as the current TB, use chainable goto_tb/exit_tb slot 'n' so the
   TBs can be linked directly; otherwise emit an unchained exit.  */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        /* Same page: chainable jump via slot n.  */
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((long)tb + n);
    } else {
        /* Cross-page: plain exit, no TB linking.  */
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
3393
8aaca4c0
FB
3394static inline void gen_jmp (DisasContext *s, uint32_t dest)
3395{
551bd27f 3396 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3397 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3398 if (s->thumb)
d9ba4830
PB
3399 dest |= 1;
3400 gen_bx_im(s, dest);
8aaca4c0 3401 } else {
6e256c93 3402 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3403 s->is_jmp = DISAS_TB_JUMP;
3404 }
3405}
3406
d9ba4830 3407static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3408{
ee097184 3409 if (x)
d9ba4830 3410 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3411 else
d9ba4830 3412 gen_sxth(t0);
ee097184 3413 if (y)
d9ba4830 3414 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3415 else
d9ba4830
PB
3416 gen_sxth(t1);
3417 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3418}
3419
3420/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3421static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3422 uint32_t mask;
3423
3424 mask = 0;
3425 if (flags & (1 << 0))
3426 mask |= 0xff;
3427 if (flags & (1 << 1))
3428 mask |= 0xff00;
3429 if (flags & (1 << 2))
3430 mask |= 0xff0000;
3431 if (flags & (1 << 3))
3432 mask |= 0xff000000;
9ee6e8bb 3433
2ae23e75 3434 /* Mask out undefined bits. */
9ee6e8bb
PB
3435 mask &= ~CPSR_RESERVED;
3436 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3437 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3438 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3439 mask &= ~CPSR_IT;
9ee6e8bb 3440 /* Mask out execution state bits. */
2ae23e75 3441 if (!spsr)
e160c51c 3442 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3443 /* Mask out privileged bits. */
3444 if (IS_USER(s))
9ee6e8bb 3445 mask &= CPSR_USER;
b5ff1b31
FB
3446 return mask;
3447}
3448
3449/* Returns nonzero if access to the PSR is not permitted. */
3450static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3451{
d9ba4830 3452 TCGv tmp;
b5ff1b31
FB
3453 if (spsr) {
3454 /* ??? This is also undefined in system mode. */
3455 if (IS_USER(s))
3456 return 1;
d9ba4830
PB
3457
3458 tmp = load_cpu_field(spsr);
3459 tcg_gen_andi_i32(tmp, tmp, ~mask);
3460 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3461 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3462 store_cpu_field(tmp, spsr);
b5ff1b31 3463 } else {
d9ba4830 3464 gen_set_cpsr(cpu_T[0], mask);
b5ff1b31
FB
3465 }
3466 gen_lookup_tb(s);
3467 return 0;
3468}
3469
9ee6e8bb 3470/* Generate an old-style exception return. */
b5ff1b31
FB
3471static void gen_exception_return(DisasContext *s)
3472{
d9ba4830 3473 TCGv tmp;
e22f8f39 3474 gen_movl_reg_T0(s, 15);
d9ba4830
PB
3475 tmp = load_cpu_field(spsr);
3476 gen_set_cpsr(tmp, 0xffffffff);
3477 dead_tmp(tmp);
b5ff1b31
FB
3478 s->is_jmp = DISAS_UPDATE;
3479}
3480
/* Generate a v6 exception return.  Marks both values as dead.
   CPSR is restored first, then PC is written; both TCG temporaries
   (pc and cpsr) are consumed by this function.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);   /* full-mask CPSR restore */
    dead_tmp(cpsr);
    store_reg(s, 15, pc);             /* store_reg takes ownership of pc */
    /* Execution state may have changed: end the TB and re-resolve.  */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3489
9ee6e8bb
PB
3490static inline void
3491gen_set_condexec (DisasContext *s)
3492{
3493 if (s->condexec_mask) {
8f01245e
PB
3494 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3495 TCGv tmp = new_tmp();
3496 tcg_gen_movi_i32(tmp, val);
d9ba4830 3497 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3498 }
3499}
3b46e624 3500
9ee6e8bb
PB
3501static void gen_nop_hint(DisasContext *s, int val)
3502{
3503 switch (val) {
3504 case 3: /* wfi */
8984bd2e 3505 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3506 s->is_jmp = DISAS_WFI;
3507 break;
3508 case 2: /* wfe */
3509 case 4: /* sev */
3510 /* TODO: Implement SEV and WFE. May help SMP performance. */
3511 default: /* nop */
3512 break;
3513 }
3514}
99c475ab 3515
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
/* (dest, src1, src2) triple over the 32-bit T0/T1 temporaries.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
/* Same, but with cpu_env inserted for helpers that need the CPU state.  */
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

/* (dest, src1, src2) triple over the 64-bit V0/V1 temporaries.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3524static inline int gen_neon_add(int size)
3525{
3526 switch (size) {
ad69471c
PB
3527 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3528 case 1: gen_helper_neon_add_u16(CPU_T001); break;
9ee6e8bb
PB
3529 case 2: gen_op_addl_T0_T1(); break;
3530 default: return 1;
3531 }
3532 return 0;
3533}
3534
ad69471c
PB
3535static inline void gen_neon_rsb(int size)
3536{
3537 switch (size) {
3538 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3539 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3540 case 2: gen_op_rsbl_T0_T1(); break;
3541 default: return;
3542 }
3543}
3544
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
/* Route the 32-bit NEON saturating ops through the scalar saturating
   helpers; the env argument 'e' is deliberately dropped.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)

/* Dispatch a NEON integer helper that needs cpu_env, selecting the
   s8/u8/s16/u16/s32/u32 variant from the local 'size' and 'u' variables.
   Expands to 'return 1' (invalid) for size==3.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, but for helpers without an env argument.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
3602
3603static inline void
3604gen_neon_movl_scratch_T0(int scratch)
3605{
3606 uint32_t offset;
3607
3608 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3609 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3610}
3611
3612static inline void
3613gen_neon_movl_scratch_T1(int scratch)
3614{
3615 uint32_t offset;
3616
3617 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3618 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3619}
3620
3621static inline void
3622gen_neon_movl_T0_scratch(int scratch)
3623{
3624 uint32_t offset;
3625
3626 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3627 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3628}
3629
3630static inline void
3631gen_neon_movl_T1_scratch(int scratch)
3632{
3633 uint32_t offset;
3634
3635 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3636 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3637}
3638
/* Load a NEON scalar operand into T0, replicated if it is a halfword.
   'reg' encodes both the D register and the element index; for the
   non-size-1 case the selected halfword is duplicated across both
   halves of T0.  NOTE(review): the mapping of (reg & 1) to
   dup_low16/dup_high16 looks inverted at first glance — confirm against
   the scalar encoding in the ARM ARM before changing anything.  */
static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        /* 32-bit scalar: reg>>1 selects the D register, reg&1 the element.  */
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        /* 16-bit scalar: pick the containing 32-bit element ...  */
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        /* ... then broadcast the selected halfword to both halves.  */
        if (reg & 1)
            gen_neon_dup_low16(cpu_T[0]);
        else
            gen_neon_dup_high16(cpu_T[0]);
    }
}
3651
3652static void gen_neon_unzip(int reg, int q, int tmp, int size)
3653{
3654 int n;
3655
3656 for (n = 0; n < q + 1; n += 2) {
3657 NEON_GET_REG(T0, reg, n);
3658 NEON_GET_REG(T0, reg, n + n);
3659 switch (size) {
ad69471c
PB
3660 case 0: gen_helper_neon_unzip_u8(); break;
3661 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3662 case 2: /* no-op */; break;
3663 default: abort();
3664 }
3665 gen_neon_movl_scratch_T0(tmp + n);
3666 gen_neon_movl_scratch_T1(tmp + n + 1);
3667 }
3668}
3669
/* Decode table for NEON "load/store all elements", indexed by the op
   field of the instruction (see disas_neon_ls_insn):
     nregs      - number of D registers transferred,
     interleave - element interleaving factor used to compute the
                  per-element memory stride,
     spacing    - register-number step between successive registers.  */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3687
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Three forms are handled: load/store of all elements (with optional
   interleaving), load of a single element replicated to all lanes, and
   load/store of a single element/lane.  T1 is used throughout as the
   memory address; 'stride' at the end holds the total transfer size for
   the optional base-register writeback.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    int n;
    TCGv tmp;
    TCGv tmp2;

    if (!vfp_enabled(env))
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;    /* base address register */
    rm = insn & 0xf;            /* writeback mode / index register */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10 || size == 3)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        gen_movl_T1_reg(s, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved transfers, restart the address at the
               correct offset within the structure for this register.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im((1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im(1 << size);
            }
            /* Each D register is transferred as two 32-bit passes.  */
            for (pass = 0; pass < 2; pass++) {
                if (size == 2) {
                    /* 32-bit elements: one access per pass.  */
                    if (load) {
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                        neon_store_reg(rd, pass, tmp);
                    } else {
                        tmp = neon_load_reg(rd, pass);
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                    }
                    gen_op_addl_T1_im(stride);
                } else if (size == 1) {
                    /* 16-bit elements: two accesses, packed into one
                       32-bit register half via bitfield insert.  */
                    if (load) {
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
                        tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
                        gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
                        dead_tmp(tmp2);
                        neon_store_reg(rd, pass, tmp);
                    } else {
                        tmp = neon_load_reg(rd, pass);
                        tmp2 = new_tmp();
                        tcg_gen_shri_i32(tmp2, tmp, 16);
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
                        gen_st16(tmp2, cpu_T[1], IS_USER(s));
                        gen_op_addl_T1_im(stride);
                    }
                } else /* size == 0 */ {
                    /* 8-bit elements: four accesses per pass.  */
                    if (load) {
                        TCGV_UNUSED(tmp2);
                        for (n = 0; n < 4; n++) {
                            tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                            gen_op_addl_T1_im(stride);
                            if (n == 0) {
                                tmp2 = tmp;    /* first byte seeds the word */
                            } else {
                                gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
                                dead_tmp(tmp);
                            }
                        }
                        neon_store_reg(rd, pass, tmp2);
                    } else {
                        tmp2 = neon_load_reg(rd, pass);
                        for (n = 0; n < 4; n++) {
                            tmp = new_tmp();
                            if (n == 0) {
                                tcg_gen_mov_i32(tmp, tmp2);
                            } else {
                                tcg_gen_shri_i32(tmp, tmp2, n * 8);
                            }
                            gen_st8(tmp, cpu_T[1], IS_USER(s));
                            gen_op_addl_T1_im(stride);
                        }
                        dead_tmp(tmp2);
                    }
                }
            }
            rd += neon_ls_element_type[op].spacing;
        }
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            if (!load)
                return 1;
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                    gen_neon_dup_u8(tmp, 0);
                    break;
                case 1:
                    tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                    gen_neon_dup_low16(tmp);
                    break;
                case 2:
                    /* NOTE(review): every other access in this function
                       uses cpu_T[1] as the address; cpu_T[0] here looks
                       like a latent bug — confirm against upstream.  */
                    tmp = gen_ld32(cpu_T[0], IS_USER(s));
                    break;
                case 3:
                    return 1;
                default: /* Avoid compiler warnings.  */
                    abort();
                }
                gen_op_addl_T1_im(1 << size);
                tmp2 = new_tmp();
                tcg_gen_mov_i32(tmp2, tmp);
                /* NOTE(review): both stores target element 0, so the
                   second overwrites the first; presumably the second
                   should write element 1 — confirm against upstream.  */
                neon_store_reg(rd, 0, tmp2);
                neon_store_reg(rd, 0, tmp);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    switch (size) {
                    case 0:
                        tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                        break;
                    case 1:
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                        break;
                    case 2:
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Insert the loaded sub-word into the existing
                           register contents at the lane offset.  */
                        tmp2 = neon_load_reg(rd, pass);
                        gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
                        dead_tmp(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_st8(tmp, cpu_T[1], IS_USER(s));
                        break;
                    case 1:
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                        break;
                    case 2:
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                        break;
                    }
                }
                rd += stride;
                gen_op_addl_T1_im(1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Base register writeback: rm == 13 means post-increment by the
           transfer size; any other rm adds an index register.  */
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            dead_tmp(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 3907
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.
   Computed as dest = (t & c) | (f & ~c); the operation order matters
   if any operands alias, so do not reorder these three ops.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    tcg_gen_and_i32(t, t, c);    /* bits taken from t where c is set */
    tcg_gen_bic_i32(f, f, c);    /* bits taken from f where c is clear */
    tcg_gen_or_i32(dest, t, f);
}
3915
ad69471c
PB
3916static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
3917{
3918 switch (size) {
3919 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3920 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3921 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3922 default: abort();
3923 }
3924}
3925
3926static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
3927{
3928 switch (size) {
3929 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3930 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3931 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3932 default: abort();
3933 }
3934}
3935
3936static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
3937{
3938 switch (size) {
3939 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3940 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3941 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3942 default: abort();
3943 }
3944}
3945
3946static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3947 int q, int u)
3948{
3949 if (q) {
3950 if (u) {
3951 switch (size) {
3952 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3953 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3954 default: abort();
3955 }
3956 } else {
3957 switch (size) {
3958 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3959 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3960 default: abort();
3961 }
3962 }
3963 } else {
3964 if (u) {
3965 switch (size) {
3966 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3967 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3968 default: abort();
3969 }
3970 } else {
3971 switch (size) {
3972 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3973 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3974 default: abort();
3975 }
3976 }
3977 }
3978}
3979
3980static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
3981{
3982 if (u) {
3983 switch (size) {
3984 case 0: gen_helper_neon_widen_u8(dest, src); break;
3985 case 1: gen_helper_neon_widen_u16(dest, src); break;
3986 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3987 default: abort();
3988 }
3989 } else {
3990 switch (size) {
3991 case 0: gen_helper_neon_widen_s8(dest, src); break;
3992 case 1: gen_helper_neon_widen_s16(dest, src); break;
3993 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3994 default: abort();
3995 }
3996 }
3997 dead_tmp(src);
3998}
3999
4000static inline void gen_neon_addl(int size)
4001{
4002 switch (size) {
4003 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4004 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4005 case 2: tcg_gen_add_i64(CPU_V001); break;
4006 default: abort();
4007 }
4008}
4009
4010static inline void gen_neon_subl(int size)
4011{
4012 switch (size) {
4013 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4014 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4015 case 2: tcg_gen_sub_i64(CPU_V001); break;
4016 default: abort();
4017 }
4018}
4019
4020static inline void gen_neon_negl(TCGv var, int size)
4021{
4022 switch (size) {
4023 case 0: gen_helper_neon_negl_u16(var, var); break;
4024 case 1: gen_helper_neon_negl_u32(var, var); break;
4025 case 2: gen_helper_neon_negl_u64(var, var); break;
4026 default: abort();
4027 }
4028}
4029
4030static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
4031{
4032 switch (size) {
4033 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4034 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4035 default: abort();
4036 }
4037}
4038
4039static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
4040{
4041 TCGv tmp;
4042
4043 switch ((size << 1) | u) {
4044 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4045 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4046 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4047 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4048 case 4:
4049 tmp = gen_muls_i64_i32(a, b);
4050 tcg_gen_mov_i64(dest, tmp);
4051 break;
4052 case 5:
4053 tmp = gen_mulu_i64_i32(a, b);
4054 tcg_gen_mov_i64(dest, tmp);
4055 break;
4056 default: abort();
4057 }
4058 if (size < 2) {
4059 dead_tmp(b);
4060 dead_tmp(a);
4061 }
4062}
4063
9ee6e8bb
PB
4064/* Translate a NEON data processing instruction. Return nonzero if the
4065 instruction is invalid.
ad69471c
PB
4066 We process data in a mixture of 32-bit and 64-bit chunks.
4067 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4068
9ee6e8bb
PB
4069static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4070{
4071 int op;
4072 int q;
4073 int rd, rn, rm;
4074 int size;
4075 int shift;
4076 int pass;
4077 int count;
4078 int pairwise;
4079 int u;
4080 int n;
4081 uint32_t imm;
8f8e3aa4
PB
4082 TCGv tmp;
4083 TCGv tmp2;
4084 TCGv tmp3;
9ee6e8bb
PB
4085
4086 if (!vfp_enabled(env))
4087 return 1;
4088 q = (insn & (1 << 6)) != 0;
4089 u = (insn >> 24) & 1;
4090 VFP_DREG_D(rd, insn);
4091 VFP_DREG_N(rn, insn);
4092 VFP_DREG_M(rm, insn);
4093 size = (insn >> 20) & 3;
4094 if ((insn & (1 << 23)) == 0) {
4095 /* Three register same length. */
4096 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4097 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4098 || op == 10 || op == 11 || op == 16)) {
4099 /* 64-bit element instructions. */
9ee6e8bb 4100 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4101 neon_load_reg64(cpu_V0, rn + pass);
4102 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4103 switch (op) {
4104 case 1: /* VQADD */
4105 if (u) {
ad69471c 4106 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4107 } else {
ad69471c 4108 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4109 }
9ee6e8bb
PB
4110 break;
4111 case 5: /* VQSUB */
4112 if (u) {
ad69471c
PB
4113 gen_helper_neon_sub_saturate_u64(CPU_V001);
4114 } else {
4115 gen_helper_neon_sub_saturate_s64(CPU_V001);
4116 }
4117 break;
4118 case 8: /* VSHL */
4119 if (u) {
4120 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4121 } else {
4122 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4123 }
4124 break;
4125 case 9: /* VQSHL */
4126 if (u) {
4127 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4128 cpu_V0, cpu_V0);
4129 } else {
4130 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4131 cpu_V1, cpu_V0);
4132 }
4133 break;
4134 case 10: /* VRSHL */
4135 if (u) {
4136 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4137 } else {
ad69471c
PB
4138 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4139 }
4140 break;
4141 case 11: /* VQRSHL */
4142 if (u) {
4143 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4144 cpu_V1, cpu_V0);
4145 } else {
4146 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4147 cpu_V1, cpu_V0);
1e8d4eec 4148 }
9ee6e8bb
PB
4149 break;
4150 case 16:
4151 if (u) {
ad69471c 4152 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4153 } else {
ad69471c 4154 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4155 }
4156 break;
4157 default:
4158 abort();
2c0262af 4159 }
ad69471c 4160 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4161 }
9ee6e8bb 4162 return 0;
2c0262af 4163 }
9ee6e8bb
PB
4164 switch (op) {
4165 case 8: /* VSHL */
4166 case 9: /* VQSHL */
4167 case 10: /* VRSHL */
ad69471c 4168 case 11: /* VQRSHL */
9ee6e8bb 4169 {
ad69471c
PB
4170 int rtmp;
4171 /* Shift instruction operands are reversed. */
4172 rtmp = rn;
9ee6e8bb 4173 rn = rm;
ad69471c 4174 rm = rtmp;
9ee6e8bb
PB
4175 pairwise = 0;
4176 }
2c0262af 4177 break;
9ee6e8bb
PB
4178 case 20: /* VPMAX */
4179 case 21: /* VPMIN */
4180 case 23: /* VPADD */
4181 pairwise = 1;
2c0262af 4182 break;
9ee6e8bb
PB
4183 case 26: /* VPADD (float) */
4184 pairwise = (u && size < 2);
2c0262af 4185 break;
9ee6e8bb
PB
4186 case 30: /* VPMIN/VPMAX (float) */
4187 pairwise = u;
2c0262af 4188 break;
9ee6e8bb
PB
4189 default:
4190 pairwise = 0;
2c0262af 4191 break;
9ee6e8bb
PB
4192 }
4193 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4194
4195 if (pairwise) {
4196 /* Pairwise. */
4197 if (q)
4198 n = (pass & 1) * 2;
2c0262af 4199 else
9ee6e8bb
PB
4200 n = 0;
4201 if (pass < q + 1) {
4202 NEON_GET_REG(T0, rn, n);
4203 NEON_GET_REG(T1, rn, n + 1);
4204 } else {
4205 NEON_GET_REG(T0, rm, n);
4206 NEON_GET_REG(T1, rm, n + 1);
4207 }
4208 } else {
4209 /* Elementwise. */
4210 NEON_GET_REG(T0, rn, pass);
4211 NEON_GET_REG(T1, rm, pass);
4212 }
4213 switch (op) {
4214 case 0: /* VHADD */
4215 GEN_NEON_INTEGER_OP(hadd);
4216 break;
4217 case 1: /* VQADD */
ad69471c 4218 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4219 break;
9ee6e8bb
PB
4220 case 2: /* VRHADD */
4221 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4222 break;
9ee6e8bb
PB
4223 case 3: /* Logic ops. */
4224 switch ((u << 2) | size) {
4225 case 0: /* VAND */
2c0262af 4226 gen_op_andl_T0_T1();
9ee6e8bb
PB
4227 break;
4228 case 1: /* BIC */
4229 gen_op_bicl_T0_T1();
4230 break;
4231 case 2: /* VORR */
4232 gen_op_orl_T0_T1();
4233 break;
4234 case 3: /* VORN */
4235 gen_op_notl_T1();
4236 gen_op_orl_T0_T1();
4237 break;
4238 case 4: /* VEOR */
4239 gen_op_xorl_T0_T1();
4240 break;
4241 case 5: /* VBSL */
8f8e3aa4
PB
4242 tmp = neon_load_reg(rd, pass);
4243 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4244 dead_tmp(tmp);
9ee6e8bb
PB
4245 break;
4246 case 6: /* VBIT */
8f8e3aa4
PB
4247 tmp = neon_load_reg(rd, pass);
4248 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4249 dead_tmp(tmp);
9ee6e8bb
PB
4250 break;
4251 case 7: /* VBIF */
8f8e3aa4
PB
4252 tmp = neon_load_reg(rd, pass);
4253 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4254 dead_tmp(tmp);
9ee6e8bb 4255 break;
2c0262af
FB
4256 }
4257 break;
9ee6e8bb
PB
4258 case 4: /* VHSUB */
4259 GEN_NEON_INTEGER_OP(hsub);
4260 break;
4261 case 5: /* VQSUB */
ad69471c 4262 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4263 break;
9ee6e8bb
PB
4264 case 6: /* VCGT */
4265 GEN_NEON_INTEGER_OP(cgt);
4266 break;
4267 case 7: /* VCGE */
4268 GEN_NEON_INTEGER_OP(cge);
4269 break;
4270 case 8: /* VSHL */
ad69471c 4271 GEN_NEON_INTEGER_OP(shl);
2c0262af 4272 break;
9ee6e8bb 4273 case 9: /* VQSHL */
ad69471c 4274 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4275 break;
9ee6e8bb 4276 case 10: /* VRSHL */
ad69471c 4277 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4278 break;
9ee6e8bb 4279 case 11: /* VQRSHL */
ad69471c 4280 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4281 break;
4282 case 12: /* VMAX */
4283 GEN_NEON_INTEGER_OP(max);
4284 break;
4285 case 13: /* VMIN */
4286 GEN_NEON_INTEGER_OP(min);
4287 break;
4288 case 14: /* VABD */
4289 GEN_NEON_INTEGER_OP(abd);
4290 break;
4291 case 15: /* VABA */
4292 GEN_NEON_INTEGER_OP(abd);
4293 NEON_GET_REG(T1, rd, pass);
4294 gen_neon_add(size);
4295 break;
4296 case 16:
4297 if (!u) { /* VADD */
4298 if (gen_neon_add(size))
4299 return 1;
4300 } else { /* VSUB */
4301 switch (size) {
ad69471c
PB
4302 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4303 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4304 case 2: gen_op_subl_T0_T1(); break;
4305 default: return 1;
4306 }
4307 }
4308 break;
4309 case 17:
4310 if (!u) { /* VTST */
4311 switch (size) {
ad69471c
PB
4312 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4313 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4314 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4315 default: return 1;
4316 }
4317 } else { /* VCEQ */
4318 switch (size) {
ad69471c
PB
4319 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4320 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4321 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4322 default: return 1;
4323 }
4324 }
4325 break;
4326 case 18: /* Multiply. */
4327 switch (size) {
ad69471c
PB
4328 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4329 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4330 case 2: gen_op_mul_T0_T1(); break;
4331 default: return 1;
4332 }
4333 NEON_GET_REG(T1, rd, pass);
4334 if (u) { /* VMLS */
ad69471c 4335 gen_neon_rsb(size);
9ee6e8bb
PB
4336 } else { /* VMLA */
4337 gen_neon_add(size);
4338 }
4339 break;
4340 case 19: /* VMUL */
4341 if (u) { /* polynomial */
ad69471c 4342 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4343 } else { /* Integer */
4344 switch (size) {
ad69471c
PB
4345 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4346 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4347 case 2: gen_op_mul_T0_T1(); break;
4348 default: return 1;
4349 }
4350 }
4351 break;
4352 case 20: /* VPMAX */
4353 GEN_NEON_INTEGER_OP(pmax);
4354 break;
4355 case 21: /* VPMIN */
4356 GEN_NEON_INTEGER_OP(pmin);
4357 break;
4358 case 22: /* Hultiply high. */
4359 if (!u) { /* VQDMULH */
4360 switch (size) {
ad69471c
PB
4361 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4362 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4363 default: return 1;
4364 }
4365 } else { /* VQRDHMUL */
4366 switch (size) {
ad69471c
PB
4367 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4368 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4369 default: return 1;
4370 }
4371 }
4372 break;
4373 case 23: /* VPADD */
4374 if (u)
4375 return 1;
4376 switch (size) {
ad69471c
PB
4377 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4378 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4379 case 2: gen_op_addl_T0_T1(); break;
4380 default: return 1;
4381 }
4382 break;
4383 case 26: /* Floating point arithnetic. */
4384 switch ((u << 2) | size) {
4385 case 0: /* VADD */
ad69471c 4386 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4387 break;
4388 case 2: /* VSUB */
ad69471c 4389 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4390 break;
4391 case 4: /* VPADD */
ad69471c 4392 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4393 break;
4394 case 6: /* VABD */
ad69471c 4395 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4396 break;
4397 default:
4398 return 1;
4399 }
4400 break;
4401 case 27: /* Float multiply. */
ad69471c 4402 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4403 if (!u) {
4404 NEON_GET_REG(T1, rd, pass);
4405 if (size == 0) {
ad69471c 4406 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4407 } else {
ad69471c 4408 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4409 }
4410 }
4411 break;
4412 case 28: /* Float compare. */
4413 if (!u) {
ad69471c 4414 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4415 } else {
9ee6e8bb 4416 if (size == 0)
ad69471c 4417 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4418 else
ad69471c 4419 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4420 }
2c0262af 4421 break;
9ee6e8bb
PB
4422 case 29: /* Float compare absolute. */
4423 if (!u)
4424 return 1;
4425 if (size == 0)
ad69471c 4426 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4427 else
ad69471c 4428 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4429 break;
9ee6e8bb
PB
4430 case 30: /* Float min/max. */
4431 if (size == 0)
ad69471c 4432 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4433 else
ad69471c 4434 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4435 break;
4436 case 31:
4437 if (size == 0)
4373f3ce 4438 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4439 else
4373f3ce 4440 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4441 break;
9ee6e8bb
PB
4442 default:
4443 abort();
2c0262af 4444 }
9ee6e8bb
PB
4445 /* Save the result. For elementwise operations we can put it
4446 straight into the destination register. For pairwise operations
4447 we have to be careful to avoid clobbering the source operands. */
4448 if (pairwise && rd == rm) {
4449 gen_neon_movl_scratch_T0(pass);
4450 } else {
4451 NEON_SET_REG(T0, rd, pass);
4452 }
4453
4454 } /* for pass */
4455 if (pairwise && rd == rm) {
4456 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4457 gen_neon_movl_T0_scratch(pass);
4458 NEON_SET_REG(T0, rd, pass);
4459 }
4460 }
ad69471c 4461 /* End of 3 register same size operations. */
9ee6e8bb
PB
4462 } else if (insn & (1 << 4)) {
4463 if ((insn & 0x00380080) != 0) {
4464 /* Two registers and shift. */
4465 op = (insn >> 8) & 0xf;
4466 if (insn & (1 << 7)) {
4467 /* 64-bit shift. */
4468 size = 3;
4469 } else {
4470 size = 2;
4471 while ((insn & (1 << (size + 19))) == 0)
4472 size--;
4473 }
4474 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4475 /* To avoid excessive dumplication of ops we implement shift
4476 by immediate using the variable shift operations. */
4477 if (op < 8) {
4478 /* Shift by immediate:
4479 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4480 /* Right shifts are encoded as N - shift, where N is the
4481 element size in bits. */
4482 if (op <= 4)
4483 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4484 if (size == 3) {
4485 count = q + 1;
4486 } else {
4487 count = q ? 4: 2;
4488 }
4489 switch (size) {
4490 case 0:
4491 imm = (uint8_t) shift;
4492 imm |= imm << 8;
4493 imm |= imm << 16;
4494 break;
4495 case 1:
4496 imm = (uint16_t) shift;
4497 imm |= imm << 16;
4498 break;
4499 case 2:
4500 case 3:
4501 imm = shift;
4502 break;
4503 default:
4504 abort();
4505 }
4506
4507 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4508 if (size == 3) {
4509 neon_load_reg64(cpu_V0, rm + pass);
4510 tcg_gen_movi_i64(cpu_V1, imm);
4511 switch (op) {
4512 case 0: /* VSHR */
4513 case 1: /* VSRA */
4514 if (u)
4515 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4516 else
ad69471c 4517 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4518 break;
ad69471c
PB
4519 case 2: /* VRSHR */
4520 case 3: /* VRSRA */
4521 if (u)
4522 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4523 else
ad69471c 4524 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4525 break;
ad69471c
PB
4526 case 4: /* VSRI */
4527 if (!u)
4528 return 1;
4529 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4530 break;
4531 case 5: /* VSHL, VSLI */
4532 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4533 break;
4534 case 6: /* VQSHL */
4535 if (u)
4536 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4537 else
ad69471c
PB
4538 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4539 break;
4540 case 7: /* VQSHLU */
4541 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4542 break;
9ee6e8bb 4543 }
ad69471c
PB
4544 if (op == 1 || op == 3) {
4545 /* Accumulate. */
4546 neon_load_reg64(cpu_V0, rd + pass);
4547 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4548 } else if (op == 4 || (op == 5 && u)) {
4549 /* Insert */
4550 cpu_abort(env, "VS[LR]I.64 not implemented");
4551 }
4552 neon_store_reg64(cpu_V0, rd + pass);
4553 } else { /* size < 3 */
4554 /* Operands in T0 and T1. */
4555 gen_op_movl_T1_im(imm);
4556 NEON_GET_REG(T0, rm, pass);
4557 switch (op) {
4558 case 0: /* VSHR */
4559 case 1: /* VSRA */
4560 GEN_NEON_INTEGER_OP(shl);
4561 break;
4562 case 2: /* VRSHR */
4563 case 3: /* VRSRA */
4564 GEN_NEON_INTEGER_OP(rshl);
4565 break;
4566 case 4: /* VSRI */
4567 if (!u)
4568 return 1;
4569 GEN_NEON_INTEGER_OP(shl);
4570 break;
4571 case 5: /* VSHL, VSLI */
4572 switch (size) {
4573 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4574 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4575 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4576 default: return 1;
4577 }
4578 break;
4579 case 6: /* VQSHL */
4580 GEN_NEON_INTEGER_OP_ENV(qshl);
4581 break;
4582 case 7: /* VQSHLU */
4583 switch (size) {
4584 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4585 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4586 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4587 default: return 1;
4588 }
4589 break;
4590 }
4591
4592 if (op == 1 || op == 3) {
4593 /* Accumulate. */
4594 NEON_GET_REG(T1, rd, pass);
4595 gen_neon_add(size);
4596 } else if (op == 4 || (op == 5 && u)) {
4597 /* Insert */
4598 switch (size) {
4599 case 0:
4600 if (op == 4)
4601 imm = 0xff >> -shift;
4602 else
4603 imm = (uint8_t)(0xff << shift);
4604 imm |= imm << 8;
4605 imm |= imm << 16;
4606 break;
4607 case 1:
4608 if (op == 4)
4609 imm = 0xffff >> -shift;
4610 else
4611 imm = (uint16_t)(0xffff << shift);
4612 imm |= imm << 16;
4613 break;
4614 case 2:
4615 if (op == 4)
4616 imm = 0xffffffffu >> -shift;
4617 else
4618 imm = 0xffffffffu << shift;
4619 break;
4620 default:
4621 abort();
4622 }
4623 tmp = neon_load_reg(rd, pass);
4624 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4625 tcg_gen_andi_i32(tmp, tmp, ~imm);
4626 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4627 }
9ee6e8bb
PB
4628 NEON_SET_REG(T0, rd, pass);
4629 }
4630 } /* for pass */
4631 } else if (op < 10) {
ad69471c 4632 /* Shift by immediate and narrow:
9ee6e8bb
PB
4633 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4634 shift = shift - (1 << (size + 3));
4635 size++;
9ee6e8bb
PB
4636 switch (size) {
4637 case 1:
ad69471c 4638 imm = (uint16_t)shift;
9ee6e8bb 4639 imm |= imm << 16;
ad69471c 4640 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
4641 break;
4642 case 2:
ad69471c
PB
4643 imm = (uint32_t)shift;
4644 tmp2 = tcg_const_i32(imm);
9ee6e8bb 4645 case 3:
ad69471c 4646 tmp2 = tcg_const_i64(shift);
9ee6e8bb
PB
4647 break;
4648 default:
4649 abort();
4650 }
4651
ad69471c
PB
4652 for (pass = 0; pass < 2; pass++) {
4653 if (size == 3) {
4654 neon_load_reg64(cpu_V0, rm + pass);
4655 if (q) {
4656 if (u)
4657 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
4658 else
4659 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
4660 } else {
4661 if (u)
4662 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
4663 else
4664 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
4665 }
2c0262af 4666 } else {
ad69471c
PB
4667 tmp = neon_load_reg(rm + pass, 0);
4668 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4669 tcg_gen_extu_i32_i64(cpu_V0, tmp);
4670 dead_tmp(tmp);
4671 tmp = neon_load_reg(rm + pass, 1);
4672 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4673 tcg_gen_extu_i32_i64(cpu_V1, tmp);
4674 dead_tmp(tmp);
4675 tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
4676 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4677 }
ad69471c
PB
4678 tmp = new_tmp();
4679 if (op == 8 && !u) {
4680 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4681 } else {
ad69471c
PB
4682 if (op == 8)
4683 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4684 else
ad69471c
PB
4685 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4686 }
4687 if (pass == 0) {
4688 tmp2 = tmp;
4689 } else {
4690 neon_store_reg(rd, 0, tmp2);
4691 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4692 }
4693 } /* for pass */
4694 } else if (op == 10) {
4695 /* VSHLL */
ad69471c 4696 if (q || size == 3)
9ee6e8bb 4697 return 1;
ad69471c
PB
4698 tmp = neon_load_reg(rm, 0);
4699 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4700 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4701 if (pass == 1)
4702 tmp = tmp2;
4703
4704 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4705
9ee6e8bb
PB
4706 if (shift != 0) {
4707 /* The shift is less than the width of the source
ad69471c
PB
4708 type, so we can just shift the whole register. */
4709 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4710 if (size < 2 || !u) {
4711 uint64_t imm64;
4712 if (size == 0) {
4713 imm = (0xffu >> (8 - shift));
4714 imm |= imm << 16;
4715 } else {
4716 imm = 0xffff >> (16 - shift);
9ee6e8bb 4717 }
ad69471c
PB
4718 imm64 = imm | (((uint64_t)imm) << 32);
4719 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4720 }
4721 }
ad69471c 4722 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4723 }
4724 } else if (op == 15 || op == 16) {
4725 /* VCVT fixed-point. */
4726 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4727 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4728 if (op & 1) {
4729 if (u)
4373f3ce 4730 gen_vfp_ulto(0, shift);
9ee6e8bb 4731 else
4373f3ce 4732 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4733 } else {
4734 if (u)
4373f3ce 4735 gen_vfp_toul(0, shift);
9ee6e8bb 4736 else
4373f3ce 4737 gen_vfp_tosl(0, shift);
2c0262af 4738 }
4373f3ce 4739 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4740 }
4741 } else {
9ee6e8bb
PB
4742 return 1;
4743 }
4744 } else { /* (insn & 0x00380080) == 0 */
4745 int invert;
4746
4747 op = (insn >> 8) & 0xf;
4748 /* One register and immediate. */
4749 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4750 invert = (insn & (1 << 5)) != 0;
4751 switch (op) {
4752 case 0: case 1:
4753 /* no-op */
4754 break;
4755 case 2: case 3:
4756 imm <<= 8;
4757 break;
4758 case 4: case 5:
4759 imm <<= 16;
4760 break;
4761 case 6: case 7:
4762 imm <<= 24;
4763 break;
4764 case 8: case 9:
4765 imm |= imm << 16;
4766 break;
4767 case 10: case 11:
4768 imm = (imm << 8) | (imm << 24);
4769 break;
4770 case 12:
4771 imm = (imm < 8) | 0xff;
4772 break;
4773 case 13:
4774 imm = (imm << 16) | 0xffff;
4775 break;
4776 case 14:
4777 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4778 if (invert)
4779 imm = ~imm;
4780 break;
4781 case 15:
4782 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4783 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4784 break;
4785 }
4786 if (invert)
4787 imm = ~imm;
4788
4789 if (op != 14 || !invert)
4790 gen_op_movl_T1_im(imm);
4791
4792 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4793 if (op & 1 && op < 12) {
ad69471c 4794 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4795 if (invert) {
4796 /* The immediate value has already been inverted, so
4797 BIC becomes AND. */
ad69471c 4798 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4799 } else {
ad69471c 4800 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4801 }
9ee6e8bb 4802 } else {
ad69471c
PB
4803 /* VMOV, VMVN. */
4804 tmp = new_tmp();
9ee6e8bb 4805 if (op == 14 && invert) {
ad69471c
PB
4806 uint32_t val;
4807 val = 0;
9ee6e8bb
PB
4808 for (n = 0; n < 4; n++) {
4809 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4810 val |= 0xff << (n * 8);
9ee6e8bb 4811 }
ad69471c
PB
4812 tcg_gen_movi_i32(tmp, val);
4813 } else {
4814 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4815 }
9ee6e8bb 4816 }
ad69471c 4817 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4818 }
4819 }
4820 } else { /* (insn & 0x00800010 == 0x00800010) */
4821 if (size != 3) {
4822 op = (insn >> 8) & 0xf;
4823 if ((insn & (1 << 6)) == 0) {
4824 /* Three registers of different lengths. */
4825 int src1_wide;
4826 int src2_wide;
4827 int prewiden;
4828 /* prewiden, src1_wide, src2_wide */
4829 static const int neon_3reg_wide[16][3] = {
4830 {1, 0, 0}, /* VADDL */
4831 {1, 1, 0}, /* VADDW */
4832 {1, 0, 0}, /* VSUBL */
4833 {1, 1, 0}, /* VSUBW */
4834 {0, 1, 1}, /* VADDHN */
4835 {0, 0, 0}, /* VABAL */
4836 {0, 1, 1}, /* VSUBHN */
4837 {0, 0, 0}, /* VABDL */
4838 {0, 0, 0}, /* VMLAL */
4839 {0, 0, 0}, /* VQDMLAL */
4840 {0, 0, 0}, /* VMLSL */
4841 {0, 0, 0}, /* VQDMLSL */
4842 {0, 0, 0}, /* Integer VMULL */
4843 {0, 0, 0}, /* VQDMULL */
4844 {0, 0, 0} /* Polynomial VMULL */
4845 };
4846
4847 prewiden = neon_3reg_wide[op][0];
4848 src1_wide = neon_3reg_wide[op][1];
4849 src2_wide = neon_3reg_wide[op][2];
4850
ad69471c
PB
4851 if (size == 0 && (op == 9 || op == 11 || op == 13))
4852 return 1;
4853
9ee6e8bb
PB
4854 /* Avoid overlapping operands. Wide source operands are
4855 always aligned so will never overlap with wide
4856 destinations in problematic ways. */
8f8e3aa4
PB
4857 if (rd == rm && !src2_wide) {
4858 NEON_GET_REG(T0, rm, 1);
4859 gen_neon_movl_scratch_T0(2);
4860 } else if (rd == rn && !src1_wide) {
4861 NEON_GET_REG(T0, rn, 1);
4862 gen_neon_movl_scratch_T0(2);
9ee6e8bb 4863 }
a50f5b91 4864 TCGV_UNUSED(tmp3);
9ee6e8bb 4865 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4866 if (src1_wide) {
4867 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4868 TCGV_UNUSED(tmp);
9ee6e8bb 4869 } else {
ad69471c
PB
4870 if (pass == 1 && rd == rn) {
4871 gen_neon_movl_T0_scratch(2);
4872 tmp = new_tmp();
4873 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4874 } else {
ad69471c
PB
4875 tmp = neon_load_reg(rn, pass);
4876 }
4877 if (prewiden) {
4878 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4879 }
4880 }
ad69471c
PB
4881 if (src2_wide) {
4882 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4883 TCGV_UNUSED(tmp2);
9ee6e8bb 4884 } else {
ad69471c 4885 if (pass == 1 && rd == rm) {
8f8e3aa4 4886 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4887 tmp2 = new_tmp();
4888 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4889 } else {
ad69471c
PB
4890 tmp2 = neon_load_reg(rm, pass);
4891 }
4892 if (prewiden) {
4893 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4894 }
9ee6e8bb
PB
4895 }
4896 switch (op) {
4897 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4898 gen_neon_addl(size);
9ee6e8bb
PB
4899 break;
4900 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
ad69471c 4901 gen_neon_subl(size);
9ee6e8bb
PB
4902 break;
4903 case 5: case 7: /* VABAL, VABDL */
4904 switch ((size << 1) | u) {
ad69471c
PB
4905 case 0:
4906 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4907 break;
4908 case 1:
4909 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4910 break;
4911 case 2:
4912 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4913 break;
4914 case 3:
4915 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4916 break;
4917 case 4:
4918 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4919 break;
4920 case 5:
4921 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4922 break;
9ee6e8bb
PB
4923 default: abort();
4924 }
ad69471c
PB
4925 dead_tmp(tmp2);
4926 dead_tmp(tmp);
9ee6e8bb
PB
4927 break;
4928 case 8: case 9: case 10: case 11: case 12: case 13:
4929 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4930 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
4931 break;
4932 case 14: /* Polynomial VMULL */
4933 cpu_abort(env, "Polynomial VMULL not implemented");
4934
4935 default: /* 15 is RESERVED. */
4936 return 1;
4937 }
4938 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4939 /* Accumulate. */
4940 if (op == 10 || op == 11) {
ad69471c 4941 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4942 }
4943
9ee6e8bb 4944 if (op != 13) {
ad69471c 4945 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4946 }
4947
4948 switch (op) {
4949 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4950 gen_neon_addl(size);
9ee6e8bb
PB
4951 break;
4952 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4953 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4954 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4955 break;
9ee6e8bb
PB
4956 /* Fall through. */
4957 case 13: /* VQDMULL */
ad69471c 4958 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4959 break;
4960 default:
4961 abort();
4962 }
ad69471c 4963 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4964 } else if (op == 4 || op == 6) {
4965 /* Narrowing operation. */
ad69471c 4966 tmp = new_tmp();
9ee6e8bb
PB
4967 if (u) {
4968 switch (size) {
ad69471c
PB
4969 case 0:
4970 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4971 break;
4972 case 1:
4973 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4974 break;
4975 case 2:
4976 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4977 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4978 break;
9ee6e8bb
PB
4979 default: abort();
4980 }
4981 } else {
4982 switch (size) {
ad69471c
PB
4983 case 0:
4984 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4985 break;
4986 case 1:
4987 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4988 break;
4989 case 2:
4990 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4991 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4992 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4993 break;
9ee6e8bb
PB
4994 default: abort();
4995 }
4996 }
ad69471c
PB
4997 if (pass == 0) {
4998 tmp3 = tmp;
4999 } else {
5000 neon_store_reg(rd, 0, tmp3);
5001 neon_store_reg(rd, 1, tmp);
5002 }
9ee6e8bb
PB
5003 } else {
5004 /* Write back the result. */
ad69471c 5005 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5006 }
5007 }
5008 } else {
5009 /* Two registers and a scalar. */
5010 switch (op) {
5011 case 0: /* Integer VMLA scalar */
5012 case 1: /* Float VMLA scalar */
5013 case 4: /* Integer VMLS scalar */
5014 case 5: /* Floating point VMLS scalar */
5015 case 8: /* Integer VMUL scalar */
5016 case 9: /* Floating point VMUL scalar */
5017 case 12: /* VQDMULH scalar */
5018 case 13: /* VQRDMULH scalar */
5019 gen_neon_get_scalar(size, rm);
8f8e3aa4 5020 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
5021 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5022 if (pass != 0)
8f8e3aa4 5023 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
5024 NEON_GET_REG(T1, rn, pass);
5025 if (op == 12) {
5026 if (size == 1) {
ad69471c 5027 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 5028 } else {
ad69471c 5029 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5030 }
5031 } else if (op == 13) {
5032 if (size == 1) {
ad69471c 5033 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5034 } else {
ad69471c 5035 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5036 }
5037 } else if (op & 1) {
ad69471c 5038 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5039 } else {
5040 switch (size) {
ad69471c
PB
5041 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5042 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5043 case 2: gen_op_mul_T0_T1(); break;
5044 default: return 1;
5045 }
5046 }
5047 if (op < 8) {
5048 /* Accumulate. */
5049 NEON_GET_REG(T1, rd, pass);
5050 switch (op) {
5051 case 0:
5052 gen_neon_add(size);
5053 break;
5054 case 1:
ad69471c 5055 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5056 break;
5057 case 4:
ad69471c 5058 gen_neon_rsb(size);
9ee6e8bb
PB
5059 break;
5060 case 5:
ad69471c 5061 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5062 break;
5063 default:
5064 abort();
5065 }
5066 }
5067 NEON_SET_REG(T0, rd, pass);
5068 }
5069 break;
5070 case 2: /* VMLAL sclar */
5071 case 3: /* VQDMLAL scalar */
5072 case 6: /* VMLSL scalar */
5073 case 7: /* VQDMLSL scalar */
5074 case 10: /* VMULL scalar */
5075 case 11: /* VQDMULL scalar */
ad69471c
PB
5076 if (size == 0 && (op == 3 || op == 7 || op == 11))
5077 return 1;
5078
9ee6e8bb 5079 gen_neon_get_scalar(size, rm);
ad69471c
PB
5080 NEON_GET_REG(T1, rn, 1);
5081
9ee6e8bb 5082 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5083 if (pass == 0) {
5084 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5085 } else {
ad69471c
PB
5086 tmp = new_tmp();
5087 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5088 }
ad69471c
PB
5089 tmp2 = new_tmp();
5090 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5091 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5092 if (op == 6 || op == 7) {
ad69471c
PB
5093 gen_neon_negl(cpu_V0, size);
5094 }
5095 if (op != 11) {
5096 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5097 }
9ee6e8bb
PB
5098 switch (op) {
5099 case 2: case 6:
ad69471c 5100 gen_neon_addl(size);
9ee6e8bb
PB
5101 break;
5102 case 3: case 7:
ad69471c
PB
5103 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5104 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5105 break;
5106 case 10:
5107 /* no-op */
5108 break;
5109 case 11:
ad69471c 5110 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5111 break;
5112 default:
5113 abort();
5114 }
ad69471c 5115 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5116 }
5117 break;
5118 default: /* 14 and 15 are RESERVED */
5119 return 1;
5120 }
5121 }
5122 } else { /* size == 3 */
5123 if (!u) {
5124 /* Extract. */
9ee6e8bb 5125 imm = (insn >> 8) & 0xf;
ad69471c
PB
5126 count = q + 1;
5127
5128 if (imm > 7 && !q)
5129 return 1;
5130
5131 if (imm == 0) {
5132 neon_load_reg64(cpu_V0, rn);
5133 if (q) {
5134 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5135 }
ad69471c
PB
5136 } else if (imm == 8) {
5137 neon_load_reg64(cpu_V0, rn + 1);
5138 if (q) {
5139 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5140 }
ad69471c
PB
5141 } else if (q) {
5142 tmp = tcg_temp_new(TCG_TYPE_I64);
5143 if (imm < 8) {
5144 neon_load_reg64(cpu_V0, rn);
5145 neon_load_reg64(tmp, rn + 1);
5146 } else {
5147 neon_load_reg64(cpu_V0, rn + 1);
5148 neon_load_reg64(tmp, rm);
5149 }
5150 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5151 tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
5152 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5153 if (imm < 8) {
5154 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5155 } else {
ad69471c
PB
5156 neon_load_reg64(cpu_V1, rm + 1);
5157 imm -= 8;
9ee6e8bb 5158 }
ad69471c
PB
5159 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5160 tcg_gen_shri_i64(tmp, tmp, imm * 8);
5161 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
5162 } else {
5163 neon_load_reg64(cpu_V0, rn);
5164 tcg_gen_shri_i32(cpu_V0, cpu_V0, imm * 8);
5165 neon_load_reg64(cpu_V1, rm);
5166 tcg_gen_shli_i32(cpu_V1, cpu_V1, 64 - (imm * 8));
5167 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5168 }
5169 neon_store_reg64(cpu_V0, rd);
5170 if (q) {
5171 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5172 }
5173 } else if ((insn & (1 << 11)) == 0) {
5174 /* Two register misc. */
5175 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5176 size = (insn >> 18) & 3;
5177 switch (op) {
5178 case 0: /* VREV64 */
5179 if (size == 3)
5180 return 1;
5181 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5182 NEON_GET_REG(T0, rm, pass * 2);
5183 NEON_GET_REG(T1, rm, pass * 2 + 1);
5184 switch (size) {
b0109805 5185 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5186 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5187 case 2: /* no-op */ break;
5188 default: abort();
5189 }
5190 NEON_SET_REG(T0, rd, pass * 2 + 1);
5191 if (size == 2) {
5192 NEON_SET_REG(T1, rd, pass * 2);
5193 } else {
5194 gen_op_movl_T0_T1();
5195 switch (size) {
b0109805 5196 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5197 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5198 default: abort();
5199 }
5200 NEON_SET_REG(T0, rd, pass * 2);
5201 }
5202 }
5203 break;
5204 case 4: case 5: /* VPADDL */
5205 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5206 if (size == 3)
5207 return 1;
ad69471c
PB
5208 for (pass = 0; pass < q + 1; pass++) {
5209 tmp = neon_load_reg(rm, pass * 2);
5210 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5211 tmp = neon_load_reg(rm, pass * 2 + 1);
5212 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5213 switch (size) {
5214 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5215 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5216 case 2: tcg_gen_add_i64(CPU_V001); break;
5217 default: abort();
5218 }
9ee6e8bb
PB
5219 if (op >= 12) {
5220 /* Accumulate. */
ad69471c
PB
5221 neon_load_reg64(cpu_V1, rd + pass);
5222 gen_neon_addl(size);
9ee6e8bb 5223 }
ad69471c 5224 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5225 }
5226 break;
5227 case 33: /* VTRN */
5228 if (size == 2) {
5229 for (n = 0; n < (q ? 4 : 2); n += 2) {
5230 NEON_GET_REG(T0, rm, n);
5231 NEON_GET_REG(T1, rd, n + 1);
5232 NEON_SET_REG(T1, rm, n);
5233 NEON_SET_REG(T0, rd, n + 1);
5234 }
5235 } else {
5236 goto elementwise;
5237 }
5238 break;
5239 case 34: /* VUZP */
5240 /* Reg Before After
5241 Rd A3 A2 A1 A0 B2 B0 A2 A0
5242 Rm B3 B2 B1 B0 B3 B1 A3 A1
5243 */
5244 if (size == 3)
5245 return 1;
5246 gen_neon_unzip(rd, q, 0, size);
5247 gen_neon_unzip(rm, q, 4, size);
5248 if (q) {
5249 static int unzip_order_q[8] =
5250 {0, 2, 4, 6, 1, 3, 5, 7};
5251 for (n = 0; n < 8; n++) {
5252 int reg = (n < 4) ? rd : rm;
5253 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5254 NEON_SET_REG(T0, reg, n % 4);
5255 }
5256 } else {
5257 static int unzip_order[4] =
5258 {0, 4, 1, 5};
5259 for (n = 0; n < 4; n++) {
5260 int reg = (n < 2) ? rd : rm;
5261 gen_neon_movl_T0_scratch(unzip_order[n]);
5262 NEON_SET_REG(T0, reg, n % 2);
5263 }
5264 }
5265 break;
5266 case 35: /* VZIP */
5267 /* Reg Before After
5268 Rd A3 A2 A1 A0 B1 A1 B0 A0
5269 Rm B3 B2 B1 B0 B3 A3 B2 A2
5270 */
5271 if (size == 3)
5272 return 1;
5273 count = (q ? 4 : 2);
5274 for (n = 0; n < count; n++) {
5275 NEON_GET_REG(T0, rd, n);
5276 NEON_GET_REG(T1, rd, n);
5277 switch (size) {
ad69471c
PB
5278 case 0: gen_helper_neon_zip_u8(); break;
5279 case 1: gen_helper_neon_zip_u16(); break;
9ee6e8bb
PB
5280 case 2: /* no-op */; break;
5281 default: abort();
5282 }
5283 gen_neon_movl_scratch_T0(n * 2);
5284 gen_neon_movl_scratch_T1(n * 2 + 1);
5285 }
5286 for (n = 0; n < count * 2; n++) {
5287 int reg = (n < count) ? rd : rm;
5288 gen_neon_movl_T0_scratch(n);
5289 NEON_SET_REG(T0, reg, n % count);
5290 }
5291 break;
5292 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5293 if (size == 3)
5294 return 1;
a50f5b91 5295 TCGV_UNUSED(tmp2);
9ee6e8bb 5296 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5297 neon_load_reg64(cpu_V0, rm + pass);
5298 tmp = new_tmp();
9ee6e8bb 5299 if (op == 36 && q == 0) {
ad69471c 5300 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5301 } else if (q) {
ad69471c 5302 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5303 } else {
ad69471c
PB
5304 gen_neon_narrow_sats(size, tmp, cpu_V0);
5305 }
5306 if (pass == 0) {
5307 tmp2 = tmp;
5308 } else {
5309 neon_store_reg(rd, 0, tmp2);
5310 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5311 }
9ee6e8bb
PB
5312 }
5313 break;
5314 case 38: /* VSHLL */
ad69471c 5315 if (q || size == 3)
9ee6e8bb 5316 return 1;
ad69471c
PB
5317 tmp = neon_load_reg(rm, 0);
5318 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5319 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5320 if (pass == 1)
5321 tmp = tmp2;
5322 gen_neon_widen(cpu_V0, tmp, size, 1);
5323 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5324 }
5325 break;
5326 default:
5327 elementwise:
5328 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5329 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5330 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5331 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5332 } else {
5333 NEON_GET_REG(T0, rm, pass);
5334 }
5335 switch (op) {
5336 case 1: /* VREV32 */
5337 switch (size) {
b0109805 5338 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5339 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5340 default: return 1;
5341 }
5342 break;
5343 case 2: /* VREV16 */
5344 if (size != 0)
5345 return 1;
3670669c 5346 gen_rev16(cpu_T[0]);
9ee6e8bb 5347 break;
9ee6e8bb
PB
5348 case 8: /* CLS */
5349 switch (size) {
ad69471c
PB
5350 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5351 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5352 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5353 default: return 1;
5354 }
5355 break;
5356 case 9: /* CLZ */
5357 switch (size) {
ad69471c
PB
5358 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5359 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5360 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5361 default: return 1;
5362 }
5363 break;
5364 case 10: /* CNT */
5365 if (size != 0)
5366 return 1;
ad69471c 5367 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5368 break;
5369 case 11: /* VNOT */
5370 if (size != 0)
5371 return 1;
5372 gen_op_notl_T0();
5373 break;
5374 case 14: /* VQABS */
5375 switch (size) {
ad69471c
PB
5376 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5377 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5378 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5379 default: return 1;
5380 }
5381 break;
5382 case 15: /* VQNEG */
5383 switch (size) {
ad69471c
PB
5384 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5385 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5386 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5387 default: return 1;
5388 }
5389 break;
5390 case 16: case 19: /* VCGT #0, VCLE #0 */
5391 gen_op_movl_T1_im(0);
5392 switch(size) {
ad69471c
PB
5393 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5394 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5395 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5396 default: return 1;
5397 }
5398 if (op == 19)
5399 gen_op_notl_T0();
5400 break;
5401 case 17: case 20: /* VCGE #0, VCLT #0 */
5402 gen_op_movl_T1_im(0);
5403 switch(size) {
ad69471c
PB
5404 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5405 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5406 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5407 default: return 1;
5408 }
5409 if (op == 20)
5410 gen_op_notl_T0();
5411 break;
5412 case 18: /* VCEQ #0 */
5413 gen_op_movl_T1_im(0);
5414 switch(size) {
ad69471c
PB
5415 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5416 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5417 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5418 default: return 1;
5419 }
5420 break;
5421 case 22: /* VABS */
5422 switch(size) {
ad69471c
PB
5423 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5424 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5425 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5426 default: return 1;
5427 }
5428 break;
5429 case 23: /* VNEG */
5430 gen_op_movl_T1_im(0);
ad69471c
PB
5431 if (size == 3)
5432 return 1;
5433 gen_neon_rsb(size);
9ee6e8bb
PB
5434 break;
5435 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5436 gen_op_movl_T1_im(0);
ad69471c 5437 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5438 if (op == 27)
5439 gen_op_notl_T0();
5440 break;
5441 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5442 gen_op_movl_T1_im(0);
ad69471c 5443 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5444 if (op == 28)
5445 gen_op_notl_T0();
5446 break;
5447 case 26: /* Float VCEQ #0 */
5448 gen_op_movl_T1_im(0);
ad69471c 5449 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5450 break;
5451 case 30: /* Float VABS */
4373f3ce 5452 gen_vfp_abs(0);
9ee6e8bb
PB
5453 break;
5454 case 31: /* Float VNEG */
4373f3ce 5455 gen_vfp_neg(0);
9ee6e8bb
PB
5456 break;
5457 case 32: /* VSWP */
5458 NEON_GET_REG(T1, rd, pass);
5459 NEON_SET_REG(T1, rm, pass);
5460 break;
5461 case 33: /* VTRN */
5462 NEON_GET_REG(T1, rd, pass);
5463 switch (size) {
ad69471c
PB
5464 case 0: gen_helper_neon_trn_u8(); break;
5465 case 1: gen_helper_neon_trn_u16(); break;
9ee6e8bb
PB
5466 case 2: abort();
5467 default: return 1;
5468 }
5469 NEON_SET_REG(T1, rm, pass);
5470 break;
5471 case 56: /* Integer VRECPE */
4373f3ce 5472 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5473 break;
5474 case 57: /* Integer VRSQRTE */
4373f3ce 5475 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5476 break;
5477 case 58: /* Float VRECPE */
4373f3ce 5478 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5479 break;
5480 case 59: /* Float VRSQRTE */
4373f3ce 5481 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5482 break;
5483 case 60: /* VCVT.F32.S32 */
4373f3ce 5484 gen_vfp_tosiz(0);
9ee6e8bb
PB
5485 break;
5486 case 61: /* VCVT.F32.U32 */
4373f3ce 5487 gen_vfp_touiz(0);
9ee6e8bb
PB
5488 break;
5489 case 62: /* VCVT.S32.F32 */
4373f3ce 5490 gen_vfp_sito(0);
9ee6e8bb
PB
5491 break;
5492 case 63: /* VCVT.U32.F32 */
4373f3ce 5493 gen_vfp_uito(0);
9ee6e8bb
PB
5494 break;
5495 default:
5496 /* Reserved: 21, 29, 39-56 */
5497 return 1;
5498 }
5499 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5500 tcg_gen_st_f32(cpu_F0s, cpu_env,
5501 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5502 } else {
5503 NEON_SET_REG(T0, rd, pass);
5504 }
5505 }
5506 break;
5507 }
5508 } else if ((insn & (1 << 10)) == 0) {
5509 /* VTBL, VTBX. */
5510 n = (insn >> 5) & 0x18;
9ee6e8bb 5511 if (insn & (1 << 6)) {
8f8e3aa4 5512 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5513 } else {
8f8e3aa4
PB
5514 tmp = new_tmp();
5515 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5516 }
8f8e3aa4
PB
5517 tmp2 = neon_load_reg(rm, 0);
5518 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5519 tcg_const_i32(n));
9ee6e8bb 5520 if (insn & (1 << 6)) {
8f8e3aa4 5521 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5522 } else {
8f8e3aa4
PB
5523 tmp = new_tmp();
5524 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5525 }
8f8e3aa4
PB
5526 tmp3 = neon_load_reg(rm, 1);
5527 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5528 tcg_const_i32(n));
5529 neon_store_reg(rd, 0, tmp2);
5530 neon_store_reg(rd, 1, tmp2);
9ee6e8bb
PB
5531 } else if ((insn & 0x380) == 0) {
5532 /* VDUP */
5533 if (insn & (1 << 19)) {
5534 NEON_SET_REG(T0, rm, 1);
5535 } else {
5536 NEON_SET_REG(T0, rm, 0);
5537 }
5538 if (insn & (1 << 16)) {
ad69471c 5539 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5540 } else if (insn & (1 << 17)) {
5541 if ((insn >> 18) & 1)
ad69471c 5542 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5543 else
ad69471c 5544 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5545 }
5546 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5547 NEON_SET_REG(T0, rd, pass);
5548 }
5549 } else {
5550 return 1;
5551 }
5552 }
5553 }
5554 return 0;
5555}
5556
5557static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5558{
5559 int cpnum;
5560
5561 cpnum = (insn >> 8) & 0xf;
5562 if (arm_feature(env, ARM_FEATURE_XSCALE)
5563 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5564 return 1;
5565
5566 switch (cpnum) {
5567 case 0:
5568 case 1:
5569 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5570 return disas_iwmmxt_insn(env, s, insn);
5571 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5572 return disas_dsp_insn(env, s, insn);
5573 }
5574 return 1;
5575 case 10:
5576 case 11:
5577 return disas_vfp_insn (env, s, insn);
5578 case 15:
5579 return disas_cp15_insn (env, s, insn);
5580 default:
5581 /* Unknown coprocessor. See if the board has hooked it. */
5582 return disas_cp_insn (env, s, insn);
5583 }
5584}
5585
5e3f878a
PB
5586
5587/* Store a 64-bit value to a register pair. Clobbers val. */
5588static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
5589{
5590 TCGv tmp;
5591 tmp = new_tmp();
5592 tcg_gen_trunc_i64_i32(tmp, val);
5593 store_reg(s, rlow, tmp);
5594 tmp = new_tmp();
5595 tcg_gen_shri_i64(val, val, 32);
5596 tcg_gen_trunc_i64_i32(tmp, val);
5597 store_reg(s, rhigh, tmp);
5598}
5599
/* load a 32-bit value from a register and perform a 64-bit accumulate. */
static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
{
    TCGv tmp;
    TCGv tmp2;

    /* Zero-extend register rlow to 64 bits and add it into val.  */
    tmp = tcg_temp_new(TCG_TYPE_I64);
    tmp2 = load_reg(s, rlow);
    tcg_gen_extu_i32_i64(tmp, tmp2);
    dead_tmp(tmp2);
    tcg_gen_add_i64(val, val, tmp);
}
5613
5614/* load and add a 64-bit value from a register pair. */
5615static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
5616{
5617 TCGv tmp;
5618 TCGv tmp2;
5619
5620 /* Load 64-bit value rd:rn. */
5621 tmp = tcg_temp_new(TCG_TYPE_I64);
5622 tmp2 = load_reg(s, rhigh);
5623 tcg_gen_extu_i32_i64(tmp, tmp2);
5624 dead_tmp(tmp2);
5625 tcg_gen_shli_i64(tmp, tmp, 32);
5626 tcg_gen_add_i64(val, val, tmp);
5627
5628 tmp2 = load_reg(s, rlow);
5629 tcg_gen_extu_i32_i64(tmp, tmp2);
5630 dead_tmp(tmp2);
5631 tcg_gen_add_i64(val, val, tmp);
5632}
5633
5634/* Set N and Z flags from a 64-bit value. */
5635static void gen_logicq_cc(TCGv val)
5636{
5637 TCGv tmp = new_tmp();
5638 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5639 gen_logic_CC(tmp);
5640 dead_tmp(tmp);
5e3f878a
PB
5641}
5642
9ee6e8bb
PB
5643static void disas_arm_insn(CPUState * env, DisasContext *s)
5644{
5645 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5646 TCGv tmp;
3670669c 5647 TCGv tmp2;
6ddbc6e4 5648 TCGv tmp3;
b0109805 5649 TCGv addr;
9ee6e8bb
PB
5650
5651 insn = ldl_code(s->pc);
5652 s->pc += 4;
5653
5654 /* M variants do not implement ARM mode. */
5655 if (IS_M(env))
5656 goto illegal_op;
5657 cond = insn >> 28;
5658 if (cond == 0xf){
5659 /* Unconditional instructions. */
5660 if (((insn >> 25) & 7) == 1) {
5661 /* NEON Data processing. */
5662 if (!arm_feature(env, ARM_FEATURE_NEON))
5663 goto illegal_op;
5664
5665 if (disas_neon_data_insn(env, s, insn))
5666 goto illegal_op;
5667 return;
5668 }
5669 if ((insn & 0x0f100000) == 0x04000000) {
5670 /* NEON load/store. */
5671 if (!arm_feature(env, ARM_FEATURE_NEON))
5672 goto illegal_op;
5673
5674 if (disas_neon_ls_insn(env, s, insn))
5675 goto illegal_op;
5676 return;
5677 }
5678 if ((insn & 0x0d70f000) == 0x0550f000)
5679 return; /* PLD */
5680 else if ((insn & 0x0ffffdff) == 0x01010000) {
5681 ARCH(6);
5682 /* setend */
5683 if (insn & (1 << 9)) {
5684 /* BE8 mode not implemented. */
5685 goto illegal_op;
5686 }
5687 return;
5688 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5689 switch ((insn >> 4) & 0xf) {
5690 case 1: /* clrex */
5691 ARCH(6K);
8f8e3aa4 5692 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5693 return;
5694 case 4: /* dsb */
5695 case 5: /* dmb */
5696 case 6: /* isb */
5697 ARCH(7);
5698 /* We don't emulate caches so these are a no-op. */
5699 return;
5700 default:
5701 goto illegal_op;
5702 }
5703 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5704 /* srs */
5705 uint32_t offset;
5706 if (IS_USER(s))
5707 goto illegal_op;
5708 ARCH(6);
5709 op1 = (insn & 0x1f);
5710 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5711 addr = load_reg(s, 13);
9ee6e8bb 5712 } else {
b0109805
PB
5713 addr = new_tmp();
5714 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5715 }
5716 i = (insn >> 23) & 3;
5717 switch (i) {
5718 case 0: offset = -4; break; /* DA */
5719 case 1: offset = -8; break; /* DB */
5720 case 2: offset = 0; break; /* IA */
5721 case 3: offset = 4; break; /* IB */
5722 default: abort();
5723 }
5724 if (offset)
b0109805
PB
5725 tcg_gen_addi_i32(addr, addr, offset);
5726 tmp = load_reg(s, 14);
5727 gen_st32(tmp, addr, 0);
5728 tmp = new_tmp();
5729 gen_helper_cpsr_read(tmp);
5730 tcg_gen_addi_i32(addr, addr, 4);
5731 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5732 if (insn & (1 << 21)) {
5733 /* Base writeback. */
5734 switch (i) {
5735 case 0: offset = -8; break;
5736 case 1: offset = -4; break;
5737 case 2: offset = 4; break;
5738 case 3: offset = 0; break;
5739 default: abort();
5740 }
5741 if (offset)
b0109805 5742 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5743 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5744 gen_movl_reg_T1(s, 13);
5745 } else {
b0109805 5746 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5747 }
b0109805
PB
5748 } else {
5749 dead_tmp(addr);
9ee6e8bb
PB
5750 }
5751 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5752 /* rfe */
5753 uint32_t offset;
5754 if (IS_USER(s))
5755 goto illegal_op;
5756 ARCH(6);
5757 rn = (insn >> 16) & 0xf;
b0109805 5758 addr = load_reg(s, rn);
9ee6e8bb
PB
5759 i = (insn >> 23) & 3;
5760 switch (i) {
b0109805
PB
5761 case 0: offset = -4; break; /* DA */
5762 case 1: offset = -8; break; /* DB */
5763 case 2: offset = 0; break; /* IA */
5764 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5765 default: abort();
5766 }
5767 if (offset)
b0109805
PB
5768 tcg_gen_addi_i32(addr, addr, offset);
5769 /* Load PC into tmp and CPSR into tmp2. */
5770 tmp = gen_ld32(addr, 0);
5771 tcg_gen_addi_i32(addr, addr, 4);
5772 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5773 if (insn & (1 << 21)) {
5774 /* Base writeback. */
5775 switch (i) {
b0109805
PB
5776 case 0: offset = -8; break;
5777 case 1: offset = -4; break;
5778 case 2: offset = 4; break;
5779 case 3: offset = 0; break;
9ee6e8bb
PB
5780 default: abort();
5781 }
5782 if (offset)
b0109805
PB
5783 tcg_gen_addi_i32(addr, addr, offset);
5784 store_reg(s, rn, addr);
5785 } else {
5786 dead_tmp(addr);
9ee6e8bb 5787 }
b0109805 5788 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5789 } else if ((insn & 0x0e000000) == 0x0a000000) {
5790 /* branch link and change to thumb (blx <offset>) */
5791 int32_t offset;
5792
5793 val = (uint32_t)s->pc;
d9ba4830
PB
5794 tmp = new_tmp();
5795 tcg_gen_movi_i32(tmp, val);
5796 store_reg(s, 14, tmp);
9ee6e8bb
PB
5797 /* Sign-extend the 24-bit offset */
5798 offset = (((int32_t)insn) << 8) >> 8;
5799 /* offset * 4 + bit24 * 2 + (thumb bit) */
5800 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5801 /* pipeline offset */
5802 val += 4;
d9ba4830 5803 gen_bx_im(s, val);
9ee6e8bb
PB
5804 return;
5805 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5806 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5807 /* iWMMXt register transfer. */
5808 if (env->cp15.c15_cpar & (1 << 1))
5809 if (!disas_iwmmxt_insn(env, s, insn))
5810 return;
5811 }
5812 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5813 /* Coprocessor double register transfer. */
5814 } else if ((insn & 0x0f000010) == 0x0e000010) {
5815 /* Additional coprocessor register transfer. */
5816 } else if ((insn & 0x0ff10010) == 0x01000000) {
5817 uint32_t mask;
5818 uint32_t val;
5819 /* cps (privileged) */
5820 if (IS_USER(s))
5821 return;
5822 mask = val = 0;
5823 if (insn & (1 << 19)) {
5824 if (insn & (1 << 8))
5825 mask |= CPSR_A;
5826 if (insn & (1 << 7))
5827 mask |= CPSR_I;
5828 if (insn & (1 << 6))
5829 mask |= CPSR_F;
5830 if (insn & (1 << 18))
5831 val |= mask;
5832 }
5833 if (insn & (1 << 14)) {
5834 mask |= CPSR_M;
5835 val |= (insn & 0x1f);
5836 }
5837 if (mask) {
5838 gen_op_movl_T0_im(val);
5839 gen_set_psr_T0(s, mask, 0);
5840 }
5841 return;
5842 }
5843 goto illegal_op;
5844 }
5845 if (cond != 0xe) {
5846 /* if not always execute, we generate a conditional jump to
5847 next instruction */
5848 s->condlabel = gen_new_label();
d9ba4830 5849 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5850 s->condjmp = 1;
5851 }
5852 if ((insn & 0x0f900000) == 0x03000000) {
5853 if ((insn & (1 << 21)) == 0) {
5854 ARCH(6T2);
5855 rd = (insn >> 12) & 0xf;
5856 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5857 if ((insn & (1 << 22)) == 0) {
5858 /* MOVW */
5e3f878a
PB
5859 tmp = new_tmp();
5860 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5861 } else {
5862 /* MOVT */
5e3f878a 5863 tmp = load_reg(s, rd);
86831435 5864 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5865 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5866 }
5e3f878a 5867 store_reg(s, rd, tmp);
9ee6e8bb
PB
5868 } else {
5869 if (((insn >> 12) & 0xf) != 0xf)
5870 goto illegal_op;
5871 if (((insn >> 16) & 0xf) == 0) {
5872 gen_nop_hint(s, insn & 0xff);
5873 } else {
5874 /* CPSR = immediate */
5875 val = insn & 0xff;
5876 shift = ((insn >> 8) & 0xf) * 2;
5877 if (shift)
5878 val = (val >> shift) | (val << (32 - shift));
5879 gen_op_movl_T0_im(val);
5880 i = ((insn & (1 << 22)) != 0);
5881 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5882 goto illegal_op;
5883 }
5884 }
5885 } else if ((insn & 0x0f900000) == 0x01000000
5886 && (insn & 0x00000090) != 0x00000090) {
5887 /* miscellaneous instructions */
5888 op1 = (insn >> 21) & 3;
5889 sh = (insn >> 4) & 0xf;
5890 rm = insn & 0xf;
5891 switch (sh) {
5892 case 0x0: /* move program status register */
5893 if (op1 & 1) {
5894 /* PSR = reg */
5895 gen_movl_T0_reg(s, rm);
5896 i = ((op1 & 2) != 0);
5897 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5898 goto illegal_op;
5899 } else {
5900 /* reg = PSR */
5901 rd = (insn >> 12) & 0xf;
5902 if (op1 & 2) {
5903 if (IS_USER(s))
5904 goto illegal_op;
d9ba4830 5905 tmp = load_cpu_field(spsr);
9ee6e8bb 5906 } else {
d9ba4830
PB
5907 tmp = new_tmp();
5908 gen_helper_cpsr_read(tmp);
9ee6e8bb 5909 }
d9ba4830 5910 store_reg(s, rd, tmp);
9ee6e8bb
PB
5911 }
5912 break;
5913 case 0x1:
5914 if (op1 == 1) {
5915 /* branch/exchange thumb (bx). */
d9ba4830
PB
5916 tmp = load_reg(s, rm);
5917 gen_bx(s, tmp);
9ee6e8bb
PB
5918 } else if (op1 == 3) {
5919 /* clz */
5920 rd = (insn >> 12) & 0xf;
1497c961
PB
5921 tmp = load_reg(s, rm);
5922 gen_helper_clz(tmp, tmp);
5923 store_reg(s, rd, tmp);
9ee6e8bb
PB
5924 } else {
5925 goto illegal_op;
5926 }
5927 break;
5928 case 0x2:
5929 if (op1 == 1) {
5930 ARCH(5J); /* bxj */
5931 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
5932 tmp = load_reg(s, rm);
5933 gen_bx(s, tmp);
9ee6e8bb
PB
5934 } else {
5935 goto illegal_op;
5936 }
5937 break;
5938 case 0x3:
5939 if (op1 != 1)
5940 goto illegal_op;
5941
5942 /* branch link/exchange thumb (blx) */
d9ba4830
PB
5943 tmp = load_reg(s, rm);
5944 tmp2 = new_tmp();
5945 tcg_gen_movi_i32(tmp2, s->pc);
5946 store_reg(s, 14, tmp2);
5947 gen_bx(s, tmp);
9ee6e8bb
PB
5948 break;
5949 case 0x5: /* saturating add/subtract */
5950 rd = (insn >> 12) & 0xf;
5951 rn = (insn >> 16) & 0xf;
5e3f878a
PB
5952 tmp = load_reg(s, rn);
5953 tmp2 = load_reg(s, rn);
9ee6e8bb 5954 if (op1 & 2)
5e3f878a 5955 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 5956 if (op1 & 1)
5e3f878a 5957 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 5958 else
5e3f878a
PB
5959 gen_helper_add_saturate(tmp, tmp, tmp2);
5960 dead_tmp(tmp2);
5961 store_reg(s, rd, tmp);
9ee6e8bb
PB
5962 break;
5963 case 7: /* bkpt */
5964 gen_set_condexec(s);
5e3f878a 5965 gen_set_pc_im(s->pc - 4);
d9ba4830 5966 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
5967 s->is_jmp = DISAS_JUMP;
5968 break;
5969 case 0x8: /* signed multiply */
5970 case 0xa:
5971 case 0xc:
5972 case 0xe:
5973 rs = (insn >> 8) & 0xf;
5974 rn = (insn >> 12) & 0xf;
5975 rd = (insn >> 16) & 0xf;
5976 if (op1 == 1) {
5977 /* (32 * 16) >> 16 */
5e3f878a
PB
5978 tmp = load_reg(s, rm);
5979 tmp2 = load_reg(s, rs);
9ee6e8bb 5980 if (sh & 4)
5e3f878a 5981 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 5982 else
5e3f878a
PB
5983 gen_sxth(tmp2);
5984 tmp2 = gen_muls_i64_i32(tmp, tmp2);
5985 tcg_gen_shri_i64(tmp2, tmp2, 16);
5986 tmp = new_tmp();
5987 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb 5988 if ((sh & 2) == 0) {
5e3f878a
PB
5989 tmp2 = load_reg(s, rn);
5990 gen_helper_add_setq(tmp, tmp, tmp2);
5991 dead_tmp(tmp2);
9ee6e8bb 5992 }
5e3f878a 5993 store_reg(s, rd, tmp);
9ee6e8bb
PB
5994 } else {
5995 /* 16 * 16 */
5e3f878a
PB
5996 tmp = load_reg(s, rm);
5997 tmp2 = load_reg(s, rs);
5998 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
5999 dead_tmp(tmp2);
9ee6e8bb 6000 if (op1 == 2) {
22478e79
AZ
6001 tmp2 = tcg_temp_new(TCG_TYPE_I64);
6002 tcg_gen_ext_i32_i64(tmp2, tmp);
6003 dead_tmp(tmp);
6004 gen_addq(s, tmp2, rn, rd);
6005 gen_storeq_reg(s, rn, rd, tmp2);
9ee6e8bb
PB
6006 } else {
6007 if (op1 == 0) {
5e3f878a
PB
6008 tmp2 = load_reg(s, rn);
6009 gen_helper_add_setq(tmp, tmp, tmp2);
6010 dead_tmp(tmp2);
9ee6e8bb 6011 }
5e3f878a 6012 store_reg(s, rd, tmp);
9ee6e8bb
PB
6013 }
6014 }
6015 break;
6016 default:
6017 goto illegal_op;
6018 }
6019 } else if (((insn & 0x0e000000) == 0 &&
6020 (insn & 0x00000090) != 0x90) ||
6021 ((insn & 0x0e000000) == (1 << 25))) {
6022 int set_cc, logic_cc, shiftop;
6023
6024 op1 = (insn >> 21) & 0xf;
6025 set_cc = (insn >> 20) & 1;
6026 logic_cc = table_logic_cc[op1] & set_cc;
6027
6028 /* data processing instruction */
6029 if (insn & (1 << 25)) {
6030 /* immediate operand */
6031 val = insn & 0xff;
6032 shift = ((insn >> 8) & 0xf) * 2;
6033 if (shift)
6034 val = (val >> shift) | (val << (32 - shift));
6035 gen_op_movl_T1_im(val);
6036 if (logic_cc && shift)
b26eefb6 6037 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6038 } else {
6039 /* register */
6040 rm = (insn) & 0xf;
6041 gen_movl_T1_reg(s, rm);
6042 shiftop = (insn >> 5) & 3;
6043 if (!(insn & (1 << 4))) {
6044 shift = (insn >> 7) & 0x1f;
9a119ff6 6045 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
6046 } else {
6047 rs = (insn >> 8) & 0xf;
8984bd2e
PB
6048 tmp = load_reg(s, rs);
6049 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
9ee6e8bb
PB
6050 }
6051 }
6052 if (op1 != 0x0f && op1 != 0x0d) {
6053 rn = (insn >> 16) & 0xf;
6054 gen_movl_T0_reg(s, rn);
6055 }
6056 rd = (insn >> 12) & 0xf;
6057 switch(op1) {
6058 case 0x00:
6059 gen_op_andl_T0_T1();
6060 gen_movl_reg_T0(s, rd);
6061 if (logic_cc)
6062 gen_op_logic_T0_cc();
6063 break;
6064 case 0x01:
6065 gen_op_xorl_T0_T1();
6066 gen_movl_reg_T0(s, rd);
6067 if (logic_cc)
6068 gen_op_logic_T0_cc();
6069 break;
6070 case 0x02:
6071 if (set_cc && rd == 15) {
6072 /* SUBS r15, ... is used for exception return. */
6073 if (IS_USER(s))
6074 goto illegal_op;
6075 gen_op_subl_T0_T1_cc();
6076 gen_exception_return(s);
6077 } else {
6078 if (set_cc)
6079 gen_op_subl_T0_T1_cc();
6080 else
6081 gen_op_subl_T0_T1();
6082 gen_movl_reg_T0(s, rd);
6083 }
6084 break;
6085 case 0x03:
6086 if (set_cc)
6087 gen_op_rsbl_T0_T1_cc();
6088 else
6089 gen_op_rsbl_T0_T1();
6090 gen_movl_reg_T0(s, rd);
6091 break;
6092 case 0x04:
6093 if (set_cc)
6094 gen_op_addl_T0_T1_cc();
6095 else
6096 gen_op_addl_T0_T1();
6097 gen_movl_reg_T0(s, rd);
6098 break;
6099 case 0x05:
6100 if (set_cc)
6101 gen_op_adcl_T0_T1_cc();
6102 else
b26eefb6 6103 gen_adc_T0_T1();
9ee6e8bb
PB
6104 gen_movl_reg_T0(s, rd);
6105 break;
6106 case 0x06:
6107 if (set_cc)
6108 gen_op_sbcl_T0_T1_cc();
6109 else
3670669c 6110 gen_sbc_T0_T1();
9ee6e8bb
PB
6111 gen_movl_reg_T0(s, rd);
6112 break;
6113 case 0x07:
6114 if (set_cc)
6115 gen_op_rscl_T0_T1_cc();
6116 else
3670669c 6117 gen_rsc_T0_T1();
9ee6e8bb
PB
6118 gen_movl_reg_T0(s, rd);
6119 break;
6120 case 0x08:
6121 if (set_cc) {
6122 gen_op_andl_T0_T1();
6123 gen_op_logic_T0_cc();
6124 }
6125 break;
6126 case 0x09:
6127 if (set_cc) {
6128 gen_op_xorl_T0_T1();
6129 gen_op_logic_T0_cc();
6130 }
6131 break;
6132 case 0x0a:
6133 if (set_cc) {
6134 gen_op_subl_T0_T1_cc();
6135 }
6136 break;
6137 case 0x0b:
6138 if (set_cc) {
6139 gen_op_addl_T0_T1_cc();
6140 }
6141 break;
6142 case 0x0c:
6143 gen_op_orl_T0_T1();
6144 gen_movl_reg_T0(s, rd);
6145 if (logic_cc)
6146 gen_op_logic_T0_cc();
6147 break;
6148 case 0x0d:
6149 if (logic_cc && rd == 15) {
6150 /* MOVS r15, ... is used for exception return. */
6151 if (IS_USER(s))
6152 goto illegal_op;
6153 gen_op_movl_T0_T1();
6154 gen_exception_return(s);
6155 } else {
6156 gen_movl_reg_T1(s, rd);
6157 if (logic_cc)
6158 gen_op_logic_T1_cc();
6159 }
6160 break;
6161 case 0x0e:
6162 gen_op_bicl_T0_T1();
6163 gen_movl_reg_T0(s, rd);
6164 if (logic_cc)
6165 gen_op_logic_T0_cc();
6166 break;
6167 default:
6168 case 0x0f:
6169 gen_op_notl_T1();
6170 gen_movl_reg_T1(s, rd);
6171 if (logic_cc)
6172 gen_op_logic_T1_cc();
6173 break;
6174 }
6175 } else {
6176 /* other instructions */
6177 op1 = (insn >> 24) & 0xf;
6178 switch(op1) {
6179 case 0x0:
6180 case 0x1:
6181 /* multiplies, extra load/stores */
6182 sh = (insn >> 5) & 3;
6183 if (sh == 0) {
6184 if (op1 == 0x0) {
6185 rd = (insn >> 16) & 0xf;
6186 rn = (insn >> 12) & 0xf;
6187 rs = (insn >> 8) & 0xf;
6188 rm = (insn) & 0xf;
6189 op1 = (insn >> 20) & 0xf;
6190 switch (op1) {
6191 case 0: case 1: case 2: case 3: case 6:
6192 /* 32 bit mul */
5e3f878a
PB
6193 tmp = load_reg(s, rs);
6194 tmp2 = load_reg(s, rm);
6195 tcg_gen_mul_i32(tmp, tmp, tmp2);
6196 dead_tmp(tmp2);
9ee6e8bb
PB
6197 if (insn & (1 << 22)) {
6198 /* Subtract (mls) */
6199 ARCH(6T2);
5e3f878a
PB
6200 tmp2 = load_reg(s, rn);
6201 tcg_gen_sub_i32(tmp, tmp2, tmp);
6202 dead_tmp(tmp2);
9ee6e8bb
PB
6203 } else if (insn & (1 << 21)) {
6204 /* Add */
5e3f878a
PB
6205 tmp2 = load_reg(s, rn);
6206 tcg_gen_add_i32(tmp, tmp, tmp2);
6207 dead_tmp(tmp2);
9ee6e8bb
PB
6208 }
6209 if (insn & (1 << 20))
5e3f878a
PB
6210 gen_logic_CC(tmp);
6211 store_reg(s, rd, tmp);
9ee6e8bb
PB
6212 break;
6213 default:
6214 /* 64 bit mul */
5e3f878a
PB
6215 tmp = load_reg(s, rs);
6216 tmp2 = load_reg(s, rm);
9ee6e8bb 6217 if (insn & (1 << 22))
5e3f878a 6218 tmp = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6219 else
5e3f878a 6220 tmp = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6221 if (insn & (1 << 21)) /* mult accumulate */
5e3f878a 6222 gen_addq(s, tmp, rn, rd);
9ee6e8bb
PB
6223 if (!(insn & (1 << 23))) { /* double accumulate */
6224 ARCH(6);
5e3f878a
PB
6225 gen_addq_lo(s, tmp, rn);
6226 gen_addq_lo(s, tmp, rd);
9ee6e8bb
PB
6227 }
6228 if (insn & (1 << 20))
5e3f878a
PB
6229 gen_logicq_cc(tmp);
6230 gen_storeq_reg(s, rn, rd, tmp);
9ee6e8bb
PB
6231 break;
6232 }
6233 } else {
6234 rn = (insn >> 16) & 0xf;
6235 rd = (insn >> 12) & 0xf;
6236 if (insn & (1 << 23)) {
6237 /* load/store exclusive */
6238 gen_movl_T1_reg(s, rn);
72f1c62f 6239 addr = cpu_T[1];
9ee6e8bb 6240 if (insn & (1 << 20)) {
8f8e3aa4
PB
6241 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6242 tmp = gen_ld32(addr, IS_USER(s));
6243 store_reg(s, rd, tmp);
9ee6e8bb 6244 } else {
8f8e3aa4 6245 int label = gen_new_label();
9ee6e8bb 6246 rm = insn & 0xf;
8f8e3aa4 6247 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
6248 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6249 0, label);
8f8e3aa4
PB
6250 tmp = load_reg(s,rm);
6251 gen_st32(tmp, cpu_T[1], IS_USER(s));
2637a3be 6252 gen_set_label(label);
8f8e3aa4 6253 gen_movl_reg_T0(s, rd);
9ee6e8bb 6254 }
9ee6e8bb
PB
6255 } else {
6256 /* SWP instruction */
6257 rm = (insn) & 0xf;
6258
8984bd2e
PB
6259 /* ??? This is not really atomic. However we know
6260 we never have multiple CPUs running in parallel,
6261 so it is good enough. */
6262 addr = load_reg(s, rn);
6263 tmp = load_reg(s, rm);
9ee6e8bb 6264 if (insn & (1 << 22)) {
8984bd2e
PB
6265 tmp2 = gen_ld8u(addr, IS_USER(s));
6266 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6267 } else {
8984bd2e
PB
6268 tmp2 = gen_ld32(addr, IS_USER(s));
6269 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6270 }
8984bd2e
PB
6271 dead_tmp(addr);
6272 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6273 }
6274 }
6275 } else {
6276 int address_offset;
6277 int load;
6278 /* Misc load/store */
6279 rn = (insn >> 16) & 0xf;
6280 rd = (insn >> 12) & 0xf;
b0109805 6281 addr = load_reg(s, rn);
9ee6e8bb 6282 if (insn & (1 << 24))
b0109805 6283 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6284 address_offset = 0;
6285 if (insn & (1 << 20)) {
6286 /* load */
6287 switch(sh) {
6288 case 1:
b0109805 6289 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6290 break;
6291 case 2:
b0109805 6292 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6293 break;
6294 default:
6295 case 3:
b0109805 6296 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6297 break;
6298 }
6299 load = 1;
6300 } else if (sh & 2) {
6301 /* doubleword */
6302 if (sh & 1) {
6303 /* store */
b0109805
PB
6304 tmp = load_reg(s, rd);
6305 gen_st32(tmp, addr, IS_USER(s));
6306 tcg_gen_addi_i32(addr, addr, 4);
6307 tmp = load_reg(s, rd + 1);
6308 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6309 load = 0;
6310 } else {
6311 /* load */
b0109805
PB
6312 tmp = gen_ld32(addr, IS_USER(s));
6313 store_reg(s, rd, tmp);
6314 tcg_gen_addi_i32(addr, addr, 4);
6315 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6316 rd++;
6317 load = 1;
6318 }
6319 address_offset = -4;
6320 } else {
6321 /* store */
b0109805
PB
6322 tmp = load_reg(s, rd);
6323 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6324 load = 0;
6325 }
6326 /* Perform base writeback before the loaded value to
6327 ensure correct behavior with overlapping index registers.
6328 ldrd with base writeback is is undefined if the
6329 destination and index registers overlap. */
6330 if (!(insn & (1 << 24))) {
b0109805
PB
6331 gen_add_datah_offset(s, insn, address_offset, addr);
6332 store_reg(s, rn, addr);
9ee6e8bb
PB
6333 } else if (insn & (1 << 21)) {
6334 if (address_offset)
b0109805
PB
6335 tcg_gen_addi_i32(addr, addr, address_offset);
6336 store_reg(s, rn, addr);
6337 } else {
6338 dead_tmp(addr);
9ee6e8bb
PB
6339 }
6340 if (load) {
6341 /* Complete the load. */
b0109805 6342 store_reg(s, rd, tmp);
9ee6e8bb
PB
6343 }
6344 }
6345 break;
6346 case 0x4:
6347 case 0x5:
6348 goto do_ldst;
6349 case 0x6:
6350 case 0x7:
6351 if (insn & (1 << 4)) {
6352 ARCH(6);
6353 /* Armv6 Media instructions. */
6354 rm = insn & 0xf;
6355 rn = (insn >> 16) & 0xf;
2c0262af 6356 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6357 rs = (insn >> 8) & 0xf;
6358 switch ((insn >> 23) & 3) {
6359 case 0: /* Parallel add/subtract. */
6360 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6361 tmp = load_reg(s, rn);
6362 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6363 sh = (insn >> 5) & 7;
6364 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6365 goto illegal_op;
6ddbc6e4
PB
6366 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6367 dead_tmp(tmp2);
6368 store_reg(s, rd, tmp);
9ee6e8bb
PB
6369 break;
6370 case 1:
6371 if ((insn & 0x00700020) == 0) {
6c95676b 6372 /* Halfword pack. */
3670669c
PB
6373 tmp = load_reg(s, rn);
6374 tmp2 = load_reg(s, rm);
9ee6e8bb 6375 shift = (insn >> 7) & 0x1f;
3670669c
PB
6376 if (insn & (1 << 6)) {
6377 /* pkhtb */
22478e79
AZ
6378 if (shift == 0)
6379 shift = 31;
6380 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6381 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6382 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6383 } else {
6384 /* pkhbt */
22478e79
AZ
6385 if (shift)
6386 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6387 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6388 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6389 }
6390 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6391 dead_tmp(tmp2);
3670669c 6392 store_reg(s, rd, tmp);
9ee6e8bb
PB
6393 } else if ((insn & 0x00200020) == 0x00200000) {
6394 /* [us]sat */
6ddbc6e4 6395 tmp = load_reg(s, rm);
9ee6e8bb
PB
6396 shift = (insn >> 7) & 0x1f;
6397 if (insn & (1 << 6)) {
6398 if (shift == 0)
6399 shift = 31;
6ddbc6e4 6400 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6401 } else {
6ddbc6e4 6402 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6403 }
6404 sh = (insn >> 16) & 0x1f;
6405 if (sh != 0) {
6406 if (insn & (1 << 22))
6ddbc6e4 6407 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6408 else
6ddbc6e4 6409 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6410 }
6ddbc6e4 6411 store_reg(s, rd, tmp);
9ee6e8bb
PB
6412 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6413 /* [us]sat16 */
6ddbc6e4 6414 tmp = load_reg(s, rm);
9ee6e8bb
PB
6415 sh = (insn >> 16) & 0x1f;
6416 if (sh != 0) {
6417 if (insn & (1 << 22))
6ddbc6e4 6418 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6419 else
6ddbc6e4 6420 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6421 }
6ddbc6e4 6422 store_reg(s, rd, tmp);
9ee6e8bb
PB
6423 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6424 /* Select bytes. */
6ddbc6e4
PB
6425 tmp = load_reg(s, rn);
6426 tmp2 = load_reg(s, rm);
6427 tmp3 = new_tmp();
6428 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6429 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6430 dead_tmp(tmp3);
6431 dead_tmp(tmp2);
6432 store_reg(s, rd, tmp);
9ee6e8bb 6433 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6434 tmp = load_reg(s, rm);
9ee6e8bb
PB
6435 shift = (insn >> 10) & 3;
6436 /* ??? In many cases it's not neccessary to do a
6437 rotate, a shift is sufficient. */
6438 if (shift != 0)
5e3f878a 6439 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6440 op1 = (insn >> 20) & 7;
6441 switch (op1) {
5e3f878a
PB
6442 case 0: gen_sxtb16(tmp); break;
6443 case 2: gen_sxtb(tmp); break;
6444 case 3: gen_sxth(tmp); break;
6445 case 4: gen_uxtb16(tmp); break;
6446 case 6: gen_uxtb(tmp); break;
6447 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6448 default: goto illegal_op;
6449 }
6450 if (rn != 15) {
5e3f878a 6451 tmp2 = load_reg(s, rn);
9ee6e8bb 6452 if ((op1 & 3) == 0) {
5e3f878a 6453 gen_add16(tmp, tmp2);
9ee6e8bb 6454 } else {
5e3f878a
PB
6455 tcg_gen_add_i32(tmp, tmp, tmp2);
6456 dead_tmp(tmp2);
9ee6e8bb
PB
6457 }
6458 }
6c95676b 6459 store_reg(s, rd, tmp);
9ee6e8bb
PB
6460 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6461 /* rev */
b0109805 6462 tmp = load_reg(s, rm);
9ee6e8bb
PB
6463 if (insn & (1 << 22)) {
6464 if (insn & (1 << 7)) {
b0109805 6465 gen_revsh(tmp);
9ee6e8bb
PB
6466 } else {
6467 ARCH(6T2);
b0109805 6468 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6469 }
6470 } else {
6471 if (insn & (1 << 7))
b0109805 6472 gen_rev16(tmp);
9ee6e8bb 6473 else
b0109805 6474 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb 6475 }
b0109805 6476 store_reg(s, rd, tmp);
9ee6e8bb
PB
6477 } else {
6478 goto illegal_op;
6479 }
6480 break;
6481 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6482 tmp = load_reg(s, rm);
6483 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6484 if (insn & (1 << 20)) {
6485 /* Signed multiply most significant [accumulate]. */
5e3f878a 6486 tmp2 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6487 if (insn & (1 << 5))
5e3f878a
PB
6488 tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
6489 tcg_gen_shri_i64(tmp2, tmp2, 32);
6490 tmp = new_tmp();
6491 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb 6492 if (rn != 15) {
5e3f878a 6493 tmp2 = load_reg(s, rn);
9ee6e8bb 6494 if (insn & (1 << 6)) {
5e3f878a 6495 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6496 } else {
5e3f878a 6497 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6498 }
5e3f878a 6499 dead_tmp(tmp2);
9ee6e8bb 6500 }
5e3f878a 6501 store_reg(s, rd, tmp);
9ee6e8bb
PB
6502 } else {
6503 if (insn & (1 << 5))
5e3f878a
PB
6504 gen_swap_half(tmp2);
6505 gen_smul_dual(tmp, tmp2);
6506 /* This addition cannot overflow. */
6507 if (insn & (1 << 6)) {
6508 tcg_gen_sub_i32(tmp, tmp, tmp2);
6509 } else {
6510 tcg_gen_add_i32(tmp, tmp, tmp2);
6511 }
6512 dead_tmp(tmp2);
9ee6e8bb 6513 if (insn & (1 << 22)) {
5e3f878a
PB
6514 /* smlald, smlsld */
6515 tmp2 = tcg_temp_new(TCG_TYPE_I64);
6516 tcg_gen_ext_i32_i64(tmp2, tmp);
6517 dead_tmp(tmp);
22478e79
AZ
6518 gen_addq(s, tmp2, rd, rn);
6519 gen_storeq_reg(s, rd, rn, tmp2);
9ee6e8bb 6520 } else {
5e3f878a 6521 /* smuad, smusd, smlad, smlsd */
22478e79 6522 if (rd != 15)
9ee6e8bb 6523 {
22478e79 6524 tmp2 = load_reg(s, rd);
5e3f878a
PB
6525 gen_helper_add_setq(tmp, tmp, tmp2);
6526 dead_tmp(tmp2);
9ee6e8bb 6527 }
22478e79 6528 store_reg(s, rn, tmp);
9ee6e8bb
PB
6529 }
6530 }
6531 break;
6532 case 3:
6533 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6534 switch (op1) {
6535 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6536 ARCH(6);
6537 tmp = load_reg(s, rm);
6538 tmp2 = load_reg(s, rs);
6539 gen_helper_usad8(tmp, tmp, tmp2);
6540 dead_tmp(tmp2);
9ee6e8bb 6541 if (rn != 15) {
6ddbc6e4
PB
6542 tmp2 = load_reg(s, rn);
6543 tcg_gen_add_i32(tmp, tmp, tmp2);
6544 dead_tmp(tmp2);
9ee6e8bb 6545 }
6ddbc6e4 6546 store_reg(s, rd, tmp);
9ee6e8bb
PB
6547 break;
6548 case 0x20: case 0x24: case 0x28: case 0x2c:
6549 /* Bitfield insert/clear. */
6550 ARCH(6T2);
6551 shift = (insn >> 7) & 0x1f;
6552 i = (insn >> 16) & 0x1f;
6553 i = i + 1 - shift;
6554 if (rm == 15) {
5e3f878a
PB
6555 tmp = new_tmp();
6556 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6557 } else {
5e3f878a 6558 tmp = load_reg(s, rm);
9ee6e8bb
PB
6559 }
6560 if (i != 32) {
5e3f878a 6561 tmp2 = load_reg(s, rd);
8f8e3aa4 6562 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6563 dead_tmp(tmp2);
9ee6e8bb 6564 }
5e3f878a 6565 store_reg(s, rd, tmp);
9ee6e8bb
PB
6566 break;
6567 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6568 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5e3f878a 6569 tmp = load_reg(s, rm);
9ee6e8bb
PB
6570 shift = (insn >> 7) & 0x1f;
6571 i = ((insn >> 16) & 0x1f) + 1;
6572 if (shift + i > 32)
6573 goto illegal_op;
6574 if (i < 32) {
6575 if (op1 & 0x20) {
5e3f878a 6576 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6577 } else {
5e3f878a 6578 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6579 }
6580 }
5e3f878a 6581 store_reg(s, rd, tmp);
9ee6e8bb
PB
6582 break;
6583 default:
6584 goto illegal_op;
6585 }
6586 break;
6587 }
6588 break;
6589 }
6590 do_ldst:
6591 /* Check for undefined extension instructions
6592 * per the ARM Bible IE:
6593 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6594 */
6595 sh = (0xf << 20) | (0xf << 4);
6596 if (op1 == 0x7 && ((insn & sh) == sh))
6597 {
6598 goto illegal_op;
6599 }
6600 /* load/store byte/word */
6601 rn = (insn >> 16) & 0xf;
6602 rd = (insn >> 12) & 0xf;
b0109805 6603 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6604 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6605 if (insn & (1 << 24))
b0109805 6606 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6607 if (insn & (1 << 20)) {
6608 /* load */
6609 s->is_mem = 1;
9ee6e8bb 6610 if (insn & (1 << 22)) {
b0109805 6611 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6612 } else {
b0109805 6613 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6614 }
9ee6e8bb
PB
6615 } else {
6616 /* store */
b0109805 6617 tmp = load_reg(s, rd);
9ee6e8bb 6618 if (insn & (1 << 22))
b0109805 6619 gen_st8(tmp, tmp2, i);
9ee6e8bb 6620 else
b0109805 6621 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6622 }
6623 if (!(insn & (1 << 24))) {
b0109805
PB
6624 gen_add_data_offset(s, insn, tmp2);
6625 store_reg(s, rn, tmp2);
6626 } else if (insn & (1 << 21)) {
6627 store_reg(s, rn, tmp2);
6628 } else {
6629 dead_tmp(tmp2);
9ee6e8bb
PB
6630 }
6631 if (insn & (1 << 20)) {
6632 /* Complete the load. */
6633 if (rd == 15)
b0109805 6634 gen_bx(s, tmp);
9ee6e8bb 6635 else
b0109805 6636 store_reg(s, rd, tmp);
9ee6e8bb
PB
6637 }
6638 break;
6639 case 0x08:
6640 case 0x09:
6641 {
6642 int j, n, user, loaded_base;
b0109805 6643 TCGv loaded_var;
9ee6e8bb
PB
6644 /* load/store multiple words */
6645 /* XXX: store correct base if write back */
6646 user = 0;
6647 if (insn & (1 << 22)) {
6648 if (IS_USER(s))
6649 goto illegal_op; /* only usable in supervisor mode */
6650
6651 if ((insn & (1 << 15)) == 0)
6652 user = 1;
6653 }
6654 rn = (insn >> 16) & 0xf;
b0109805 6655 addr = load_reg(s, rn);
9ee6e8bb
PB
6656
6657 /* compute total size */
6658 loaded_base = 0;
a50f5b91 6659 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6660 n = 0;
6661 for(i=0;i<16;i++) {
6662 if (insn & (1 << i))
6663 n++;
6664 }
6665 /* XXX: test invalid n == 0 case ? */
6666 if (insn & (1 << 23)) {
6667 if (insn & (1 << 24)) {
6668 /* pre increment */
b0109805 6669 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6670 } else {
6671 /* post increment */
6672 }
6673 } else {
6674 if (insn & (1 << 24)) {
6675 /* pre decrement */
b0109805 6676 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6677 } else {
6678 /* post decrement */
6679 if (n != 1)
b0109805 6680 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6681 }
6682 }
6683 j = 0;
6684 for(i=0;i<16;i++) {
6685 if (insn & (1 << i)) {
6686 if (insn & (1 << 20)) {
6687 /* load */
b0109805 6688 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6689 if (i == 15) {
b0109805 6690 gen_bx(s, tmp);
9ee6e8bb 6691 } else if (user) {
b0109805
PB
6692 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6693 dead_tmp(tmp);
9ee6e8bb 6694 } else if (i == rn) {
b0109805 6695 loaded_var = tmp;
9ee6e8bb
PB
6696 loaded_base = 1;
6697 } else {
b0109805 6698 store_reg(s, i, tmp);
9ee6e8bb
PB
6699 }
6700 } else {
6701 /* store */
6702 if (i == 15) {
6703 /* special case: r15 = PC + 8 */
6704 val = (long)s->pc + 4;
b0109805
PB
6705 tmp = new_tmp();
6706 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6707 } else if (user) {
b0109805
PB
6708 tmp = new_tmp();
6709 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6710 } else {
b0109805 6711 tmp = load_reg(s, i);
9ee6e8bb 6712 }
b0109805 6713 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6714 }
6715 j++;
6716 /* no need to add after the last transfer */
6717 if (j != n)
b0109805 6718 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6719 }
6720 }
6721 if (insn & (1 << 21)) {
6722 /* write back */
6723 if (insn & (1 << 23)) {
6724 if (insn & (1 << 24)) {
6725 /* pre increment */
6726 } else {
6727 /* post increment */
b0109805 6728 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6729 }
6730 } else {
6731 if (insn & (1 << 24)) {
6732 /* pre decrement */
6733 if (n != 1)
b0109805 6734 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6735 } else {
6736 /* post decrement */
b0109805 6737 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6738 }
6739 }
b0109805
PB
6740 store_reg(s, rn, addr);
6741 } else {
6742 dead_tmp(addr);
9ee6e8bb
PB
6743 }
6744 if (loaded_base) {
b0109805 6745 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6746 }
6747 if ((insn & (1 << 22)) && !user) {
6748 /* Restore CPSR from SPSR. */
d9ba4830
PB
6749 tmp = load_cpu_field(spsr);
6750 gen_set_cpsr(tmp, 0xffffffff);
6751 dead_tmp(tmp);
9ee6e8bb
PB
6752 s->is_jmp = DISAS_UPDATE;
6753 }
6754 }
6755 break;
6756 case 0xa:
6757 case 0xb:
6758 {
6759 int32_t offset;
6760
6761 /* branch (and link) */
6762 val = (int32_t)s->pc;
6763 if (insn & (1 << 24)) {
5e3f878a
PB
6764 tmp = new_tmp();
6765 tcg_gen_movi_i32(tmp, val);
6766 store_reg(s, 14, tmp);
9ee6e8bb
PB
6767 }
6768 offset = (((int32_t)insn << 8) >> 8);
6769 val += (offset << 2) + 4;
6770 gen_jmp(s, val);
6771 }
6772 break;
6773 case 0xc:
6774 case 0xd:
6775 case 0xe:
6776 /* Coprocessor. */
6777 if (disas_coproc_insn(env, s, insn))
6778 goto illegal_op;
6779 break;
6780 case 0xf:
6781 /* swi */
5e3f878a 6782 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6783 s->is_jmp = DISAS_SWI;
6784 break;
6785 default:
6786 illegal_op:
6787 gen_set_condexec(s);
5e3f878a 6788 gen_set_pc_im(s->pc - 4);
d9ba4830 6789 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6790 s->is_jmp = DISAS_JUMP;
6791 break;
6792 }
6793 }
6794}
6795
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    /* Opcodes below 8 (and, bic, orr, orn, eor, ...) form the logical
       group; 8 and above are the arithmetic group.  */
    if (op < 8)
        return 1;
    return 0;
}
6802
6803/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6804 then set condition code flags based on the result of the operation.
6805 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6806 to the high bit of T1.
6807 Returns zero if the opcode is valid. */
6808
6809static int
6810gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6811{
6812 int logic_cc;
6813
6814 logic_cc = 0;
6815 switch (op) {
6816 case 0: /* and */
6817 gen_op_andl_T0_T1();
6818 logic_cc = conds;
6819 break;
6820 case 1: /* bic */
6821 gen_op_bicl_T0_T1();
6822 logic_cc = conds;
6823 break;
6824 case 2: /* orr */
6825 gen_op_orl_T0_T1();
6826 logic_cc = conds;
6827 break;
6828 case 3: /* orn */
6829 gen_op_notl_T1();
6830 gen_op_orl_T0_T1();
6831 logic_cc = conds;
6832 break;
6833 case 4: /* eor */
6834 gen_op_xorl_T0_T1();
6835 logic_cc = conds;
6836 break;
6837 case 8: /* add */
6838 if (conds)
6839 gen_op_addl_T0_T1_cc();
6840 else
6841 gen_op_addl_T0_T1();
6842 break;
6843 case 10: /* adc */
6844 if (conds)
6845 gen_op_adcl_T0_T1_cc();
6846 else
b26eefb6 6847 gen_adc_T0_T1();
9ee6e8bb
PB
6848 break;
6849 case 11: /* sbc */
6850 if (conds)
6851 gen_op_sbcl_T0_T1_cc();
6852 else
3670669c 6853 gen_sbc_T0_T1();
9ee6e8bb
PB
6854 break;
6855 case 13: /* sub */
6856 if (conds)
6857 gen_op_subl_T0_T1_cc();
6858 else
6859 gen_op_subl_T0_T1();
6860 break;
6861 case 14: /* rsb */
6862 if (conds)
6863 gen_op_rsbl_T0_T1_cc();
6864 else
6865 gen_op_rsbl_T0_T1();
6866 break;
6867 default: /* 5, 6, 7, 9, 12, 15. */
6868 return 1;
6869 }
6870 if (logic_cc) {
6871 gen_op_logic_T0_cc();
6872 if (shifter_out)
b26eefb6 6873 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6874 }
6875 return 0;
6876}
6877
6878/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6879 is not legal. */
6880static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6881{
b0109805 6882 uint32_t insn, imm, shift, offset;
9ee6e8bb 6883 uint32_t rd, rn, rm, rs;
b26eefb6 6884 TCGv tmp;
6ddbc6e4
PB
6885 TCGv tmp2;
6886 TCGv tmp3;
b0109805 6887 TCGv addr;
9ee6e8bb
PB
6888 int op;
6889 int shiftop;
6890 int conds;
6891 int logic_cc;
6892
6893 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6894 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 6895 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
6896 16-bit instructions to get correct prefetch abort behavior. */
6897 insn = insn_hw1;
6898 if ((insn & (1 << 12)) == 0) {
6899 /* Second half of blx. */
6900 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
6901 tmp = load_reg(s, 14);
6902 tcg_gen_addi_i32(tmp, tmp, offset);
6903 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 6904
d9ba4830 6905 tmp2 = new_tmp();
b0109805 6906 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6907 store_reg(s, 14, tmp2);
6908 gen_bx(s, tmp);
9ee6e8bb
PB
6909 return 0;
6910 }
6911 if (insn & (1 << 11)) {
6912 /* Second half of bl. */
6913 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 6914 tmp = load_reg(s, 14);
6a0d8a1d 6915 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 6916
d9ba4830 6917 tmp2 = new_tmp();
b0109805 6918 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6919 store_reg(s, 14, tmp2);
6920 gen_bx(s, tmp);
9ee6e8bb
PB
6921 return 0;
6922 }
6923 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6924 /* Instruction spans a page boundary. Implement it as two
 6925                16-bit instructions in case the second half causes a
6926 prefetch abort. */
6927 offset = ((int32_t)insn << 21) >> 9;
b0109805 6928 gen_op_movl_T0_im(s->pc + 2 + offset);
9ee6e8bb
PB
6929 gen_movl_reg_T0(s, 14);
6930 return 0;
6931 }
6932 /* Fall through to 32-bit decode. */
6933 }
6934
6935 insn = lduw_code(s->pc);
6936 s->pc += 2;
6937 insn |= (uint32_t)insn_hw1 << 16;
6938
6939 if ((insn & 0xf800e800) != 0xf000e800) {
6940 ARCH(6T2);
6941 }
6942
6943 rn = (insn >> 16) & 0xf;
6944 rs = (insn >> 12) & 0xf;
6945 rd = (insn >> 8) & 0xf;
6946 rm = insn & 0xf;
6947 switch ((insn >> 25) & 0xf) {
6948 case 0: case 1: case 2: case 3:
6949 /* 16-bit instructions. Should never happen. */
6950 abort();
6951 case 4:
6952 if (insn & (1 << 22)) {
6953 /* Other load/store, table branch. */
6954 if (insn & 0x01200000) {
6955 /* Load/store doubleword. */
6956 if (rn == 15) {
b0109805
PB
6957 addr = new_tmp();
6958 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 6959 } else {
b0109805 6960 addr = load_reg(s, rn);
9ee6e8bb
PB
6961 }
6962 offset = (insn & 0xff) * 4;
6963 if ((insn & (1 << 23)) == 0)
6964 offset = -offset;
6965 if (insn & (1 << 24)) {
b0109805 6966 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
6967 offset = 0;
6968 }
6969 if (insn & (1 << 20)) {
6970 /* ldrd */
b0109805
PB
6971 tmp = gen_ld32(addr, IS_USER(s));
6972 store_reg(s, rs, tmp);
6973 tcg_gen_addi_i32(addr, addr, 4);
6974 tmp = gen_ld32(addr, IS_USER(s));
6975 store_reg(s, rd, tmp);
9ee6e8bb
PB
6976 } else {
6977 /* strd */
b0109805
PB
6978 tmp = load_reg(s, rs);
6979 gen_st32(tmp, addr, IS_USER(s));
6980 tcg_gen_addi_i32(addr, addr, 4);
6981 tmp = load_reg(s, rd);
6982 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6983 }
6984 if (insn & (1 << 21)) {
6985 /* Base writeback. */
6986 if (rn == 15)
6987 goto illegal_op;
b0109805
PB
6988 tcg_gen_addi_i32(addr, addr, offset - 4);
6989 store_reg(s, rn, addr);
6990 } else {
6991 dead_tmp(addr);
9ee6e8bb
PB
6992 }
6993 } else if ((insn & (1 << 23)) == 0) {
6994 /* Load/store exclusive word. */
2c0262af 6995 gen_movl_T1_reg(s, rn);
72f1c62f 6996 addr = cpu_T[1];
2c0262af 6997 if (insn & (1 << 20)) {
8f8e3aa4
PB
6998 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6999 tmp = gen_ld32(addr, IS_USER(s));
7000 store_reg(s, rd, tmp);
9ee6e8bb 7001 } else {
8f8e3aa4
PB
7002 int label = gen_new_label();
7003 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
7004 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7005 0, label);
8f8e3aa4
PB
7006 tmp = load_reg(s, rs);
7007 gen_st32(tmp, cpu_T[1], IS_USER(s));
7008 gen_set_label(label);
7009 gen_movl_reg_T0(s, rd);
9ee6e8bb 7010 }
9ee6e8bb
PB
7011 } else if ((insn & (1 << 6)) == 0) {
7012 /* Table Branch. */
7013 if (rn == 15) {
b0109805
PB
7014 addr = new_tmp();
7015 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7016 } else {
b0109805 7017 addr = load_reg(s, rn);
9ee6e8bb 7018 }
b26eefb6 7019 tmp = load_reg(s, rm);
b0109805 7020 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7021 if (insn & (1 << 4)) {
7022 /* tbh */
b0109805 7023 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7024 dead_tmp(tmp);
b0109805 7025 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7026 } else { /* tbb */
b26eefb6 7027 dead_tmp(tmp);
b0109805 7028 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7029 }
b0109805
PB
7030 dead_tmp(addr);
7031 tcg_gen_shli_i32(tmp, tmp, 1);
7032 tcg_gen_addi_i32(tmp, tmp, s->pc);
7033 store_reg(s, 15, tmp);
9ee6e8bb
PB
7034 } else {
7035 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7036 /* ??? These are not really atomic. However we know
7037 we never have multiple CPUs running in parallel,
7038 so it is good enough. */
9ee6e8bb 7039 op = (insn >> 4) & 0x3;
8f8e3aa4
PB
7040 /* Must use a global reg for the address because we have
7041 a conditional branch in the store instruction. */
9ee6e8bb 7042 gen_movl_T1_reg(s, rn);
8f8e3aa4 7043 addr = cpu_T[1];
9ee6e8bb 7044 if (insn & (1 << 20)) {
8f8e3aa4 7045 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7046 switch (op) {
7047 case 0:
8f8e3aa4 7048 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7049 break;
2c0262af 7050 case 1:
8f8e3aa4 7051 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7052 break;
9ee6e8bb 7053 case 3:
8f8e3aa4
PB
7054 tmp = gen_ld32(addr, IS_USER(s));
7055 tcg_gen_addi_i32(addr, addr, 4);
7056 tmp2 = gen_ld32(addr, IS_USER(s));
7057 store_reg(s, rd, tmp2);
2c0262af
FB
7058 break;
7059 default:
9ee6e8bb
PB
7060 goto illegal_op;
7061 }
8f8e3aa4 7062 store_reg(s, rs, tmp);
9ee6e8bb 7063 } else {
8f8e3aa4
PB
7064 int label = gen_new_label();
7065 /* Must use a global that is not killed by the branch. */
7066 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a 7067 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
8f8e3aa4 7068 tmp = load_reg(s, rs);
9ee6e8bb
PB
7069 switch (op) {
7070 case 0:
8f8e3aa4 7071 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7072 break;
7073 case 1:
8f8e3aa4 7074 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7075 break;
2c0262af 7076 case 3:
8f8e3aa4
PB
7077 gen_st32(tmp, addr, IS_USER(s));
7078 tcg_gen_addi_i32(addr, addr, 4);
7079 tmp = load_reg(s, rd);
7080 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7081 break;
9ee6e8bb
PB
7082 default:
7083 goto illegal_op;
2c0262af 7084 }
8f8e3aa4 7085 gen_set_label(label);
9ee6e8bb
PB
7086 gen_movl_reg_T0(s, rm);
7087 }
7088 }
7089 } else {
7090 /* Load/store multiple, RFE, SRS. */
7091 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7092 /* Not available in user mode. */
b0109805 7093 if (IS_USER(s))
9ee6e8bb
PB
7094 goto illegal_op;
7095 if (insn & (1 << 20)) {
7096 /* rfe */
b0109805
PB
7097 addr = load_reg(s, rn);
7098 if ((insn & (1 << 24)) == 0)
7099 tcg_gen_addi_i32(addr, addr, -8);
7100 /* Load PC into tmp and CPSR into tmp2. */
7101 tmp = gen_ld32(addr, 0);
7102 tcg_gen_addi_i32(addr, addr, 4);
7103 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7104 if (insn & (1 << 21)) {
7105 /* Base writeback. */
b0109805
PB
7106 if (insn & (1 << 24)) {
7107 tcg_gen_addi_i32(addr, addr, 4);
7108 } else {
7109 tcg_gen_addi_i32(addr, addr, -4);
7110 }
7111 store_reg(s, rn, addr);
7112 } else {
7113 dead_tmp(addr);
9ee6e8bb 7114 }
b0109805 7115 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7116 } else {
7117 /* srs */
7118 op = (insn & 0x1f);
7119 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7120 addr = load_reg(s, 13);
9ee6e8bb 7121 } else {
b0109805
PB
7122 addr = new_tmp();
7123 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7124 }
7125 if ((insn & (1 << 24)) == 0) {
b0109805 7126 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7127 }
b0109805
PB
7128 tmp = load_reg(s, 14);
7129 gen_st32(tmp, addr, 0);
7130 tcg_gen_addi_i32(addr, addr, 4);
7131 tmp = new_tmp();
7132 gen_helper_cpsr_read(tmp);
7133 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7134 if (insn & (1 << 21)) {
7135 if ((insn & (1 << 24)) == 0) {
b0109805 7136 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7137 } else {
b0109805 7138 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7139 }
7140 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7141 store_reg(s, 13, addr);
9ee6e8bb 7142 } else {
b0109805
PB
7143 gen_helper_set_r13_banked(cpu_env,
7144 tcg_const_i32(op), addr);
9ee6e8bb 7145 }
b0109805
PB
7146 } else {
7147 dead_tmp(addr);
9ee6e8bb
PB
7148 }
7149 }
7150 } else {
7151 int i;
7152 /* Load/store multiple. */
b0109805 7153 addr = load_reg(s, rn);
9ee6e8bb
PB
7154 offset = 0;
7155 for (i = 0; i < 16; i++) {
7156 if (insn & (1 << i))
7157 offset += 4;
7158 }
7159 if (insn & (1 << 24)) {
b0109805 7160 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7161 }
7162
7163 for (i = 0; i < 16; i++) {
7164 if ((insn & (1 << i)) == 0)
7165 continue;
7166 if (insn & (1 << 20)) {
7167 /* Load. */
b0109805 7168 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7169 if (i == 15) {
b0109805 7170 gen_bx(s, tmp);
9ee6e8bb 7171 } else {
b0109805 7172 store_reg(s, i, tmp);
9ee6e8bb
PB
7173 }
7174 } else {
7175 /* Store. */
b0109805
PB
7176 tmp = load_reg(s, i);
7177 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7178 }
b0109805 7179 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7180 }
7181 if (insn & (1 << 21)) {
7182 /* Base register writeback. */
7183 if (insn & (1 << 24)) {
b0109805 7184 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7185 }
7186 /* Fault if writeback register is in register list. */
7187 if (insn & (1 << rn))
7188 goto illegal_op;
b0109805
PB
7189 store_reg(s, rn, addr);
7190 } else {
7191 dead_tmp(addr);
9ee6e8bb
PB
7192 }
7193 }
7194 }
7195 break;
7196 case 5: /* Data processing register constant shift. */
7197 if (rn == 15)
7198 gen_op_movl_T0_im(0);
7199 else
7200 gen_movl_T0_reg(s, rn);
7201 gen_movl_T1_reg(s, rm);
7202 op = (insn >> 21) & 0xf;
7203 shiftop = (insn >> 4) & 3;
7204 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7205 conds = (insn & (1 << 20)) != 0;
7206 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7207 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
7208 if (gen_thumb2_data_op(s, op, conds, 0))
7209 goto illegal_op;
7210 if (rd != 15)
7211 gen_movl_reg_T0(s, rd);
7212 break;
7213 case 13: /* Misc data processing. */
7214 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7215 if (op < 4 && (insn & 0xf000) != 0xf000)
7216 goto illegal_op;
7217 switch (op) {
7218 case 0: /* Register controlled shift. */
8984bd2e
PB
7219 tmp = load_reg(s, rn);
7220 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7221 if ((insn & 0x70) != 0)
7222 goto illegal_op;
7223 op = (insn >> 21) & 3;
8984bd2e
PB
7224 logic_cc = (insn & (1 << 20)) != 0;
7225 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7226 if (logic_cc)
7227 gen_logic_CC(tmp);
7228 store_reg(s, rd, tmp);
9ee6e8bb
PB
7229 break;
7230 case 1: /* Sign/zero extend. */
5e3f878a 7231 tmp = load_reg(s, rm);
9ee6e8bb
PB
7232 shift = (insn >> 4) & 3;
 7233                 /* ??? In many cases it's not necessary to do a
7234 rotate, a shift is sufficient. */
7235 if (shift != 0)
5e3f878a 7236 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7237 op = (insn >> 20) & 7;
7238 switch (op) {
5e3f878a
PB
7239 case 0: gen_sxth(tmp); break;
7240 case 1: gen_uxth(tmp); break;
7241 case 2: gen_sxtb16(tmp); break;
7242 case 3: gen_uxtb16(tmp); break;
7243 case 4: gen_sxtb(tmp); break;
7244 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7245 default: goto illegal_op;
7246 }
7247 if (rn != 15) {
5e3f878a 7248 tmp2 = load_reg(s, rn);
9ee6e8bb 7249 if ((op >> 1) == 1) {
5e3f878a 7250 gen_add16(tmp, tmp2);
9ee6e8bb 7251 } else {
5e3f878a
PB
7252 tcg_gen_add_i32(tmp, tmp, tmp2);
7253 dead_tmp(tmp2);
9ee6e8bb
PB
7254 }
7255 }
5e3f878a 7256 store_reg(s, rd, tmp);
9ee6e8bb
PB
7257 break;
7258 case 2: /* SIMD add/subtract. */
7259 op = (insn >> 20) & 7;
7260 shift = (insn >> 4) & 7;
7261 if ((op & 3) == 3 || (shift & 3) == 3)
7262 goto illegal_op;
6ddbc6e4
PB
7263 tmp = load_reg(s, rn);
7264 tmp2 = load_reg(s, rm);
7265 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7266 dead_tmp(tmp2);
7267 store_reg(s, rd, tmp);
9ee6e8bb
PB
7268 break;
7269 case 3: /* Other data processing. */
7270 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7271 if (op < 4) {
7272 /* Saturating add/subtract. */
d9ba4830
PB
7273 tmp = load_reg(s, rn);
7274 tmp2 = load_reg(s, rm);
9ee6e8bb 7275 if (op & 2)
d9ba4830 7276 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7277 if (op & 1)
d9ba4830 7278 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7279 else
d9ba4830
PB
7280 gen_helper_add_saturate(tmp, tmp, tmp2);
7281 dead_tmp(tmp2);
9ee6e8bb 7282 } else {
d9ba4830 7283 tmp = load_reg(s, rn);
9ee6e8bb
PB
7284 switch (op) {
7285 case 0x0a: /* rbit */
d9ba4830 7286 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7287 break;
7288 case 0x08: /* rev */
d9ba4830 7289 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb
PB
7290 break;
7291 case 0x09: /* rev16 */
d9ba4830 7292 gen_rev16(tmp);
9ee6e8bb
PB
7293 break;
7294 case 0x0b: /* revsh */
d9ba4830 7295 gen_revsh(tmp);
9ee6e8bb
PB
7296 break;
7297 case 0x10: /* sel */
d9ba4830 7298 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7299 tmp3 = new_tmp();
7300 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7301 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7302 dead_tmp(tmp3);
d9ba4830 7303 dead_tmp(tmp2);
9ee6e8bb
PB
7304 break;
7305 case 0x18: /* clz */
d9ba4830 7306 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7307 break;
7308 default:
7309 goto illegal_op;
7310 }
7311 }
d9ba4830 7312 store_reg(s, rd, tmp);
9ee6e8bb
PB
7313 break;
7314 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7315 op = (insn >> 4) & 0xf;
d9ba4830
PB
7316 tmp = load_reg(s, rn);
7317 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7318 switch ((insn >> 20) & 7) {
7319 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7320 tcg_gen_mul_i32(tmp, tmp, tmp2);
7321 dead_tmp(tmp2);
9ee6e8bb 7322 if (rs != 15) {
d9ba4830 7323 tmp2 = load_reg(s, rs);
9ee6e8bb 7324 if (op)
d9ba4830 7325 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7326 else
d9ba4830
PB
7327 tcg_gen_add_i32(tmp, tmp, tmp2);
7328 dead_tmp(tmp2);
9ee6e8bb 7329 }
9ee6e8bb
PB
7330 break;
7331 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7332 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7333 dead_tmp(tmp2);
9ee6e8bb 7334 if (rs != 15) {
d9ba4830
PB
7335 tmp2 = load_reg(s, rs);
7336 gen_helper_add_setq(tmp, tmp, tmp2);
7337 dead_tmp(tmp2);
9ee6e8bb 7338 }
9ee6e8bb
PB
7339 break;
7340 case 2: /* Dual multiply add. */
7341 case 4: /* Dual multiply subtract. */
7342 if (op)
d9ba4830
PB
7343 gen_swap_half(tmp2);
7344 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7345 /* This addition cannot overflow. */
7346 if (insn & (1 << 22)) {
d9ba4830 7347 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7348 } else {
d9ba4830 7349 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7350 }
d9ba4830 7351 dead_tmp(tmp2);
9ee6e8bb
PB
7352 if (rs != 15)
7353 {
d9ba4830
PB
7354 tmp2 = load_reg(s, rs);
7355 gen_helper_add_setq(tmp, tmp, tmp2);
7356 dead_tmp(tmp2);
9ee6e8bb 7357 }
9ee6e8bb
PB
7358 break;
7359 case 3: /* 32 * 16 -> 32msb */
7360 if (op)
d9ba4830 7361 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7362 else
d9ba4830 7363 gen_sxth(tmp2);
5e3f878a
PB
7364 tmp2 = gen_muls_i64_i32(tmp, tmp2);
7365 tcg_gen_shri_i64(tmp2, tmp2, 16);
7366 tmp = new_tmp();
7367 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb
PB
7368 if (rs != 15)
7369 {
d9ba4830
PB
7370 tmp2 = load_reg(s, rs);
7371 gen_helper_add_setq(tmp, tmp, tmp2);
7372 dead_tmp(tmp2);
9ee6e8bb 7373 }
9ee6e8bb
PB
7374 break;
7375 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7376 gen_imull(tmp, tmp2);
7377 if (insn & (1 << 5)) {
7378 gen_roundqd(tmp, tmp2);
7379 dead_tmp(tmp2);
7380 } else {
7381 dead_tmp(tmp);
7382 tmp = tmp2;
7383 }
9ee6e8bb 7384 if (rs != 15) {
d9ba4830 7385 tmp2 = load_reg(s, rs);
9ee6e8bb 7386 if (insn & (1 << 21)) {
d9ba4830 7387 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7388 } else {
d9ba4830 7389 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7390 }
d9ba4830 7391 dead_tmp(tmp2);
2c0262af 7392 }
9ee6e8bb
PB
7393 break;
7394 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7395 gen_helper_usad8(tmp, tmp, tmp2);
7396 dead_tmp(tmp2);
9ee6e8bb 7397 if (rs != 15) {
d9ba4830
PB
7398 tmp2 = load_reg(s, rs);
7399 tcg_gen_add_i32(tmp, tmp, tmp2);
7400 dead_tmp(tmp2);
5fd46862 7401 }
9ee6e8bb 7402 break;
2c0262af 7403 }
d9ba4830 7404 store_reg(s, rd, tmp);
2c0262af 7405 break;
9ee6e8bb
PB
7406 case 6: case 7: /* 64-bit multiply, Divide. */
7407 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7408 tmp = load_reg(s, rn);
7409 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7410 if ((op & 0x50) == 0x10) {
7411 /* sdiv, udiv */
7412 if (!arm_feature(env, ARM_FEATURE_DIV))
7413 goto illegal_op;
7414 if (op & 0x20)
5e3f878a 7415 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7416 else
5e3f878a
PB
7417 gen_helper_sdiv(tmp, tmp, tmp2);
7418 dead_tmp(tmp2);
7419 store_reg(s, rd, tmp);
9ee6e8bb
PB
7420 } else if ((op & 0xe) == 0xc) {
7421 /* Dual multiply accumulate long. */
7422 if (op & 1)
5e3f878a
PB
7423 gen_swap_half(tmp2);
7424 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7425 if (op & 0x10) {
5e3f878a 7426 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7427 } else {
5e3f878a 7428 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7429 }
5e3f878a
PB
7430 dead_tmp(tmp2);
7431 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7432 gen_addq(s, tmp, rs, rd);
7433 gen_storeq_reg(s, rs, rd, tmp);
2c0262af 7434 } else {
9ee6e8bb
PB
7435 if (op & 0x20) {
7436 /* Unsigned 64-bit multiply */
5e3f878a 7437 tmp = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7438 } else {
9ee6e8bb
PB
7439 if (op & 8) {
7440 /* smlalxy */
5e3f878a
PB
7441 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7442 dead_tmp(tmp2);
7443 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7444 tcg_gen_ext_i32_i64(tmp2, tmp);
7445 dead_tmp(tmp);
7446 tmp = tmp2;
9ee6e8bb
PB
7447 } else {
7448 /* Signed 64-bit multiply */
5e3f878a 7449 tmp = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7450 }
b5ff1b31 7451 }
9ee6e8bb
PB
7452 if (op & 4) {
7453 /* umaal */
5e3f878a
PB
7454 gen_addq_lo(s, tmp, rs);
7455 gen_addq_lo(s, tmp, rd);
9ee6e8bb
PB
7456 } else if (op & 0x40) {
7457 /* 64-bit accumulate. */
5e3f878a 7458 gen_addq(s, tmp, rs, rd);
9ee6e8bb 7459 }
5e3f878a 7460 gen_storeq_reg(s, rs, rd, tmp);
5fd46862 7461 }
2c0262af 7462 break;
9ee6e8bb
PB
7463 }
7464 break;
7465 case 6: case 7: case 14: case 15:
7466 /* Coprocessor. */
7467 if (((insn >> 24) & 3) == 3) {
7468 /* Translate into the equivalent ARM encoding. */
7469 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7470 if (disas_neon_data_insn(env, s, insn))
7471 goto illegal_op;
7472 } else {
7473 if (insn & (1 << 28))
7474 goto illegal_op;
7475 if (disas_coproc_insn (env, s, insn))
7476 goto illegal_op;
7477 }
7478 break;
7479 case 8: case 9: case 10: case 11:
7480 if (insn & (1 << 15)) {
7481 /* Branches, misc control. */
7482 if (insn & 0x5000) {
7483 /* Unconditional branch. */
7484 /* signextend(hw1[10:0]) -> offset[:12]. */
7485 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7486 /* hw1[10:0] -> offset[11:1]. */
7487 offset |= (insn & 0x7ff) << 1;
7488 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7489 offset[24:22] already have the same value because of the
7490 sign extension above. */
7491 offset ^= ((~insn) & (1 << 13)) << 10;
7492 offset ^= ((~insn) & (1 << 11)) << 11;
7493
9ee6e8bb
PB
7494 if (insn & (1 << 14)) {
7495 /* Branch and link. */
b0109805 7496 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7497 gen_movl_reg_T1(s, 14);
b5ff1b31 7498 }
3b46e624 7499
b0109805 7500 offset += s->pc;
9ee6e8bb
PB
7501 if (insn & (1 << 12)) {
7502 /* b/bl */
b0109805 7503 gen_jmp(s, offset);
9ee6e8bb
PB
7504 } else {
7505 /* blx */
b0109805
PB
7506 offset &= ~(uint32_t)2;
7507 gen_bx_im(s, offset);
2c0262af 7508 }
9ee6e8bb
PB
7509 } else if (((insn >> 23) & 7) == 7) {
7510 /* Misc control */
7511 if (insn & (1 << 13))
7512 goto illegal_op;
7513
7514 if (insn & (1 << 26)) {
7515 /* Secure monitor call (v6Z) */
7516 goto illegal_op; /* not implemented. */
2c0262af 7517 } else {
9ee6e8bb
PB
7518 op = (insn >> 20) & 7;
7519 switch (op) {
7520 case 0: /* msr cpsr. */
7521 if (IS_M(env)) {
8984bd2e
PB
7522 tmp = load_reg(s, rn);
7523 addr = tcg_const_i32(insn & 0xff);
7524 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7525 gen_lookup_tb(s);
7526 break;
7527 }
7528 /* fall through */
7529 case 1: /* msr spsr. */
7530 if (IS_M(env))
7531 goto illegal_op;
7532 gen_movl_T0_reg(s, rn);
7533 if (gen_set_psr_T0(s,
7534 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7535 op == 1))
7536 goto illegal_op;
7537 break;
7538 case 2: /* cps, nop-hint. */
7539 if (((insn >> 8) & 7) == 0) {
7540 gen_nop_hint(s, insn & 0xff);
7541 }
7542 /* Implemented as NOP in user mode. */
7543 if (IS_USER(s))
7544 break;
7545 offset = 0;
7546 imm = 0;
7547 if (insn & (1 << 10)) {
7548 if (insn & (1 << 7))
7549 offset |= CPSR_A;
7550 if (insn & (1 << 6))
7551 offset |= CPSR_I;
7552 if (insn & (1 << 5))
7553 offset |= CPSR_F;
7554 if (insn & (1 << 9))
7555 imm = CPSR_A | CPSR_I | CPSR_F;
7556 }
7557 if (insn & (1 << 8)) {
7558 offset |= 0x1f;
7559 imm |= (insn & 0x1f);
7560 }
7561 if (offset) {
7562 gen_op_movl_T0_im(imm);
7563 gen_set_psr_T0(s, offset, 0);
7564 }
7565 break;
7566 case 3: /* Special control operations. */
7567 op = (insn >> 4) & 0xf;
7568 switch (op) {
7569 case 2: /* clrex */
8f8e3aa4 7570 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7571 break;
7572 case 4: /* dsb */
7573 case 5: /* dmb */
7574 case 6: /* isb */
7575 /* These execute as NOPs. */
7576 ARCH(7);
7577 break;
7578 default:
7579 goto illegal_op;
7580 }
7581 break;
7582 case 4: /* bxj */
7583 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7584 tmp = load_reg(s, rn);
7585 gen_bx(s, tmp);
9ee6e8bb
PB
7586 break;
7587 case 5: /* Exception return. */
7588 /* Unpredictable in user mode. */
7589 goto illegal_op;
7590 case 6: /* mrs cpsr. */
8984bd2e 7591 tmp = new_tmp();
9ee6e8bb 7592 if (IS_M(env)) {
8984bd2e
PB
7593 addr = tcg_const_i32(insn & 0xff);
7594 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7595 } else {
8984bd2e 7596 gen_helper_cpsr_read(tmp);
9ee6e8bb 7597 }
8984bd2e 7598 store_reg(s, rd, tmp);
9ee6e8bb
PB
7599 break;
7600 case 7: /* mrs spsr. */
7601 /* Not accessible in user mode. */
7602 if (IS_USER(s) || IS_M(env))
7603 goto illegal_op;
d9ba4830
PB
7604 tmp = load_cpu_field(spsr);
7605 store_reg(s, rd, tmp);
9ee6e8bb 7606 break;
2c0262af
FB
7607 }
7608 }
9ee6e8bb
PB
7609 } else {
7610 /* Conditional branch. */
7611 op = (insn >> 22) & 0xf;
7612 /* Generate a conditional jump to next instruction. */
7613 s->condlabel = gen_new_label();
d9ba4830 7614 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7615 s->condjmp = 1;
7616
7617 /* offset[11:1] = insn[10:0] */
7618 offset = (insn & 0x7ff) << 1;
7619 /* offset[17:12] = insn[21:16]. */
7620 offset |= (insn & 0x003f0000) >> 4;
7621 /* offset[31:20] = insn[26]. */
7622 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7623 /* offset[18] = insn[13]. */
7624 offset |= (insn & (1 << 13)) << 5;
7625 /* offset[19] = insn[11]. */
7626 offset |= (insn & (1 << 11)) << 8;
7627
7628 /* jump to the offset */
b0109805 7629 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7630 }
7631 } else {
7632 /* Data processing immediate. */
7633 if (insn & (1 << 25)) {
7634 if (insn & (1 << 24)) {
7635 if (insn & (1 << 20))
7636 goto illegal_op;
7637 /* Bitfield/Saturate. */
7638 op = (insn >> 21) & 7;
7639 imm = insn & 0x1f;
7640 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7641 if (rn == 15) {
7642 tmp = new_tmp();
7643 tcg_gen_movi_i32(tmp, 0);
7644 } else {
7645 tmp = load_reg(s, rn);
7646 }
9ee6e8bb
PB
7647 switch (op) {
7648 case 2: /* Signed bitfield extract. */
7649 imm++;
7650 if (shift + imm > 32)
7651 goto illegal_op;
7652 if (imm < 32)
6ddbc6e4 7653 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7654 break;
7655 case 6: /* Unsigned bitfield extract. */
7656 imm++;
7657 if (shift + imm > 32)
7658 goto illegal_op;
7659 if (imm < 32)
6ddbc6e4 7660 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7661 break;
7662 case 3: /* Bitfield insert/clear. */
7663 if (imm < shift)
7664 goto illegal_op;
7665 imm = imm + 1 - shift;
7666 if (imm != 32) {
6ddbc6e4 7667 tmp2 = load_reg(s, rd);
8f8e3aa4 7668 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7669 dead_tmp(tmp2);
9ee6e8bb
PB
7670 }
7671 break;
7672 case 7:
7673 goto illegal_op;
7674 default: /* Saturate. */
9ee6e8bb
PB
7675 if (shift) {
7676 if (op & 1)
6ddbc6e4 7677 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7678 else
6ddbc6e4 7679 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7680 }
6ddbc6e4 7681 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7682 if (op & 4) {
7683 /* Unsigned. */
9ee6e8bb 7684 if ((op & 1) && shift == 0)
6ddbc6e4 7685 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7686 else
6ddbc6e4 7687 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7688 } else {
9ee6e8bb 7689 /* Signed. */
9ee6e8bb 7690 if ((op & 1) && shift == 0)
6ddbc6e4 7691 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7692 else
6ddbc6e4 7693 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7694 }
9ee6e8bb 7695 break;
2c0262af 7696 }
6ddbc6e4 7697 store_reg(s, rd, tmp);
9ee6e8bb
PB
7698 } else {
7699 imm = ((insn & 0x04000000) >> 15)
7700 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7701 if (insn & (1 << 22)) {
7702 /* 16-bit immediate. */
7703 imm |= (insn >> 4) & 0xf000;
7704 if (insn & (1 << 23)) {
7705 /* movt */
5e3f878a 7706 tmp = load_reg(s, rd);
86831435 7707 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7708 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7709 } else {
9ee6e8bb 7710 /* movw */
5e3f878a
PB
7711 tmp = new_tmp();
7712 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7713 }
7714 } else {
9ee6e8bb
PB
7715 /* Add/sub 12-bit immediate. */
7716 if (rn == 15) {
b0109805 7717 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7718 if (insn & (1 << 23))
b0109805 7719 offset -= imm;
9ee6e8bb 7720 else
b0109805 7721 offset += imm;
5e3f878a
PB
7722 tmp = new_tmp();
7723 tcg_gen_movi_i32(tmp, offset);
2c0262af 7724 } else {
5e3f878a 7725 tmp = load_reg(s, rn);
9ee6e8bb 7726 if (insn & (1 << 23))
5e3f878a 7727 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7728 else
5e3f878a 7729 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7730 }
9ee6e8bb 7731 }
5e3f878a 7732 store_reg(s, rd, tmp);
191abaa2 7733 }
9ee6e8bb
PB
7734 } else {
7735 int shifter_out = 0;
7736 /* modified 12-bit immediate. */
7737 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7738 imm = (insn & 0xff);
7739 switch (shift) {
7740 case 0: /* XY */
7741 /* Nothing to do. */
7742 break;
7743 case 1: /* 00XY00XY */
7744 imm |= imm << 16;
7745 break;
7746 case 2: /* XY00XY00 */
7747 imm |= imm << 16;
7748 imm <<= 8;
7749 break;
7750 case 3: /* XYXYXYXY */
7751 imm |= imm << 16;
7752 imm |= imm << 8;
7753 break;
7754 default: /* Rotated constant. */
7755 shift = (shift << 1) | (imm >> 7);
7756 imm |= 0x80;
7757 imm = imm << (32 - shift);
7758 shifter_out = 1;
7759 break;
b5ff1b31 7760 }
9ee6e8bb
PB
7761 gen_op_movl_T1_im(imm);
7762 rn = (insn >> 16) & 0xf;
7763 if (rn == 15)
7764 gen_op_movl_T0_im(0);
7765 else
7766 gen_movl_T0_reg(s, rn);
7767 op = (insn >> 21) & 0xf;
7768 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7769 shifter_out))
7770 goto illegal_op;
7771 rd = (insn >> 8) & 0xf;
7772 if (rd != 15) {
7773 gen_movl_reg_T0(s, rd);
2c0262af 7774 }
2c0262af 7775 }
9ee6e8bb
PB
7776 }
7777 break;
7778 case 12: /* Load/store single data item. */
7779 {
7780 int postinc = 0;
7781 int writeback = 0;
b0109805 7782 int user;
9ee6e8bb
PB
7783 if ((insn & 0x01100000) == 0x01000000) {
7784 if (disas_neon_ls_insn(env, s, insn))
c1713132 7785 goto illegal_op;
9ee6e8bb
PB
7786 break;
7787 }
b0109805 7788 user = IS_USER(s);
9ee6e8bb 7789 if (rn == 15) {
b0109805 7790 addr = new_tmp();
9ee6e8bb
PB
7791 /* PC relative. */
7792 /* s->pc has already been incremented by 4. */
7793 imm = s->pc & 0xfffffffc;
7794 if (insn & (1 << 23))
7795 imm += insn & 0xfff;
7796 else
7797 imm -= insn & 0xfff;
b0109805 7798 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7799 } else {
b0109805 7800 addr = load_reg(s, rn);
9ee6e8bb
PB
7801 if (insn & (1 << 23)) {
7802 /* Positive offset. */
7803 imm = insn & 0xfff;
b0109805 7804 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7805 } else {
7806 op = (insn >> 8) & 7;
7807 imm = insn & 0xff;
7808 switch (op) {
7809 case 0: case 8: /* Shifted Register. */
7810 shift = (insn >> 4) & 0xf;
7811 if (shift > 3)
18c9b560 7812 goto illegal_op;
b26eefb6 7813 tmp = load_reg(s, rm);
9ee6e8bb 7814 if (shift)
b26eefb6 7815 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7816 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7817 dead_tmp(tmp);
9ee6e8bb
PB
7818 break;
7819 case 4: /* Negative offset. */
b0109805 7820 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7821 break;
7822 case 6: /* User privilege. */
b0109805
PB
7823 tcg_gen_addi_i32(addr, addr, imm);
7824 user = 1;
9ee6e8bb
PB
7825 break;
7826 case 1: /* Post-decrement. */
7827 imm = -imm;
7828 /* Fall through. */
7829 case 3: /* Post-increment. */
9ee6e8bb
PB
7830 postinc = 1;
7831 writeback = 1;
7832 break;
7833 case 5: /* Pre-decrement. */
7834 imm = -imm;
7835 /* Fall through. */
7836 case 7: /* Pre-increment. */
b0109805 7837 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7838 writeback = 1;
7839 break;
7840 default:
b7bcbe95 7841 goto illegal_op;
9ee6e8bb
PB
7842 }
7843 }
7844 }
7845 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7846 if (insn & (1 << 20)) {
7847 /* Load. */
7848 if (rs == 15 && op != 2) {
7849 if (op & 2)
b5ff1b31 7850 goto illegal_op;
9ee6e8bb
PB
7851 /* Memory hint. Implemented as NOP. */
7852 } else {
7853 switch (op) {
b0109805
PB
7854 case 0: tmp = gen_ld8u(addr, user); break;
7855 case 4: tmp = gen_ld8s(addr, user); break;
7856 case 1: tmp = gen_ld16u(addr, user); break;
7857 case 5: tmp = gen_ld16s(addr, user); break;
7858 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
7859 default: goto illegal_op;
7860 }
7861 if (rs == 15) {
b0109805 7862 gen_bx(s, tmp);
9ee6e8bb 7863 } else {
b0109805 7864 store_reg(s, rs, tmp);
9ee6e8bb
PB
7865 }
7866 }
7867 } else {
7868 /* Store. */
7869 if (rs == 15)
b7bcbe95 7870 goto illegal_op;
b0109805 7871 tmp = load_reg(s, rs);
9ee6e8bb 7872 switch (op) {
b0109805
PB
7873 case 0: gen_st8(tmp, addr, user); break;
7874 case 1: gen_st16(tmp, addr, user); break;
7875 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 7876 default: goto illegal_op;
b7bcbe95 7877 }
2c0262af 7878 }
9ee6e8bb 7879 if (postinc)
b0109805
PB
7880 tcg_gen_addi_i32(addr, addr, imm);
7881 if (writeback) {
7882 store_reg(s, rn, addr);
7883 } else {
7884 dead_tmp(addr);
7885 }
9ee6e8bb
PB
7886 }
7887 break;
7888 default:
7889 goto illegal_op;
2c0262af 7890 }
9ee6e8bb
PB
7891 return 0;
7892illegal_op:
7893 return 1;
2c0262af
FB
7894}
7895
9ee6e8bb 7896static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
7897{
7898 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7899 int32_t offset;
7900 int i;
b26eefb6 7901 TCGv tmp;
d9ba4830 7902 TCGv tmp2;
b0109805 7903 TCGv addr;
99c475ab 7904
9ee6e8bb
PB
7905 if (s->condexec_mask) {
7906 cond = s->condexec_cond;
7907 s->condlabel = gen_new_label();
d9ba4830 7908 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7909 s->condjmp = 1;
7910 }
7911
b5ff1b31 7912 insn = lduw_code(s->pc);
99c475ab 7913 s->pc += 2;
b5ff1b31 7914
99c475ab
FB
7915 switch (insn >> 12) {
7916 case 0: case 1:
7917 rd = insn & 7;
7918 op = (insn >> 11) & 3;
7919 if (op == 3) {
7920 /* add/subtract */
7921 rn = (insn >> 3) & 7;
7922 gen_movl_T0_reg(s, rn);
7923 if (insn & (1 << 10)) {
7924 /* immediate */
7925 gen_op_movl_T1_im((insn >> 6) & 7);
7926 } else {
7927 /* reg */
7928 rm = (insn >> 6) & 7;
7929 gen_movl_T1_reg(s, rm);
7930 }
9ee6e8bb
PB
7931 if (insn & (1 << 9)) {
7932 if (s->condexec_mask)
7933 gen_op_subl_T0_T1();
7934 else
7935 gen_op_subl_T0_T1_cc();
7936 } else {
7937 if (s->condexec_mask)
7938 gen_op_addl_T0_T1();
7939 else
7940 gen_op_addl_T0_T1_cc();
7941 }
99c475ab
FB
7942 gen_movl_reg_T0(s, rd);
7943 } else {
7944 /* shift immediate */
7945 rm = (insn >> 3) & 7;
7946 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
7947 tmp = load_reg(s, rm);
7948 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7949 if (!s->condexec_mask)
7950 gen_logic_CC(tmp);
7951 store_reg(s, rd, tmp);
99c475ab
FB
7952 }
7953 break;
7954 case 2: case 3:
7955 /* arithmetic large immediate */
7956 op = (insn >> 11) & 3;
7957 rd = (insn >> 8) & 0x7;
7958 if (op == 0) {
7959 gen_op_movl_T0_im(insn & 0xff);
7960 } else {
7961 gen_movl_T0_reg(s, rd);
7962 gen_op_movl_T1_im(insn & 0xff);
7963 }
7964 switch (op) {
7965 case 0: /* mov */
9ee6e8bb
PB
7966 if (!s->condexec_mask)
7967 gen_op_logic_T0_cc();
99c475ab
FB
7968 break;
7969 case 1: /* cmp */
7970 gen_op_subl_T0_T1_cc();
7971 break;
7972 case 2: /* add */
9ee6e8bb
PB
7973 if (s->condexec_mask)
7974 gen_op_addl_T0_T1();
7975 else
7976 gen_op_addl_T0_T1_cc();
99c475ab
FB
7977 break;
7978 case 3: /* sub */
9ee6e8bb
PB
7979 if (s->condexec_mask)
7980 gen_op_subl_T0_T1();
7981 else
7982 gen_op_subl_T0_T1_cc();
99c475ab
FB
7983 break;
7984 }
7985 if (op != 1)
7986 gen_movl_reg_T0(s, rd);
7987 break;
7988 case 4:
7989 if (insn & (1 << 11)) {
7990 rd = (insn >> 8) & 7;
5899f386
FB
7991 /* load pc-relative. Bit 1 of PC is ignored. */
7992 val = s->pc + 2 + ((insn & 0xff) * 4);
7993 val &= ~(uint32_t)2;
b0109805
PB
7994 addr = new_tmp();
7995 tcg_gen_movi_i32(addr, val);
7996 tmp = gen_ld32(addr, IS_USER(s));
7997 dead_tmp(addr);
7998 store_reg(s, rd, tmp);
99c475ab
FB
7999 break;
8000 }
8001 if (insn & (1 << 10)) {
8002 /* data processing extended or blx */
8003 rd = (insn & 7) | ((insn >> 4) & 8);
8004 rm = (insn >> 3) & 0xf;
8005 op = (insn >> 8) & 3;
8006 switch (op) {
8007 case 0: /* add */
8008 gen_movl_T0_reg(s, rd);
8009 gen_movl_T1_reg(s, rm);
8010 gen_op_addl_T0_T1();
8011 gen_movl_reg_T0(s, rd);
8012 break;
8013 case 1: /* cmp */
8014 gen_movl_T0_reg(s, rd);
8015 gen_movl_T1_reg(s, rm);
8016 gen_op_subl_T0_T1_cc();
8017 break;
8018 case 2: /* mov/cpy */
8019 gen_movl_T0_reg(s, rm);
8020 gen_movl_reg_T0(s, rd);
8021 break;
8022 case 3:/* branch [and link] exchange thumb register */
b0109805 8023 tmp = load_reg(s, rm);
99c475ab
FB
8024 if (insn & (1 << 7)) {
8025 val = (uint32_t)s->pc | 1;
b0109805
PB
8026 tmp2 = new_tmp();
8027 tcg_gen_movi_i32(tmp2, val);
8028 store_reg(s, 14, tmp2);
99c475ab 8029 }
d9ba4830 8030 gen_bx(s, tmp);
99c475ab
FB
8031 break;
8032 }
8033 break;
8034 }
8035
8036 /* data processing register */
8037 rd = insn & 7;
8038 rm = (insn >> 3) & 7;
8039 op = (insn >> 6) & 0xf;
8040 if (op == 2 || op == 3 || op == 4 || op == 7) {
8041 /* the shift/rotate ops want the operands backwards */
8042 val = rm;
8043 rm = rd;
8044 rd = val;
8045 val = 1;
8046 } else {
8047 val = 0;
8048 }
8049
8050 if (op == 9) /* neg */
8051 gen_op_movl_T0_im(0);
8052 else if (op != 0xf) /* mvn doesn't read its first operand */
8053 gen_movl_T0_reg(s, rd);
8054
8055 gen_movl_T1_reg(s, rm);
5899f386 8056 switch (op) {
99c475ab
FB
8057 case 0x0: /* and */
8058 gen_op_andl_T0_T1();
9ee6e8bb
PB
8059 if (!s->condexec_mask)
8060 gen_op_logic_T0_cc();
99c475ab
FB
8061 break;
8062 case 0x1: /* eor */
8063 gen_op_xorl_T0_T1();
9ee6e8bb
PB
8064 if (!s->condexec_mask)
8065 gen_op_logic_T0_cc();
99c475ab
FB
8066 break;
8067 case 0x2: /* lsl */
9ee6e8bb 8068 if (s->condexec_mask) {
8984bd2e 8069 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8070 } else {
8984bd2e 8071 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8072 gen_op_logic_T1_cc();
8073 }
99c475ab
FB
8074 break;
8075 case 0x3: /* lsr */
9ee6e8bb 8076 if (s->condexec_mask) {
8984bd2e 8077 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8078 } else {
8984bd2e 8079 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8080 gen_op_logic_T1_cc();
8081 }
99c475ab
FB
8082 break;
8083 case 0x4: /* asr */
9ee6e8bb 8084 if (s->condexec_mask) {
8984bd2e 8085 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8086 } else {
8984bd2e 8087 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8088 gen_op_logic_T1_cc();
8089 }
99c475ab
FB
8090 break;
8091 case 0x5: /* adc */
9ee6e8bb 8092 if (s->condexec_mask)
b26eefb6 8093 gen_adc_T0_T1();
9ee6e8bb
PB
8094 else
8095 gen_op_adcl_T0_T1_cc();
99c475ab
FB
8096 break;
8097 case 0x6: /* sbc */
9ee6e8bb 8098 if (s->condexec_mask)
3670669c 8099 gen_sbc_T0_T1();
9ee6e8bb
PB
8100 else
8101 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
8102 break;
8103 case 0x7: /* ror */
9ee6e8bb 8104 if (s->condexec_mask) {
8984bd2e 8105 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8106 } else {
8984bd2e 8107 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8108 gen_op_logic_T1_cc();
8109 }
99c475ab
FB
8110 break;
8111 case 0x8: /* tst */
8112 gen_op_andl_T0_T1();
8113 gen_op_logic_T0_cc();
8114 rd = 16;
5899f386 8115 break;
99c475ab 8116 case 0x9: /* neg */
9ee6e8bb 8117 if (s->condexec_mask)
390efc54 8118 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
8119 else
8120 gen_op_subl_T0_T1_cc();
99c475ab
FB
8121 break;
8122 case 0xa: /* cmp */
8123 gen_op_subl_T0_T1_cc();
8124 rd = 16;
8125 break;
8126 case 0xb: /* cmn */
8127 gen_op_addl_T0_T1_cc();
8128 rd = 16;
8129 break;
8130 case 0xc: /* orr */
8131 gen_op_orl_T0_T1();
9ee6e8bb
PB
8132 if (!s->condexec_mask)
8133 gen_op_logic_T0_cc();
99c475ab
FB
8134 break;
8135 case 0xd: /* mul */
8136 gen_op_mull_T0_T1();
9ee6e8bb
PB
8137 if (!s->condexec_mask)
8138 gen_op_logic_T0_cc();
99c475ab
FB
8139 break;
8140 case 0xe: /* bic */
8141 gen_op_bicl_T0_T1();
9ee6e8bb
PB
8142 if (!s->condexec_mask)
8143 gen_op_logic_T0_cc();
99c475ab
FB
8144 break;
8145 case 0xf: /* mvn */
8146 gen_op_notl_T1();
9ee6e8bb
PB
8147 if (!s->condexec_mask)
8148 gen_op_logic_T1_cc();
99c475ab 8149 val = 1;
5899f386 8150 rm = rd;
99c475ab
FB
8151 break;
8152 }
8153 if (rd != 16) {
8154 if (val)
5899f386 8155 gen_movl_reg_T1(s, rm);
99c475ab
FB
8156 else
8157 gen_movl_reg_T0(s, rd);
8158 }
8159 break;
8160
8161 case 5:
8162 /* load/store register offset. */
8163 rd = insn & 7;
8164 rn = (insn >> 3) & 7;
8165 rm = (insn >> 6) & 7;
8166 op = (insn >> 9) & 7;
b0109805 8167 addr = load_reg(s, rn);
b26eefb6 8168 tmp = load_reg(s, rm);
b0109805 8169 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8170 dead_tmp(tmp);
99c475ab
FB
8171
8172 if (op < 3) /* store */
b0109805 8173 tmp = load_reg(s, rd);
99c475ab
FB
8174
8175 switch (op) {
8176 case 0: /* str */
b0109805 8177 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8178 break;
8179 case 1: /* strh */
b0109805 8180 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8181 break;
8182 case 2: /* strb */
b0109805 8183 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8184 break;
8185 case 3: /* ldrsb */
b0109805 8186 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8187 break;
8188 case 4: /* ldr */
b0109805 8189 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8190 break;
8191 case 5: /* ldrh */
b0109805 8192 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8193 break;
8194 case 6: /* ldrb */
b0109805 8195 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8196 break;
8197 case 7: /* ldrsh */
b0109805 8198 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8199 break;
8200 }
8201 if (op >= 3) /* load */
b0109805
PB
8202 store_reg(s, rd, tmp);
8203 dead_tmp(addr);
99c475ab
FB
8204 break;
8205
8206 case 6:
8207 /* load/store word immediate offset */
8208 rd = insn & 7;
8209 rn = (insn >> 3) & 7;
b0109805 8210 addr = load_reg(s, rn);
99c475ab 8211 val = (insn >> 4) & 0x7c;
b0109805 8212 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8213
8214 if (insn & (1 << 11)) {
8215 /* load */
b0109805
PB
8216 tmp = gen_ld32(addr, IS_USER(s));
8217 store_reg(s, rd, tmp);
99c475ab
FB
8218 } else {
8219 /* store */
b0109805
PB
8220 tmp = load_reg(s, rd);
8221 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8222 }
b0109805 8223 dead_tmp(addr);
99c475ab
FB
8224 break;
8225
8226 case 7:
8227 /* load/store byte immediate offset */
8228 rd = insn & 7;
8229 rn = (insn >> 3) & 7;
b0109805 8230 addr = load_reg(s, rn);
99c475ab 8231 val = (insn >> 6) & 0x1f;
b0109805 8232 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8233
8234 if (insn & (1 << 11)) {
8235 /* load */
b0109805
PB
8236 tmp = gen_ld8u(addr, IS_USER(s));
8237 store_reg(s, rd, tmp);
99c475ab
FB
8238 } else {
8239 /* store */
b0109805
PB
8240 tmp = load_reg(s, rd);
8241 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8242 }
b0109805 8243 dead_tmp(addr);
99c475ab
FB
8244 break;
8245
8246 case 8:
8247 /* load/store halfword immediate offset */
8248 rd = insn & 7;
8249 rn = (insn >> 3) & 7;
b0109805 8250 addr = load_reg(s, rn);
99c475ab 8251 val = (insn >> 5) & 0x3e;
b0109805 8252 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8253
8254 if (insn & (1 << 11)) {
8255 /* load */
b0109805
PB
8256 tmp = gen_ld16u(addr, IS_USER(s));
8257 store_reg(s, rd, tmp);
99c475ab
FB
8258 } else {
8259 /* store */
b0109805
PB
8260 tmp = load_reg(s, rd);
8261 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8262 }
b0109805 8263 dead_tmp(addr);
99c475ab
FB
8264 break;
8265
8266 case 9:
8267 /* load/store from stack */
8268 rd = (insn >> 8) & 7;
b0109805 8269 addr = load_reg(s, 13);
99c475ab 8270 val = (insn & 0xff) * 4;
b0109805 8271 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8272
8273 if (insn & (1 << 11)) {
8274 /* load */
b0109805
PB
8275 tmp = gen_ld32(addr, IS_USER(s));
8276 store_reg(s, rd, tmp);
99c475ab
FB
8277 } else {
8278 /* store */
b0109805
PB
8279 tmp = load_reg(s, rd);
8280 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8281 }
b0109805 8282 dead_tmp(addr);
99c475ab
FB
8283 break;
8284
8285 case 10:
8286 /* add to high reg */
8287 rd = (insn >> 8) & 7;
5899f386
FB
8288 if (insn & (1 << 11)) {
8289 /* SP */
5e3f878a 8290 tmp = load_reg(s, 13);
5899f386
FB
8291 } else {
8292 /* PC. bit 1 is ignored. */
5e3f878a
PB
8293 tmp = new_tmp();
8294 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8295 }
99c475ab 8296 val = (insn & 0xff) * 4;
5e3f878a
PB
8297 tcg_gen_addi_i32(tmp, tmp, val);
8298 store_reg(s, rd, tmp);
99c475ab
FB
8299 break;
8300
8301 case 11:
8302 /* misc */
8303 op = (insn >> 8) & 0xf;
8304 switch (op) {
8305 case 0:
8306 /* adjust stack pointer */
b26eefb6 8307 tmp = load_reg(s, 13);
99c475ab
FB
8308 val = (insn & 0x7f) * 4;
8309 if (insn & (1 << 7))
6a0d8a1d 8310 val = -(int32_t)val;
b26eefb6
PB
8311 tcg_gen_addi_i32(tmp, tmp, val);
8312 store_reg(s, 13, tmp);
99c475ab
FB
8313 break;
8314
9ee6e8bb
PB
8315 case 2: /* sign/zero extend. */
8316 ARCH(6);
8317 rd = insn & 7;
8318 rm = (insn >> 3) & 7;
b0109805 8319 tmp = load_reg(s, rm);
9ee6e8bb 8320 switch ((insn >> 6) & 3) {
b0109805
PB
8321 case 0: gen_sxth(tmp); break;
8322 case 1: gen_sxtb(tmp); break;
8323 case 2: gen_uxth(tmp); break;
8324 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8325 }
b0109805 8326 store_reg(s, rd, tmp);
9ee6e8bb 8327 break;
99c475ab
FB
8328 case 4: case 5: case 0xc: case 0xd:
8329 /* push/pop */
b0109805 8330 addr = load_reg(s, 13);
5899f386
FB
8331 if (insn & (1 << 8))
8332 offset = 4;
99c475ab 8333 else
5899f386
FB
8334 offset = 0;
8335 for (i = 0; i < 8; i++) {
8336 if (insn & (1 << i))
8337 offset += 4;
8338 }
8339 if ((insn & (1 << 11)) == 0) {
b0109805 8340 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8341 }
99c475ab
FB
8342 for (i = 0; i < 8; i++) {
8343 if (insn & (1 << i)) {
8344 if (insn & (1 << 11)) {
8345 /* pop */
b0109805
PB
8346 tmp = gen_ld32(addr, IS_USER(s));
8347 store_reg(s, i, tmp);
99c475ab
FB
8348 } else {
8349 /* push */
b0109805
PB
8350 tmp = load_reg(s, i);
8351 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8352 }
5899f386 8353 /* advance to the next address. */
b0109805 8354 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8355 }
8356 }
a50f5b91 8357 TCGV_UNUSED(tmp);
99c475ab
FB
8358 if (insn & (1 << 8)) {
8359 if (insn & (1 << 11)) {
8360 /* pop pc */
b0109805 8361 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8362 /* don't set the pc until the rest of the instruction
8363 has completed */
8364 } else {
8365 /* push lr */
b0109805
PB
8366 tmp = load_reg(s, 14);
8367 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8368 }
b0109805 8369 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8370 }
5899f386 8371 if ((insn & (1 << 11)) == 0) {
b0109805 8372 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8373 }
99c475ab 8374 /* write back the new stack pointer */
b0109805 8375 store_reg(s, 13, addr);
99c475ab
FB
8376 /* set the new PC value */
8377 if ((insn & 0x0900) == 0x0900)
b0109805 8378 gen_bx(s, tmp);
99c475ab
FB
8379 break;
8380
9ee6e8bb
PB
8381 case 1: case 3: case 9: case 11: /* czb */
8382 rm = insn & 7;
d9ba4830 8383 tmp = load_reg(s, rm);
9ee6e8bb
PB
8384 s->condlabel = gen_new_label();
8385 s->condjmp = 1;
8386 if (insn & (1 << 11))
cb63669a 8387 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8388 else
cb63669a 8389 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8390 dead_tmp(tmp);
9ee6e8bb
PB
8391 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8392 val = (uint32_t)s->pc + 2;
8393 val += offset;
8394 gen_jmp(s, val);
8395 break;
8396
8397 case 15: /* IT, nop-hint. */
8398 if ((insn & 0xf) == 0) {
8399 gen_nop_hint(s, (insn >> 4) & 0xf);
8400 break;
8401 }
8402 /* If Then. */
8403 s->condexec_cond = (insn >> 4) & 0xe;
8404 s->condexec_mask = insn & 0x1f;
8405 /* No actual code generated for this insn, just setup state. */
8406 break;
8407
06c949e6 8408 case 0xe: /* bkpt */
9ee6e8bb 8409 gen_set_condexec(s);
5e3f878a 8410 gen_set_pc_im(s->pc - 2);
d9ba4830 8411 gen_exception(EXCP_BKPT);
06c949e6
PB
8412 s->is_jmp = DISAS_JUMP;
8413 break;
8414
9ee6e8bb
PB
8415 case 0xa: /* rev */
8416 ARCH(6);
8417 rn = (insn >> 3) & 0x7;
8418 rd = insn & 0x7;
b0109805 8419 tmp = load_reg(s, rn);
9ee6e8bb 8420 switch ((insn >> 6) & 3) {
b0109805
PB
8421 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8422 case 1: gen_rev16(tmp); break;
8423 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8424 default: goto illegal_op;
8425 }
b0109805 8426 store_reg(s, rd, tmp);
9ee6e8bb
PB
8427 break;
8428
8429 case 6: /* cps */
8430 ARCH(6);
8431 if (IS_USER(s))
8432 break;
8433 if (IS_M(env)) {
8984bd2e 8434 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8435 /* PRIMASK */
8984bd2e
PB
8436 if (insn & 1) {
8437 addr = tcg_const_i32(16);
8438 gen_helper_v7m_msr(cpu_env, addr, tmp);
8439 }
9ee6e8bb 8440 /* FAULTMASK */
8984bd2e
PB
8441 if (insn & 2) {
8442 addr = tcg_const_i32(17);
8443 gen_helper_v7m_msr(cpu_env, addr, tmp);
8444 }
9ee6e8bb
PB
8445 gen_lookup_tb(s);
8446 } else {
8447 if (insn & (1 << 4))
8448 shift = CPSR_A | CPSR_I | CPSR_F;
8449 else
8450 shift = 0;
8451
8452 val = ((insn & 7) << 6) & shift;
8453 gen_op_movl_T0_im(val);
8454 gen_set_psr_T0(s, shift, 0);
8455 }
8456 break;
8457
99c475ab
FB
8458 default:
8459 goto undef;
8460 }
8461 break;
8462
8463 case 12:
8464 /* load/store multiple */
8465 rn = (insn >> 8) & 0x7;
b0109805 8466 addr = load_reg(s, rn);
99c475ab
FB
8467 for (i = 0; i < 8; i++) {
8468 if (insn & (1 << i)) {
99c475ab
FB
8469 if (insn & (1 << 11)) {
8470 /* load */
b0109805
PB
8471 tmp = gen_ld32(addr, IS_USER(s));
8472 store_reg(s, i, tmp);
99c475ab
FB
8473 } else {
8474 /* store */
b0109805
PB
8475 tmp = load_reg(s, i);
8476 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8477 }
5899f386 8478 /* advance to the next address */
b0109805 8479 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8480 }
8481 }
5899f386 8482 /* Base register writeback. */
b0109805
PB
8483 if ((insn & (1 << rn)) == 0) {
8484 store_reg(s, rn, addr);
8485 } else {
8486 dead_tmp(addr);
8487 }
99c475ab
FB
8488 break;
8489
8490 case 13:
8491 /* conditional branch or swi */
8492 cond = (insn >> 8) & 0xf;
8493 if (cond == 0xe)
8494 goto undef;
8495
8496 if (cond == 0xf) {
8497 /* swi */
9ee6e8bb 8498 gen_set_condexec(s);
422ebf69 8499 gen_set_pc_im(s->pc);
9ee6e8bb 8500 s->is_jmp = DISAS_SWI;
99c475ab
FB
8501 break;
8502 }
8503 /* generate a conditional jump to next instruction */
e50e6a20 8504 s->condlabel = gen_new_label();
d9ba4830 8505 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8506 s->condjmp = 1;
99c475ab
FB
8507 gen_movl_T1_reg(s, 15);
8508
8509 /* jump to the offset */
5899f386 8510 val = (uint32_t)s->pc + 2;
99c475ab 8511 offset = ((int32_t)insn << 24) >> 24;
5899f386 8512 val += offset << 1;
8aaca4c0 8513 gen_jmp(s, val);
99c475ab
FB
8514 break;
8515
8516 case 14:
358bf29e 8517 if (insn & (1 << 11)) {
9ee6e8bb
PB
8518 if (disas_thumb2_insn(env, s, insn))
8519 goto undef32;
358bf29e
PB
8520 break;
8521 }
9ee6e8bb 8522 /* unconditional branch */
99c475ab
FB
8523 val = (uint32_t)s->pc;
8524 offset = ((int32_t)insn << 21) >> 21;
8525 val += (offset << 1) + 2;
8aaca4c0 8526 gen_jmp(s, val);
99c475ab
FB
8527 break;
8528
8529 case 15:
9ee6e8bb 8530 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8531 goto undef32;
9ee6e8bb 8532 break;
99c475ab
FB
8533 }
8534 return;
9ee6e8bb
PB
8535undef32:
8536 gen_set_condexec(s);
5e3f878a 8537 gen_set_pc_im(s->pc - 4);
d9ba4830 8538 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8539 s->is_jmp = DISAS_JUMP;
8540 return;
8541illegal_op:
99c475ab 8542undef:
9ee6e8bb 8543 gen_set_condexec(s);
5e3f878a 8544 gen_set_pc_im(s->pc - 2);
d9ba4830 8545 gen_exception(EXCP_UDEF);
99c475ab
FB
8546 s->is_jmp = DISAS_JUMP;
8547}
8548
2c0262af
FB
8549/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8550 basic block 'tb'. If search_pc is TRUE, also generate PC
8551 information for each intermediate instruction. */
2cfc5f17
TS
8552static inline void gen_intermediate_code_internal(CPUState *env,
8553 TranslationBlock *tb,
8554 int search_pc)
2c0262af
FB
8555{
8556 DisasContext dc1, *dc = &dc1;
8557 uint16_t *gen_opc_end;
8558 int j, lj;
0fa85d43 8559 target_ulong pc_start;
b5ff1b31 8560 uint32_t next_page_start;
2e70f6ef
PB
8561 int num_insns;
8562 int max_insns;
3b46e624 8563
2c0262af 8564 /* generate intermediate code */
b26eefb6
PB
8565 num_temps = 0;
8566 memset(temps, 0, sizeof(temps));
8567
0fa85d43 8568 pc_start = tb->pc;
3b46e624 8569
2c0262af
FB
8570 dc->tb = tb;
8571
2c0262af 8572 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8573
8574 dc->is_jmp = DISAS_NEXT;
8575 dc->pc = pc_start;
8aaca4c0 8576 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8577 dc->condjmp = 0;
5899f386 8578 dc->thumb = env->thumb;
9ee6e8bb
PB
8579 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8580 dc->condexec_cond = env->condexec_bits >> 4;
6658ffb8 8581 dc->is_mem = 0;
b5ff1b31 8582#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8583 if (IS_M(env)) {
8584 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8585 } else {
8586 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8587 }
b5ff1b31 8588#endif
4373f3ce
PB
8589 cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
8590 cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
8591 cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
8592 cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
ad69471c
PB
8593 cpu_V0 = cpu_F0d;
8594 cpu_V1 = cpu_F1d;
e677137d
PB
8595 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8596 cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
b5ff1b31 8597 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8598 lj = -1;
2e70f6ef
PB
8599 num_insns = 0;
8600 max_insns = tb->cflags & CF_COUNT_MASK;
8601 if (max_insns == 0)
8602 max_insns = CF_COUNT_MASK;
8603
8604 gen_icount_start();
9ee6e8bb
PB
8605 /* Reset the conditional execution bits immediately. This avoids
8606 complications trying to do it at the end of the block. */
8607 if (env->condexec_bits)
8f01245e
PB
8608 {
8609 TCGv tmp = new_tmp();
8610 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8611 store_cpu_field(tmp, condexec_bits);
8f01245e 8612 }
2c0262af 8613 do {
fbb4a2e3
PB
8614#ifdef CONFIG_USER_ONLY
8615 /* Intercept jump to the magic kernel page. */
8616 if (dc->pc >= 0xffff0000) {
8617 /* We always get here via a jump, so know we are not in a
8618 conditional execution block. */
8619 gen_exception(EXCP_KERNEL_TRAP);
8620 dc->is_jmp = DISAS_UPDATE;
8621 break;
8622 }
8623#else
9ee6e8bb
PB
8624 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8625 /* We always get here via a jump, so know we are not in a
8626 conditional execution block. */
d9ba4830 8627 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8628 dc->is_jmp = DISAS_UPDATE;
8629 break;
9ee6e8bb
PB
8630 }
8631#endif
8632
1fddef4b
FB
8633 if (env->nb_breakpoints > 0) {
8634 for(j = 0; j < env->nb_breakpoints; j++) {
8635 if (env->breakpoints[j] == dc->pc) {
9ee6e8bb 8636 gen_set_condexec(dc);
5e3f878a 8637 gen_set_pc_im(dc->pc);
d9ba4830 8638 gen_exception(EXCP_DEBUG);
1fddef4b 8639 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8640 /* Advance PC so that clearing the breakpoint will
8641 invalidate this TB. */
8642 dc->pc += 2;
8643 goto done_generating;
1fddef4b
FB
8644 break;
8645 }
8646 }
8647 }
2c0262af
FB
8648 if (search_pc) {
8649 j = gen_opc_ptr - gen_opc_buf;
8650 if (lj < j) {
8651 lj++;
8652 while (lj < j)
8653 gen_opc_instr_start[lj++] = 0;
8654 }
0fa85d43 8655 gen_opc_pc[lj] = dc->pc;
2c0262af 8656 gen_opc_instr_start[lj] = 1;
2e70f6ef 8657 gen_opc_icount[lj] = num_insns;
2c0262af 8658 }
e50e6a20 8659
2e70f6ef
PB
8660 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8661 gen_io_start();
8662
9ee6e8bb
PB
8663 if (env->thumb) {
8664 disas_thumb_insn(env, dc);
8665 if (dc->condexec_mask) {
8666 dc->condexec_cond = (dc->condexec_cond & 0xe)
8667 | ((dc->condexec_mask >> 4) & 1);
8668 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8669 if (dc->condexec_mask == 0) {
8670 dc->condexec_cond = 0;
8671 }
8672 }
8673 } else {
8674 disas_arm_insn(env, dc);
8675 }
b26eefb6
PB
8676 if (num_temps) {
8677 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8678 num_temps = 0;
8679 }
e50e6a20
FB
8680
8681 if (dc->condjmp && !dc->is_jmp) {
8682 gen_set_label(dc->condlabel);
8683 dc->condjmp = 0;
8684 }
6658ffb8
PB
8685 /* Terminate the TB on memory ops if watchpoints are present. */
8686 /* FIXME: This should be replacd by the deterministic execution
8687 * IRQ raising bits. */
8688 if (dc->is_mem && env->nb_watchpoints)
8689 break;
8690
e50e6a20
FB
8691 /* Translation stops when a conditional branch is enoutered.
8692 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8693 * Also stop translation when a page boundary is reached. This
bf20dc07 8694 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8695 num_insns ++;
1fddef4b
FB
8696 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8697 !env->singlestep_enabled &&
2e70f6ef
PB
8698 dc->pc < next_page_start &&
8699 num_insns < max_insns);
8700
8701 if (tb->cflags & CF_LAST_IO) {
8702 if (dc->condjmp) {
8703 /* FIXME: This can theoretically happen with self-modifying
8704 code. */
8705 cpu_abort(env, "IO on conditional branch instruction");
8706 }
8707 gen_io_end();
8708 }
9ee6e8bb 8709
b5ff1b31 8710 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8711 instruction was a conditional branch or trap, and the PC has
8712 already been written. */
551bd27f 8713 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8714 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8715 if (dc->condjmp) {
9ee6e8bb
PB
8716 gen_set_condexec(dc);
8717 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8718 gen_exception(EXCP_SWI);
9ee6e8bb 8719 } else {
d9ba4830 8720 gen_exception(EXCP_DEBUG);
9ee6e8bb 8721 }
e50e6a20
FB
8722 gen_set_label(dc->condlabel);
8723 }
8724 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8725 gen_set_pc_im(dc->pc);
e50e6a20 8726 dc->condjmp = 0;
8aaca4c0 8727 }
9ee6e8bb
PB
8728 gen_set_condexec(dc);
8729 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8730 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8731 } else {
8732 /* FIXME: Single stepping a WFI insn will not halt
8733 the CPU. */
d9ba4830 8734 gen_exception(EXCP_DEBUG);
9ee6e8bb 8735 }
8aaca4c0 8736 } else {
9ee6e8bb
PB
8737 /* While branches must always occur at the end of an IT block,
8738 there are a few other things that can cause us to terminate
8739 the TB in the middel of an IT block:
8740 - Exception generating instructions (bkpt, swi, undefined).
8741 - Page boundaries.
8742 - Hardware watchpoints.
8743 Hardware breakpoints have already been handled and skip this code.
8744 */
8745 gen_set_condexec(dc);
8aaca4c0 8746 switch(dc->is_jmp) {
8aaca4c0 8747 case DISAS_NEXT:
6e256c93 8748 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8749 break;
8750 default:
8751 case DISAS_JUMP:
8752 case DISAS_UPDATE:
8753 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8754 tcg_gen_exit_tb(0);
8aaca4c0
FB
8755 break;
8756 case DISAS_TB_JUMP:
8757 /* nothing more to generate */
8758 break;
9ee6e8bb 8759 case DISAS_WFI:
d9ba4830 8760 gen_helper_wfi();
9ee6e8bb
PB
8761 break;
8762 case DISAS_SWI:
d9ba4830 8763 gen_exception(EXCP_SWI);
9ee6e8bb 8764 break;
8aaca4c0 8765 }
e50e6a20
FB
8766 if (dc->condjmp) {
8767 gen_set_label(dc->condlabel);
9ee6e8bb 8768 gen_set_condexec(dc);
6e256c93 8769 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8770 dc->condjmp = 0;
8771 }
2c0262af 8772 }
2e70f6ef 8773
9ee6e8bb 8774done_generating:
2e70f6ef 8775 gen_icount_end(tb, num_insns);
2c0262af
FB
8776 *gen_opc_ptr = INDEX_op_end;
8777
8778#ifdef DEBUG_DISAS
e19e89a5 8779 if (loglevel & CPU_LOG_TB_IN_ASM) {
2c0262af
FB
8780 fprintf(logfile, "----------------\n");
8781 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 8782 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2c0262af
FB
8783 fprintf(logfile, "\n");
8784 }
8785#endif
b5ff1b31
FB
8786 if (search_pc) {
8787 j = gen_opc_ptr - gen_opc_buf;
8788 lj++;
8789 while (lj <= j)
8790 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8791 } else {
2c0262af 8792 tb->size = dc->pc - pc_start;
2e70f6ef 8793 tb->icount = num_insns;
b5ff1b31 8794 }
2c0262af
FB
8795}
8796
2cfc5f17 8797void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 8798{
2cfc5f17 8799 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
8800}
8801
/* Generate intermediate code while recording the opcode/PC
   correspondence table (search_pc == 1).  Used when retranslating a TB
   to recover the guest PC at a fault point (see gen_pc_load). */
void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
8806
b5ff1b31
FB
/* Human-readable names for the ARM processor mode field, indexed by
   CPSR[3:0] (the low nibble of the mode bits).  Entries marked "???"
   are encodings with no defined mode name here. */
static const char *cpu_mode_names[16] = {
  "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
  "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 8811
/* Dump the ARM CPU register state for debugging.
 *
 * Prints R0-R15 four per line, then the CPSR with N/Z/C/V flag
 * characters, the Thumb bit, the current mode name and the address
 * width.  Output goes through the caller-supplied cpu_fprintf so the
 * same code serves the monitor and log files.  The 'flags' argument is
 * currently unused.  The VFP register dump is compiled out (#if 0).
 */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int i;
#if 0
    /* Scratch unions for the disabled VFP dump below: reinterpret the
       raw 32-bit halves of each double-precision register as floats. */
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    /* General-purpose registers, four per output line. */
    for(i=0;i<16;i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    /* cpsr_read() assembles the architectural CPSR from the split
       fields QEMU keeps internally. */
    psr = cpsr_read(env);
    /* Bits 31-28 are the N/Z/C/V condition flags; CPSR_T is the Thumb
       state bit ('T' = Thumb, 'A' = ARM).  Bit 4 distinguishes 32-bit
       from legacy 26-bit address modes. */
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    /* Disabled VFP dump: prints each double register both as two
       single-precision views and as a double. */
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}
a6b025d3 8864
d2856f1a
AJ
/* Restore the guest PC (R15) from the opcode/PC table built during a
   search_pc retranslation (see gen_intermediate_code_pc).  pc_pos is
   the index of the TCG op at which execution stopped; 'searched_pc',
   'tb' and 'puc' are unused on ARM. */
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                unsigned long searched_pc, int pc_pos, void *puc)
{
    env->regs[15] = gen_opc_pc[pc_pos];
}