]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
Sparc32: convert slavio interrupt controller to qdev
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
fad6cb1a 20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
2c0262af
FB
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
79383c9c 32#include "qemu-log.h"
1497c961 33
a7812ae4 34#include "helpers.h"
1497c961 35#define GEN_HELPER 1
b26eefb6 36#include "helpers.h"
2c0262af 37
9ee6e8bb
PB
38#define ENABLE_ARCH_5J 0
39#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
40#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
41#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
42#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 43
86753403 44#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 45
2c0262af
FB
46/* internal defines */
/* Per-translation-block decoder state, threaded through all of the
   disas_* routines while one TB is being translated.  */
typedef struct DisasContext {
    target_ulong pc;            /* address of the instruction being decoded */
    int is_jmp;                 /* DISAS_* disposition of the current insn */
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb; /* the TB under translation */
    int singlestep_enabled;
    int thumb;                  /* nonzero when decoding Thumb state */
#if !defined(CONFIG_USER_ONLY)
    int user;                   /* nonzero when translating for user mode */
#endif
} DisasContext;
64
b5ff1b31
FB
65#if defined(CONFIG_USER_ONLY)
66#define IS_USER(s) 1
67#else
68#define IS_USER(s) (s->user)
69#endif
70
9ee6e8bb
PB
71/* These instructions trap after executing, so defer them until after the
72 conditional executions state has been updated. */
73#define DISAS_WFI 4
74#define DISAS_SWI 5
2c0262af 75
a7812ae4 76static TCGv_ptr cpu_env;
ad69471c 77/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 78static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
ad69471c 79
b26eefb6 80/* FIXME: These should be removed. */
8f8e3aa4 81static TCGv cpu_T[2];
a7812ae4
PB
82static TCGv cpu_F0s, cpu_F1s;
83static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 84
2e70f6ef
PB
85#define ICOUNT_TEMP cpu_T[0]
86#include "gen-icount.h"
87
b26eefb6
PB
/* initialize TCG globals.  Called once at startup: registers the fixed
   TCG globals (env pointer and the legacy T0/T1 register temps) and then
   re-includes helpers.h with GEN_HELPER == 2, which at that setting
   expands the helper declarations into helper registration code.  */
void arm_translate_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");

#define GEN_HELPER 2
#include "helpers.h"
}
99
100/* The code generator doesn't like lots of temporaries, so maintain our own
101 cache for reuse within a function. */
102#define MAX_TEMPS 8
103static int num_temps;
104static TCGv temps[MAX_TEMPS];
105
/* Allocate a temporary variable.  Temps are cached in the file-scope
   temps[] array; a previously created temp is reused when available.
   Aborts if more than MAX_TEMPS are live at once.  */
static TCGv_i32 new_tmp(void)
{
    TCGv tmp;
    if (num_temps == MAX_TEMPS)
        abort();

    /* A nonzero TCG value index at this slot means a temp was already
       created here by an earlier call and can be handed out again.  */
    if (GET_TCGV_I32(temps[num_temps]))
        return temps[num_temps++];

    tmp = tcg_temp_new_i32();
    temps[num_temps++] = tmp;
    return tmp;
}
120
/* Release a temporary variable.  The cache is kept compact: if the freed
   temp is not the most recently allocated one, it is moved to the last
   live slot so that temps[0..num_temps) always holds the live temps and
   temps[num_temps] the first reusable one.  */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    /* Fast path: the freed temp is the top of the stack.  */
    if (TCGV_EQUAL(temps[i], tmp))
        return;

    /* Shuffle this temp to the last slot.  */
    while (!TCGV_EQUAL(temps[i], tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}
139
d9ba4830
PB
140static inline TCGv load_cpu_offset(int offset)
141{
142 TCGv tmp = new_tmp();
143 tcg_gen_ld_i32(tmp, cpu_env, offset);
144 return tmp;
145}
146
147#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
148
149static inline void store_cpu_offset(TCGv var, int offset)
150{
151 tcg_gen_st_i32(var, cpu_env, offset);
152 dead_tmp(var);
153}
154
155#define store_cpu_field(var, name) \
156 store_cpu_offset(var, offsetof(CPUState, name))
157
b26eefb6
PB
158/* Set a variable to the value of a CPU register. */
159static void load_reg_var(DisasContext *s, TCGv var, int reg)
160{
161 if (reg == 15) {
162 uint32_t addr;
163 /* normaly, since we updated PC, we need only to add one insn */
164 if (s->thumb)
165 addr = (long)s->pc + 2;
166 else
167 addr = (long)s->pc + 4;
168 tcg_gen_movi_i32(var, addr);
169 } else {
170 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
171 }
172}
173
174/* Create a new temporary and set it to the value of a CPU register. */
175static inline TCGv load_reg(DisasContext *s, int reg)
176{
177 TCGv tmp = new_tmp();
178 load_reg_var(s, tmp, reg);
179 return tmp;
180}
181
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  A write to r15 clears bit 0 (interworking bit is not
   stored in the PC) and ends the TB, since control flow changed.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}
193
194
195/* Basic operations. */
196#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
b26eefb6
PB
197#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
198#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
b26eefb6
PB
199
200#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
201#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
202#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
203#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
204
8984bd2e
PB
205#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
206#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
207#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
208#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
209#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
8984bd2e 210
b26eefb6
PB
211#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
212#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
213#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
214#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
215#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
216#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
217#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
218
b26eefb6
PB
219#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
220#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
b26eefb6
PB
221
222/* Value extensions. */
86831435
PB
223#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
224#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
225#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
226#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
227
1497c961
PB
228#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
229#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
230
231#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 232
d9ba4830
PB
233#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
234/* Set NZCV flags from the high 4 bits of var. */
235#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
236
237static void gen_exception(int excp)
238{
239 TCGv tmp = new_tmp();
240 tcg_gen_movi_i32(tmp, excp);
241 gen_helper_exception(tmp);
242 dead_tmp(tmp);
243}
244
3670669c
PB
/* Dual signed 16x16->32 multiply: on return, a holds the product of the
   low halfwords and b the product of the high halfwords (both inputs
   sign-extended).  Clobbers both operands; the statement order below
   matters because a and b are overwritten in place.  */
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    /* Low-half product computed first, into a scratch temp.  */
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    /* High-half product computed in place (arithmetic shift keeps sign).  */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
259
260/* Byteswap each halfword. */
261static void gen_rev16(TCGv var)
262{
263 TCGv tmp = new_tmp();
264 tcg_gen_shri_i32(tmp, var, 8);
265 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
266 tcg_gen_shli_i32(var, var, 8);
267 tcg_gen_andi_i32(var, var, 0xff00ff00);
268 tcg_gen_or_i32(var, var, tmp);
269 dead_tmp(tmp);
270}
271
/* Byteswap low halfword and sign extend (REVSH semantics).  The upper
   half of the result comes from sign-extending the original low byte
   after it is shifted into bits [15:8].  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);          /* old byte 1 -> bits [7:0] */
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);          /* old byte 0 -> bits [15:8] */
    tcg_gen_ext8s_i32(var, var);            /* NOTE: ext8s of the shifted
                                               value; combined with the OR
                                               below this forms the
                                               sign-extended swapped half */
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
283
284/* Unsigned bitfield extract. */
285static void gen_ubfx(TCGv var, int shift, uint32_t mask)
286{
287 if (shift)
288 tcg_gen_shri_i32(var, var, shift);
289 tcg_gen_andi_i32(var, var, mask);
290}
291
/* Signed bitfield extract: var = sign_extend(var[shift+width-1:shift]).
   Uses the classic xor/sub trick to sign-extend a WIDTH-bit field.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Field does not reach bit 31, so the arithmetic shift above did
           not already sign-extend it: mask to WIDTH bits, then
           (x ^ signbit) - signbit propagates the field's sign bit.  */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
306
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.
   MASK is the unshifted field mask; the field is placed at SHIFT.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
315
d9ba4830
PB
/* Round the top 32 bits of a 64-bit value.  Computes a = b + (a >> 31):
   A is the low word whose top bit is the rounding bit, B is the high
   word; the rounded high word is left in A.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
322
8f01245e
PB
323/* FIXME: Most targets have native widening multiplication.
324 It would be good to use that instead of a full wide multiply. */
5e3f878a 325/* 32x32->64 multiply. Marks inputs as dead. */
a7812ae4 326static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
5e3f878a 327{
a7812ae4
PB
328 TCGv_i64 tmp1 = tcg_temp_new_i64();
329 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
330
331 tcg_gen_extu_i32_i64(tmp1, a);
332 dead_tmp(a);
333 tcg_gen_extu_i32_i64(tmp2, b);
334 dead_tmp(b);
335 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
336 return tmp1;
337}
338
a7812ae4 339static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 340{
a7812ae4
PB
341 TCGv_i64 tmp1 = tcg_temp_new_i64();
342 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
343
344 tcg_gen_ext_i32_i64(tmp1, a);
345 dead_tmp(a);
346 tcg_gen_ext_i32_i64(tmp2, b);
347 dead_tmp(b);
348 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
349 return tmp1;
350}
351
8f01245e
PB
/* Unsigned 32x32->64 multiply.  T0:T1 receive the low:high words of the
   product T0 * T1.  The 64-bit temporaries are not explicitly freed
   here (matches the surrounding code's handling of i64 temps).  */
static void gen_op_mull_T0_T1(void)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);  /* low 32 bits -> T0 */
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);  /* high 32 bits -> T1 */
}
365
/* Signed 32x32->64 multiply.  On return a holds the low word and b the
   high word of the signed product a * b.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);     /* low 32 bits -> a */
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);     /* high 32 bits -> b */
}
d9ba4830 379
8f01245e
PB
380/* Swap low and high halfwords. */
381static void gen_swap_half(TCGv var)
382{
383 TCGv tmp = new_tmp();
384 tcg_gen_shri_i32(tmp, var, 16);
385 tcg_gen_shli_i32(var, var, 16);
386 tcg_gen_or_i32(var, var, tmp);
3670669c 387 dead_tmp(tmp);
8f01245e
PB
388}
389
b26eefb6
PB
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   The carry between halfwords is suppressed by masking out bit 15 of
   both operands before the add and patching the result with the saved
   xor of the two bit-15 values:
     tmp = (t0 ^ t1) & 0x8000;
     t0 &= ~0x8000;
     t1 &= ~0x8000;
     t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
409
9a119ff6
PB
410#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
411
b26eefb6
PB
412/* Set CF to the top bit of var. */
413static void gen_set_CF_bit31(TCGv var)
414{
415 TCGv tmp = new_tmp();
416 tcg_gen_shri_i32(tmp, var, 31);
4cc633c3 417 gen_set_CF(tmp);
b26eefb6
PB
418 dead_tmp(tmp);
419}
420
/* Set N and Z flags from var.  NF and ZF each store the raw value; the
   flag is derived from it later (sign bit for N, zero test for Z).  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
427
/* T0 += T1 + CF.  Add with carry on the legacy T0/T1 temps; CF holds
   0 or 1 so it can be added directly.  */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}
437
e9bb4aa9
JR
438/* dest = T0 + T1 + CF. */
439static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
440{
441 TCGv tmp;
442 tcg_gen_add_i32(dest, t0, t1);
443 tmp = load_cpu_field(CF);
444 tcg_gen_add_i32(dest, dest, tmp);
445 dead_tmp(tmp);
446}
447
3670669c
PB
/* dest = t0 - t1 + CF - 1 (subtract with carry/borrow).  CF holds 0 or
   1, so adding it and subtracting 1 implements the ARM SBC borrow.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
458
459#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
460#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
461
b26eefb6
PB
462/* T0 &= ~T1. Clobbers T1. */
463/* FIXME: Implement bic natively. */
8f8e3aa4
PB
464static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
465{
466 TCGv tmp = new_tmp();
467 tcg_gen_not_i32(tmp, t1);
468 tcg_gen_and_i32(dest, t0, tmp);
469 dead_tmp(tmp);
470}
b26eefb6
PB
/* T0 &= ~T1.  Note: deliberately clobbers T1 (inverted in place).  */
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}
476
ad69471c
PB
477/* FIXME: Implement this natively. */
478#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
479
b26eefb6
PB
/* FIXME: Implement this natively.  */
/* Rotate t1 right by i bits into t0: t0 = (t1 >> i) | (t1 << (32 - i)).
   No-op when i == 0.  Note that t1 is clobbered (shifted in place).  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}
494
/* Set the carry flag to bit SHIFT of VAR (the bit shifted out by an
   immediate shift).  VAR itself is preserved.  */
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        /* A shift of 31 already leaves only one bit; skip the mask.  */
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
b26eefb6 508
9a119ff6
PB
/* Shift by immediate.  Includes special handling for shift == 0, which
   encodes LSR #32, ASR #32 and RRX per the ARM shifter-operand rules.
   When FLAGS is set, the carry flag is updated with the shifted-out bit.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* Encoded shift 0 means LSR #32: result is 0, C = bit 31.  */
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;       /* encoded shift 0 means ASR #32 */
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;       /* sari by 31 replicates the sign bit */
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift); break;
        } else {
            /* RRX: rotate right one bit through the carry flag.  */
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
};
558
8984bd2e
PB
/* Shift VAR by a register amount.  Uses the *_cc helper variants when
   FLAGS is set so the carry flag is updated.  SHIFT is consumed.  */
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
579
6ddbc6e4
PB
/* Dispatch table for the ARM-encoding parallel add/subtract family.
   op2 selects the operation within the op1-selected saturation/flavour
   group; gen_pas_helper is redefined per group below.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
    /* Signed/unsigned flavours also write the GE flags, so they take a
       pointer to CPUState.GE as an extra helper argument.  */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
    /* Saturating/halving flavours do not touch GE.  */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
623
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
/* Same dispatch as above but with the Thumb-2 op2 numbering.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
    /* Signed/unsigned flavours also write the GE flags.  */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
    /* Saturating/halving flavours do not touch GE.  */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
668
d9ba4830
PB
/* Emit a branch to LABEL taken when ARM condition code CC holds.
   Flags are stored lazily: NF/ZF hold a value whose sign/zeroness gives
   the flag, CF holds 0 or 1, VF's sign bit is the overflow flag.
   Compound conditions (hi, gt) use an "inv" label to fall through when
   the first sub-condition fails.  */
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        /* 14 (al) and 15 are never passed here: always-executed insns
           do not reach the conditional path.  */
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
2c0262af 766
/* Indexed by data-processing opcode: nonzero for the logical ops, whose
   flag update goes through gen_logic_CC rather than the arithmetic
   carry/overflow helpers.  */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
3b46e624 785
d9ba4830
PB
/* Set PC and Thumb state from an immediate address.  Bit 0 of ADDR
   selects the new Thumb state; the CPUState.thumb field is only written
   when the state actually changes.  Ends the TB.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    if (s->thumb != (addr & 1)) {
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
    }
    /* tmp is reused: now holds the target PC with bit 0 cleared.  */
    tcg_gen_movi_i32(tmp, addr & ~1);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
    dead_tmp(tmp);
}
801
802/* Set PC and Thumb state from var. var is marked as dead. */
803static inline void gen_bx(DisasContext *s, TCGv var)
804{
805 TCGv tmp;
806
807 s->is_jmp = DISAS_UPDATE;
808 tmp = new_tmp();
809 tcg_gen_andi_i32(tmp, var, 1);
810 store_cpu_field(tmp, thumb);
811 tcg_gen_andi_i32(var, var, ~1);
812 store_cpu_field(var, regs[15]);
813}
814
21aeb343
JR
/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a temporary
   and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
827
b0109805
PB
828static inline TCGv gen_ld8s(TCGv addr, int index)
829{
830 TCGv tmp = new_tmp();
831 tcg_gen_qemu_ld8s(tmp, addr, index);
832 return tmp;
833}
834static inline TCGv gen_ld8u(TCGv addr, int index)
835{
836 TCGv tmp = new_tmp();
837 tcg_gen_qemu_ld8u(tmp, addr, index);
838 return tmp;
839}
840static inline TCGv gen_ld16s(TCGv addr, int index)
841{
842 TCGv tmp = new_tmp();
843 tcg_gen_qemu_ld16s(tmp, addr, index);
844 return tmp;
845}
846static inline TCGv gen_ld16u(TCGv addr, int index)
847{
848 TCGv tmp = new_tmp();
849 tcg_gen_qemu_ld16u(tmp, addr, index);
850 return tmp;
851}
852static inline TCGv gen_ld32(TCGv addr, int index)
853{
854 TCGv tmp = new_tmp();
855 tcg_gen_qemu_ld32u(tmp, addr, index);
856 return tmp;
857}
858static inline void gen_st8(TCGv val, TCGv addr, int index)
859{
860 tcg_gen_qemu_st8(val, addr, index);
861 dead_tmp(val);
862}
863static inline void gen_st16(TCGv val, TCGv addr, int index)
864{
865 tcg_gen_qemu_st16(val, addr, index);
866 dead_tmp(val);
867}
868static inline void gen_st32(TCGv val, TCGv addr, int index)
869{
870 tcg_gen_qemu_st32(val, addr, index);
871 dead_tmp(val);
872}
b5ff1b31 873
2c0262af
FB
/* Load register REG into the legacy T0/T1/T2 temps.  */
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    /* NOTE(review): cpu_T is declared above with only 2 elements, so
       cpu_T[2] indexes out of bounds here — verify against the full
       file whether this function is actually used / the array size.  */
    load_reg_var(s, cpu_T[2], reg);
}
888
5e3f878a
PB
889static inline void gen_set_pc_im(uint32_t val)
890{
891 TCGv tmp = new_tmp();
892 tcg_gen_movi_i32(tmp, val);
893 store_cpu_field(tmp, regs[15]);
894}
895
2c0262af
FB
/* Store legacy temp T[t] into register REG.  Writes to r15 clear bit 0
   and end the TB.  The T temps are globals, so a scratch temp is only
   needed for the masked r15 case.  */
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}
911
/* Convenience wrappers for the common T0/T1 cases.  */
static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}
921
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state:
   store the next insn address into r15 and end the TB.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
929
b0109805
PB
/* Apply the addressing-mode offset of a single data transfer insn to
   VAR: either a 12-bit immediate or a shifted register, added or
   subtracted according to the U bit (bit 23).  */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
957
/* Apply the addressing-mode offset of a halfword/signed transfer insn
   to VAR: split 8-bit immediate or plain register, added or subtracted
   per the U bit.  EXTRA is an additional constant added in either case.  */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
985
4373f3ce
PB
/* Generate a two-operand VFP arithmetic op on the F0/F1 scratch regs:
   F0 = F0 <op> F1, in double (dp) or single precision.  */
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2
1001
/* Single-operand / compare / conversion VFP ops on the F0/F1 scratch
   registers.  Note the conversion helpers mix operand widths: integer
   operands always live in the single-precision scratch reg (cpu_F0s),
   whatever the float precision.  */
static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

/* Compare F0 with F1 (flags end up in CPU state via the helper).  */
static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

/* Compare raising exceptions on quiet NaNs ("e" variant).  */
static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

/* Load constant zero into F1 (for compare-with-zero forms).  */
static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

/* Unsigned int (in cpu_F0s) -> float.  */
static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

/* Signed int (in cpu_F0s) -> float.  */
static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

/* Float -> unsigned int (result in cpu_F0s), rounding per FPSCR.  */
static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

/* Float -> unsigned int, round towards zero ("z" variant).  */
static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

/* Float -> signed int (result in cpu_F0s), rounding per FPSCR.  */
static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

/* Float -> signed int, round towards zero ("z" variant).  */
static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

/* Fixed-point conversions: F0 = convert(F0, #shift fractional bits).  */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1115
b5ff1b31
FB
1116static inline void gen_vfp_ld(DisasContext *s, int dp)
1117{
1118 if (dp)
4373f3ce 1119 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1120 else
4373f3ce 1121 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1122}
1123
1124static inline void gen_vfp_st(DisasContext *s, int dp)
1125{
1126 if (dp)
4373f3ce 1127 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1128 else
4373f3ce 1129 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1130}
1131
8e96005d
FB
1132static inline long
1133vfp_reg_offset (int dp, int reg)
1134{
1135 if (dp)
1136 return offsetof(CPUARMState, vfp.regs[reg]);
1137 else if (reg & 1) {
1138 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1139 + offsetof(CPU_DoubleU, l.upper);
1140 } else {
1141 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1142 + offsetof(CPU_DoubleU, l.lower);
1143 }
1144}
9ee6e8bb
PB
1145
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Each double register provides two 32-bit lanes.  */
    return vfp_reg_offset(0, reg * 2 + n);
}
1155
ad69471c
PB
/* FIXME: Remove these. */
/* Legacy accessors: read/write a 32-bit NEON lane through the global
   cpu_T[0]/cpu_T[1] temporaries.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
9ee6e8bb 1163
8f8e3aa4
PB
1164static TCGv neon_load_reg(int reg, int pass)
1165{
1166 TCGv tmp = new_tmp();
1167 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1168 return tmp;
1169}
1170
1171static void neon_store_reg(int reg, int pass, TCGv var)
1172{
1173 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1174 dead_tmp(var);
1175}
1176
a7812ae4 1177static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1178{
1179 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1180}
1181
a7812ae4 1182static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1183{
1184 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1185}
1186
4373f3ce
PB
/* VFP values are kept as raw integer bit patterns, so the f32/f64
   accessors are plain aliases of the i32/i64 load/store ops.  */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1191
b7bcbe95
FB
1192static inline void gen_mov_F0_vreg(int dp, int reg)
1193{
1194 if (dp)
4373f3ce 1195 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1196 else
4373f3ce 1197 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1198}
1199
1200static inline void gen_mov_F1_vreg(int dp, int reg)
1201{
1202 if (dp)
4373f3ce 1203 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1204 else
4373f3ce 1205 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1206}
1207
1208static inline void gen_mov_vreg_F0(int dp, int reg)
1209{
1210 if (dp)
4373f3ce 1211 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1212 else
4373f3ce 1213 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1214}
1215
18c9b560
AZ
/* Coprocessor load/store direction bit (the L bit) in the insn word.  */
#define ARM_CP_RW_BIT	(1 << 20)

/* Copy an iwMMXt data register (wRn) out of CPU state into a 64-bit temp.  */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

/* Copy a 64-bit temp into an iwMMXt data register (wRn).  */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1227
/* Moves between the global T0/T1/M0 temporaries and the iwMMXt control
   (wCx) and data (wRn) registers.  */
static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1252
/* 64-bit bitwise ops of the form M0 = M0 <op> wRn.  cpu_V1 is used as
   scratch, so these clobber it.  */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1270
/* Generators for helper-call wrappers of the form M0 = helper(M0, wRn):
   IWMMXT_OP         - helper without CPU state;
   IWMMXT_OP_ENV     - helper that also takes cpu_env;
   IWMMXT_OP_ENV_SIZE- instantiate the env variant for b/w/l element sizes;
   IWMMXT_OP_ENV1    - single-operand env helper, M0 = helper(M0).
   All of them clobber cpu_V1 (used to stage wRn) where applicable.  */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1295
/* Multiply/accumulate and sum-of-absolute-differences.  */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Interleave (unpack) of register pairs, all element sizes.  */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Single-operand unpack with zero/sign extension.  */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise comparisons.  */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max, signed and unsigned.  */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element-wise add/subtract variants.  */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averages (byte/word, rounding variants 0/1).  */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Pack with unsigned/signed saturation, all element sizes.  */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
1352
/* Multiply-add wrappers taking the two 32-bit operands from T0/T1.  */
static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

/* M0 = align helper over the {M0, wRn} pair with shift amount in T0
   (presumably WALIGN semantics - confirm against the helper).
   Clobbers cpu_V1.  */
static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}
1373
1374static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1375{
1376 TCGv tmp = tcg_const_i32(shift);
1377 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1378}
1379
/* Extract an element from M0 into T0 starting at bit 'shift'.
   NOTE: all three variants shift cpu_M0 in place, destroying it.  */
static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}

/* As above, but sign-extend a 16-bit element.  */
static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}

/* Unsigned extract: mask selects the element width (~0u means no mask).  */
static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}
1401
1402static void gen_op_iwmmxt_set_mup(void)
1403{
1404 TCGv tmp;
1405 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1406 tcg_gen_ori_i32(tmp, tmp, 2);
1407 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1408}
1409
1410static void gen_op_iwmmxt_set_cup(void)
1411{
1412 TCGv tmp;
1413 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1414 tcg_gen_ori_i32(tmp, tmp, 1);
1415 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1416}
1417
/* Compute the iwMMXt N/Z flags from M0 and store them to wCASF.  */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn.  Clobbers cpu_V1.  */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}


/* Split wRn into T0 (low word) and T1 (high word).  Clobbers cpu_V0.  */
static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}

/* Combine T0 (low word) and T1 (high word) into wRn.  Clobbers cpu_V0.  */
static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
    tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
    iwmmxt_store_reg(cpu_V0, rn);
}
1446
18c9b560
AZ
1447static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1448{
1449 int rd;
1450 uint32_t offset;
1451
1452 rd = (insn >> 16) & 0xf;
1453 gen_movl_T1_reg(s, rd);
1454
1455 offset = (insn & 0xff) << ((insn >> 7) & 2);
1456 if (insn & (1 << 24)) {
1457 /* Pre indexed */
1458 if (insn & (1 << 23))
1459 gen_op_addl_T1_im(offset);
1460 else
1461 gen_op_addl_T1_im(-offset);
1462
1463 if (insn & (1 << 21))
1464 gen_movl_reg_T1(s, rd);
1465 } else if (insn & (1 << 21)) {
1466 /* Post indexed */
1467 if (insn & (1 << 23))
1468 gen_op_movl_T0_im(offset);
1469 else
1470 gen_op_movl_T0_im(- offset);
1471 gen_op_addl_T0_T1();
1472 gen_movl_reg_T0(s, rd);
1473 } else if (!(insn & (1 << 23)))
1474 return 1;
1475 return 0;
1476}
1477
1478static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1479{
1480 int rd = (insn >> 0) & 0xf;
1481
1482 if (insn & (1 << 8))
1483 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1484 return 1;
1485 else
1486 gen_op_iwmmxt_movl_T0_wCx(rd);
1487 else
e677137d 1488 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1489
1490 gen_op_movl_T1_im(mask);
1491 gen_op_andl_T0_T1();
1492 return 0;
1493}
1494
1495/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1496 (ie. an undefined instruction). */
1497static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1498{
1499 int rd, wrd;
1500 int rdhi, rdlo, rd0, rd1, i;
b0109805 1501 TCGv tmp;
18c9b560
AZ
1502
1503 if ((insn & 0x0e000e00) == 0x0c000000) {
1504 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1505 wrd = insn & 0xf;
1506 rdlo = (insn >> 12) & 0xf;
1507 rdhi = (insn >> 16) & 0xf;
1508 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1509 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1510 gen_movl_reg_T0(s, rdlo);
1511 gen_movl_reg_T1(s, rdhi);
1512 } else { /* TMCRR */
1513 gen_movl_T0_reg(s, rdlo);
1514 gen_movl_T1_reg(s, rdhi);
e677137d 1515 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1516 gen_op_iwmmxt_set_mup();
1517 }
1518 return 0;
1519 }
1520
1521 wrd = (insn >> 12) & 0xf;
1522 if (gen_iwmmxt_address(s, insn))
1523 return 1;
1524 if (insn & ARM_CP_RW_BIT) {
1525 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1526 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1527 tcg_gen_mov_i32(cpu_T[0], tmp);
1528 dead_tmp(tmp);
18c9b560
AZ
1529 gen_op_iwmmxt_movl_wCx_T0(wrd);
1530 } else {
e677137d
PB
1531 i = 1;
1532 if (insn & (1 << 8)) {
1533 if (insn & (1 << 22)) { /* WLDRD */
1534 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1535 i = 0;
1536 } else { /* WLDRW wRd */
1537 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1538 }
1539 } else {
1540 if (insn & (1 << 22)) { /* WLDRH */
1541 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1542 } else { /* WLDRB */
1543 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1544 }
1545 }
1546 if (i) {
1547 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1548 dead_tmp(tmp);
1549 }
18c9b560
AZ
1550 gen_op_iwmmxt_movq_wRn_M0(wrd);
1551 }
1552 } else {
1553 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1554 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1555 tmp = new_tmp();
1556 tcg_gen_mov_i32(tmp, cpu_T[0]);
1557 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1558 } else {
1559 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1560 tmp = new_tmp();
1561 if (insn & (1 << 8)) {
1562 if (insn & (1 << 22)) { /* WSTRD */
1563 dead_tmp(tmp);
1564 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1565 } else { /* WSTRW wRd */
1566 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1567 gen_st32(tmp, cpu_T[1], IS_USER(s));
1568 }
1569 } else {
1570 if (insn & (1 << 22)) { /* WSTRH */
1571 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1572 gen_st16(tmp, cpu_T[1], IS_USER(s));
1573 } else { /* WSTRB */
1574 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1575 gen_st8(tmp, cpu_T[1], IS_USER(s));
1576 }
1577 }
18c9b560
AZ
1578 }
1579 }
1580 return 0;
1581 }
1582
1583 if ((insn & 0x0f000000) != 0x0e000000)
1584 return 1;
1585
1586 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1587 case 0x000: /* WOR */
1588 wrd = (insn >> 12) & 0xf;
1589 rd0 = (insn >> 0) & 0xf;
1590 rd1 = (insn >> 16) & 0xf;
1591 gen_op_iwmmxt_movq_M0_wRn(rd0);
1592 gen_op_iwmmxt_orq_M0_wRn(rd1);
1593 gen_op_iwmmxt_setpsr_nz();
1594 gen_op_iwmmxt_movq_wRn_M0(wrd);
1595 gen_op_iwmmxt_set_mup();
1596 gen_op_iwmmxt_set_cup();
1597 break;
1598 case 0x011: /* TMCR */
1599 if (insn & 0xf)
1600 return 1;
1601 rd = (insn >> 12) & 0xf;
1602 wrd = (insn >> 16) & 0xf;
1603 switch (wrd) {
1604 case ARM_IWMMXT_wCID:
1605 case ARM_IWMMXT_wCASF:
1606 break;
1607 case ARM_IWMMXT_wCon:
1608 gen_op_iwmmxt_set_cup();
1609 /* Fall through. */
1610 case ARM_IWMMXT_wCSSF:
1611 gen_op_iwmmxt_movl_T0_wCx(wrd);
1612 gen_movl_T1_reg(s, rd);
1613 gen_op_bicl_T0_T1();
1614 gen_op_iwmmxt_movl_wCx_T0(wrd);
1615 break;
1616 case ARM_IWMMXT_wCGR0:
1617 case ARM_IWMMXT_wCGR1:
1618 case ARM_IWMMXT_wCGR2:
1619 case ARM_IWMMXT_wCGR3:
1620 gen_op_iwmmxt_set_cup();
1621 gen_movl_reg_T0(s, rd);
1622 gen_op_iwmmxt_movl_wCx_T0(wrd);
1623 break;
1624 default:
1625 return 1;
1626 }
1627 break;
1628 case 0x100: /* WXOR */
1629 wrd = (insn >> 12) & 0xf;
1630 rd0 = (insn >> 0) & 0xf;
1631 rd1 = (insn >> 16) & 0xf;
1632 gen_op_iwmmxt_movq_M0_wRn(rd0);
1633 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1634 gen_op_iwmmxt_setpsr_nz();
1635 gen_op_iwmmxt_movq_wRn_M0(wrd);
1636 gen_op_iwmmxt_set_mup();
1637 gen_op_iwmmxt_set_cup();
1638 break;
1639 case 0x111: /* TMRC */
1640 if (insn & 0xf)
1641 return 1;
1642 rd = (insn >> 12) & 0xf;
1643 wrd = (insn >> 16) & 0xf;
1644 gen_op_iwmmxt_movl_T0_wCx(wrd);
1645 gen_movl_reg_T0(s, rd);
1646 break;
1647 case 0x300: /* WANDN */
1648 wrd = (insn >> 12) & 0xf;
1649 rd0 = (insn >> 0) & 0xf;
1650 rd1 = (insn >> 16) & 0xf;
1651 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1652 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1653 gen_op_iwmmxt_andq_M0_wRn(rd1);
1654 gen_op_iwmmxt_setpsr_nz();
1655 gen_op_iwmmxt_movq_wRn_M0(wrd);
1656 gen_op_iwmmxt_set_mup();
1657 gen_op_iwmmxt_set_cup();
1658 break;
1659 case 0x200: /* WAND */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 0) & 0xf;
1662 rd1 = (insn >> 16) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
1664 gen_op_iwmmxt_andq_M0_wRn(rd1);
1665 gen_op_iwmmxt_setpsr_nz();
1666 gen_op_iwmmxt_movq_wRn_M0(wrd);
1667 gen_op_iwmmxt_set_mup();
1668 gen_op_iwmmxt_set_cup();
1669 break;
1670 case 0x810: case 0xa10: /* WMADD */
1671 wrd = (insn >> 12) & 0xf;
1672 rd0 = (insn >> 0) & 0xf;
1673 rd1 = (insn >> 16) & 0xf;
1674 gen_op_iwmmxt_movq_M0_wRn(rd0);
1675 if (insn & (1 << 21))
1676 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1677 else
1678 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1679 gen_op_iwmmxt_movq_wRn_M0(wrd);
1680 gen_op_iwmmxt_set_mup();
1681 break;
1682 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1683 wrd = (insn >> 12) & 0xf;
1684 rd0 = (insn >> 16) & 0xf;
1685 rd1 = (insn >> 0) & 0xf;
1686 gen_op_iwmmxt_movq_M0_wRn(rd0);
1687 switch ((insn >> 22) & 3) {
1688 case 0:
1689 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1690 break;
1691 case 1:
1692 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1693 break;
1694 case 2:
1695 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1696 break;
1697 case 3:
1698 return 1;
1699 }
1700 gen_op_iwmmxt_movq_wRn_M0(wrd);
1701 gen_op_iwmmxt_set_mup();
1702 gen_op_iwmmxt_set_cup();
1703 break;
1704 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1705 wrd = (insn >> 12) & 0xf;
1706 rd0 = (insn >> 16) & 0xf;
1707 rd1 = (insn >> 0) & 0xf;
1708 gen_op_iwmmxt_movq_M0_wRn(rd0);
1709 switch ((insn >> 22) & 3) {
1710 case 0:
1711 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1712 break;
1713 case 1:
1714 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1715 break;
1716 case 2:
1717 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1718 break;
1719 case 3:
1720 return 1;
1721 }
1722 gen_op_iwmmxt_movq_wRn_M0(wrd);
1723 gen_op_iwmmxt_set_mup();
1724 gen_op_iwmmxt_set_cup();
1725 break;
1726 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1727 wrd = (insn >> 12) & 0xf;
1728 rd0 = (insn >> 16) & 0xf;
1729 rd1 = (insn >> 0) & 0xf;
1730 gen_op_iwmmxt_movq_M0_wRn(rd0);
1731 if (insn & (1 << 22))
1732 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1733 else
1734 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1735 if (!(insn & (1 << 20)))
1736 gen_op_iwmmxt_addl_M0_wRn(wrd);
1737 gen_op_iwmmxt_movq_wRn_M0(wrd);
1738 gen_op_iwmmxt_set_mup();
1739 break;
1740 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1741 wrd = (insn >> 12) & 0xf;
1742 rd0 = (insn >> 16) & 0xf;
1743 rd1 = (insn >> 0) & 0xf;
1744 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1745 if (insn & (1 << 21)) {
1746 if (insn & (1 << 20))
1747 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1748 else
1749 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1750 } else {
1751 if (insn & (1 << 20))
1752 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1753 else
1754 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1755 }
18c9b560
AZ
1756 gen_op_iwmmxt_movq_wRn_M0(wrd);
1757 gen_op_iwmmxt_set_mup();
1758 break;
1759 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1760 wrd = (insn >> 12) & 0xf;
1761 rd0 = (insn >> 16) & 0xf;
1762 rd1 = (insn >> 0) & 0xf;
1763 gen_op_iwmmxt_movq_M0_wRn(rd0);
1764 if (insn & (1 << 21))
1765 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1766 else
1767 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1768 if (!(insn & (1 << 20))) {
e677137d
PB
1769 iwmmxt_load_reg(cpu_V1, wrd);
1770 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1771 }
1772 gen_op_iwmmxt_movq_wRn_M0(wrd);
1773 gen_op_iwmmxt_set_mup();
1774 break;
1775 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1776 wrd = (insn >> 12) & 0xf;
1777 rd0 = (insn >> 16) & 0xf;
1778 rd1 = (insn >> 0) & 0xf;
1779 gen_op_iwmmxt_movq_M0_wRn(rd0);
1780 switch ((insn >> 22) & 3) {
1781 case 0:
1782 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1783 break;
1784 case 1:
1785 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1786 break;
1787 case 2:
1788 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1789 break;
1790 case 3:
1791 return 1;
1792 }
1793 gen_op_iwmmxt_movq_wRn_M0(wrd);
1794 gen_op_iwmmxt_set_mup();
1795 gen_op_iwmmxt_set_cup();
1796 break;
1797 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1798 wrd = (insn >> 12) & 0xf;
1799 rd0 = (insn >> 16) & 0xf;
1800 rd1 = (insn >> 0) & 0xf;
1801 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1802 if (insn & (1 << 22)) {
1803 if (insn & (1 << 20))
1804 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1805 else
1806 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1807 } else {
1808 if (insn & (1 << 20))
1809 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1810 else
1811 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1812 }
18c9b560
AZ
1813 gen_op_iwmmxt_movq_wRn_M0(wrd);
1814 gen_op_iwmmxt_set_mup();
1815 gen_op_iwmmxt_set_cup();
1816 break;
1817 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1818 wrd = (insn >> 12) & 0xf;
1819 rd0 = (insn >> 16) & 0xf;
1820 rd1 = (insn >> 0) & 0xf;
1821 gen_op_iwmmxt_movq_M0_wRn(rd0);
1822 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1823 gen_op_movl_T1_im(7);
1824 gen_op_andl_T0_T1();
1825 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1826 gen_op_iwmmxt_movq_wRn_M0(wrd);
1827 gen_op_iwmmxt_set_mup();
1828 break;
1829 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1830 rd = (insn >> 12) & 0xf;
1831 wrd = (insn >> 16) & 0xf;
1832 gen_movl_T0_reg(s, rd);
1833 gen_op_iwmmxt_movq_M0_wRn(wrd);
1834 switch ((insn >> 6) & 3) {
1835 case 0:
1836 gen_op_movl_T1_im(0xff);
1837 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1838 break;
1839 case 1:
1840 gen_op_movl_T1_im(0xffff);
1841 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1842 break;
1843 case 2:
1844 gen_op_movl_T1_im(0xffffffff);
1845 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1846 break;
1847 case 3:
1848 return 1;
1849 }
1850 gen_op_iwmmxt_movq_wRn_M0(wrd);
1851 gen_op_iwmmxt_set_mup();
1852 break;
1853 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1854 rd = (insn >> 12) & 0xf;
1855 wrd = (insn >> 16) & 0xf;
1856 if (rd == 15)
1857 return 1;
1858 gen_op_iwmmxt_movq_M0_wRn(wrd);
1859 switch ((insn >> 22) & 3) {
1860 case 0:
1861 if (insn & 8)
1862 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1863 else {
e677137d 1864 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1865 }
1866 break;
1867 case 1:
1868 if (insn & 8)
1869 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1870 else {
e677137d 1871 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1872 }
1873 break;
1874 case 2:
e677137d 1875 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1876 break;
1877 case 3:
1878 return 1;
1879 }
b26eefb6 1880 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1881 break;
1882 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1883 if ((insn & 0x000ff008) != 0x0003f000)
1884 return 1;
1885 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1886 switch ((insn >> 22) & 3) {
1887 case 0:
1888 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1889 break;
1890 case 1:
1891 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1892 break;
1893 case 2:
1894 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1895 break;
1896 case 3:
1897 return 1;
1898 }
1899 gen_op_shll_T1_im(28);
d9ba4830 1900 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1901 break;
1902 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1903 rd = (insn >> 12) & 0xf;
1904 wrd = (insn >> 16) & 0xf;
1905 gen_movl_T0_reg(s, rd);
1906 switch ((insn >> 6) & 3) {
1907 case 0:
e677137d 1908 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1909 break;
1910 case 1:
e677137d 1911 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1912 break;
1913 case 2:
e677137d 1914 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1915 break;
1916 case 3:
1917 return 1;
1918 }
1919 gen_op_iwmmxt_movq_wRn_M0(wrd);
1920 gen_op_iwmmxt_set_mup();
1921 break;
1922 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1923 if ((insn & 0x000ff00f) != 0x0003f000)
1924 return 1;
1925 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1926 switch ((insn >> 22) & 3) {
1927 case 0:
1928 for (i = 0; i < 7; i ++) {
1929 gen_op_shll_T1_im(4);
1930 gen_op_andl_T0_T1();
1931 }
1932 break;
1933 case 1:
1934 for (i = 0; i < 3; i ++) {
1935 gen_op_shll_T1_im(8);
1936 gen_op_andl_T0_T1();
1937 }
1938 break;
1939 case 2:
1940 gen_op_shll_T1_im(16);
1941 gen_op_andl_T0_T1();
1942 break;
1943 case 3:
1944 return 1;
1945 }
d9ba4830 1946 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1947 break;
1948 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1949 wrd = (insn >> 12) & 0xf;
1950 rd0 = (insn >> 16) & 0xf;
1951 gen_op_iwmmxt_movq_M0_wRn(rd0);
1952 switch ((insn >> 22) & 3) {
1953 case 0:
e677137d 1954 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1955 break;
1956 case 1:
e677137d 1957 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1958 break;
1959 case 2:
e677137d 1960 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1961 break;
1962 case 3:
1963 return 1;
1964 }
1965 gen_op_iwmmxt_movq_wRn_M0(wrd);
1966 gen_op_iwmmxt_set_mup();
1967 break;
1968 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1969 if ((insn & 0x000ff00f) != 0x0003f000)
1970 return 1;
1971 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1972 switch ((insn >> 22) & 3) {
1973 case 0:
1974 for (i = 0; i < 7; i ++) {
1975 gen_op_shll_T1_im(4);
1976 gen_op_orl_T0_T1();
1977 }
1978 break;
1979 case 1:
1980 for (i = 0; i < 3; i ++) {
1981 gen_op_shll_T1_im(8);
1982 gen_op_orl_T0_T1();
1983 }
1984 break;
1985 case 2:
1986 gen_op_shll_T1_im(16);
1987 gen_op_orl_T0_T1();
1988 break;
1989 case 3:
1990 return 1;
1991 }
d9ba4830 1992 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1993 break;
1994 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1995 rd = (insn >> 12) & 0xf;
1996 rd0 = (insn >> 16) & 0xf;
1997 if ((insn & 0xf) != 0)
1998 return 1;
1999 gen_op_iwmmxt_movq_M0_wRn(rd0);
2000 switch ((insn >> 22) & 3) {
2001 case 0:
e677137d 2002 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
2003 break;
2004 case 1:
e677137d 2005 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
2006 break;
2007 case 2:
e677137d 2008 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
2009 break;
2010 case 3:
2011 return 1;
2012 }
2013 gen_movl_reg_T0(s, rd);
2014 break;
2015 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2016 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2017 wrd = (insn >> 12) & 0xf;
2018 rd0 = (insn >> 16) & 0xf;
2019 rd1 = (insn >> 0) & 0xf;
2020 gen_op_iwmmxt_movq_M0_wRn(rd0);
2021 switch ((insn >> 22) & 3) {
2022 case 0:
2023 if (insn & (1 << 21))
2024 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2025 else
2026 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2027 break;
2028 case 1:
2029 if (insn & (1 << 21))
2030 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2031 else
2032 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2033 break;
2034 case 2:
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2037 else
2038 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2039 break;
2040 case 3:
2041 return 1;
2042 }
2043 gen_op_iwmmxt_movq_wRn_M0(wrd);
2044 gen_op_iwmmxt_set_mup();
2045 gen_op_iwmmxt_set_cup();
2046 break;
2047 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2048 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2049 wrd = (insn >> 12) & 0xf;
2050 rd0 = (insn >> 16) & 0xf;
2051 gen_op_iwmmxt_movq_M0_wRn(rd0);
2052 switch ((insn >> 22) & 3) {
2053 case 0:
2054 if (insn & (1 << 21))
2055 gen_op_iwmmxt_unpacklsb_M0();
2056 else
2057 gen_op_iwmmxt_unpacklub_M0();
2058 break;
2059 case 1:
2060 if (insn & (1 << 21))
2061 gen_op_iwmmxt_unpacklsw_M0();
2062 else
2063 gen_op_iwmmxt_unpackluw_M0();
2064 break;
2065 case 2:
2066 if (insn & (1 << 21))
2067 gen_op_iwmmxt_unpacklsl_M0();
2068 else
2069 gen_op_iwmmxt_unpacklul_M0();
2070 break;
2071 case 3:
2072 return 1;
2073 }
2074 gen_op_iwmmxt_movq_wRn_M0(wrd);
2075 gen_op_iwmmxt_set_mup();
2076 gen_op_iwmmxt_set_cup();
2077 break;
2078 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2079 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2080 wrd = (insn >> 12) & 0xf;
2081 rd0 = (insn >> 16) & 0xf;
2082 gen_op_iwmmxt_movq_M0_wRn(rd0);
2083 switch ((insn >> 22) & 3) {
2084 case 0:
2085 if (insn & (1 << 21))
2086 gen_op_iwmmxt_unpackhsb_M0();
2087 else
2088 gen_op_iwmmxt_unpackhub_M0();
2089 break;
2090 case 1:
2091 if (insn & (1 << 21))
2092 gen_op_iwmmxt_unpackhsw_M0();
2093 else
2094 gen_op_iwmmxt_unpackhuw_M0();
2095 break;
2096 case 2:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_unpackhsl_M0();
2099 else
2100 gen_op_iwmmxt_unpackhul_M0();
2101 break;
2102 case 3:
2103 return 1;
2104 }
2105 gen_op_iwmmxt_movq_wRn_M0(wrd);
2106 gen_op_iwmmxt_set_mup();
2107 gen_op_iwmmxt_set_cup();
2108 break;
2109 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2110 case 0x214: case 0x614: case 0xa14: case 0xe14:
2111 wrd = (insn >> 12) & 0xf;
2112 rd0 = (insn >> 16) & 0xf;
2113 gen_op_iwmmxt_movq_M0_wRn(rd0);
2114 if (gen_iwmmxt_shift(insn, 0xff))
2115 return 1;
2116 switch ((insn >> 22) & 3) {
2117 case 0:
2118 return 1;
2119 case 1:
e677137d 2120 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2121 break;
2122 case 2:
e677137d 2123 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2124 break;
2125 case 3:
e677137d 2126 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2127 break;
2128 }
2129 gen_op_iwmmxt_movq_wRn_M0(wrd);
2130 gen_op_iwmmxt_set_mup();
2131 gen_op_iwmmxt_set_cup();
2132 break;
2133 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2134 case 0x014: case 0x414: case 0x814: case 0xc14:
2135 wrd = (insn >> 12) & 0xf;
2136 rd0 = (insn >> 16) & 0xf;
2137 gen_op_iwmmxt_movq_M0_wRn(rd0);
2138 if (gen_iwmmxt_shift(insn, 0xff))
2139 return 1;
2140 switch ((insn >> 22) & 3) {
2141 case 0:
2142 return 1;
2143 case 1:
e677137d 2144 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2145 break;
2146 case 2:
e677137d 2147 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2148 break;
2149 case 3:
e677137d 2150 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2151 break;
2152 }
2153 gen_op_iwmmxt_movq_wRn_M0(wrd);
2154 gen_op_iwmmxt_set_mup();
2155 gen_op_iwmmxt_set_cup();
2156 break;
2157 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2158 case 0x114: case 0x514: case 0x914: case 0xd14:
2159 wrd = (insn >> 12) & 0xf;
2160 rd0 = (insn >> 16) & 0xf;
2161 gen_op_iwmmxt_movq_M0_wRn(rd0);
2162 if (gen_iwmmxt_shift(insn, 0xff))
2163 return 1;
2164 switch ((insn >> 22) & 3) {
2165 case 0:
2166 return 1;
2167 case 1:
e677137d 2168 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2169 break;
2170 case 2:
e677137d 2171 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2172 break;
2173 case 3:
e677137d 2174 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2175 break;
2176 }
2177 gen_op_iwmmxt_movq_wRn_M0(wrd);
2178 gen_op_iwmmxt_set_mup();
2179 gen_op_iwmmxt_set_cup();
2180 break;
2181 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2182 case 0x314: case 0x714: case 0xb14: case 0xf14:
2183 wrd = (insn >> 12) & 0xf;
2184 rd0 = (insn >> 16) & 0xf;
2185 gen_op_iwmmxt_movq_M0_wRn(rd0);
2186 switch ((insn >> 22) & 3) {
2187 case 0:
2188 return 1;
2189 case 1:
2190 if (gen_iwmmxt_shift(insn, 0xf))
2191 return 1;
e677137d 2192 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2193 break;
2194 case 2:
2195 if (gen_iwmmxt_shift(insn, 0x1f))
2196 return 1;
e677137d 2197 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2198 break;
2199 case 3:
2200 if (gen_iwmmxt_shift(insn, 0x3f))
2201 return 1;
e677137d 2202 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2203 break;
2204 }
2205 gen_op_iwmmxt_movq_wRn_M0(wrd);
2206 gen_op_iwmmxt_set_mup();
2207 gen_op_iwmmxt_set_cup();
2208 break;
2209 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2210 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2211 wrd = (insn >> 12) & 0xf;
2212 rd0 = (insn >> 16) & 0xf;
2213 rd1 = (insn >> 0) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0);
2215 switch ((insn >> 22) & 3) {
2216 case 0:
2217 if (insn & (1 << 21))
2218 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2219 else
2220 gen_op_iwmmxt_minub_M0_wRn(rd1);
2221 break;
2222 case 1:
2223 if (insn & (1 << 21))
2224 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2225 else
2226 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2227 break;
2228 case 2:
2229 if (insn & (1 << 21))
2230 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2231 else
2232 gen_op_iwmmxt_minul_M0_wRn(rd1);
2233 break;
2234 case 3:
2235 return 1;
2236 }
2237 gen_op_iwmmxt_movq_wRn_M0(wrd);
2238 gen_op_iwmmxt_set_mup();
2239 break;
2240 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2241 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2242 wrd = (insn >> 12) & 0xf;
2243 rd0 = (insn >> 16) & 0xf;
2244 rd1 = (insn >> 0) & 0xf;
2245 gen_op_iwmmxt_movq_M0_wRn(rd0);
2246 switch ((insn >> 22) & 3) {
2247 case 0:
2248 if (insn & (1 << 21))
2249 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2250 else
2251 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2252 break;
2253 case 1:
2254 if (insn & (1 << 21))
2255 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2256 else
2257 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2258 break;
2259 case 2:
2260 if (insn & (1 << 21))
2261 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2262 else
2263 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2264 break;
2265 case 3:
2266 return 1;
2267 }
2268 gen_op_iwmmxt_movq_wRn_M0(wrd);
2269 gen_op_iwmmxt_set_mup();
2270 break;
2271 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2272 case 0x402: case 0x502: case 0x602: case 0x702:
2273 wrd = (insn >> 12) & 0xf;
2274 rd0 = (insn >> 16) & 0xf;
2275 rd1 = (insn >> 0) & 0xf;
2276 gen_op_iwmmxt_movq_M0_wRn(rd0);
2277 gen_op_movl_T0_im((insn >> 20) & 3);
2278 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2279 gen_op_iwmmxt_movq_wRn_M0(wrd);
2280 gen_op_iwmmxt_set_mup();
2281 break;
2282 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2283 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2284 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2285 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2286 wrd = (insn >> 12) & 0xf;
2287 rd0 = (insn >> 16) & 0xf;
2288 rd1 = (insn >> 0) & 0xf;
2289 gen_op_iwmmxt_movq_M0_wRn(rd0);
2290 switch ((insn >> 20) & 0xf) {
2291 case 0x0:
2292 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2293 break;
2294 case 0x1:
2295 gen_op_iwmmxt_subub_M0_wRn(rd1);
2296 break;
2297 case 0x3:
2298 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2299 break;
2300 case 0x4:
2301 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2302 break;
2303 case 0x5:
2304 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2305 break;
2306 case 0x7:
2307 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2308 break;
2309 case 0x8:
2310 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2311 break;
2312 case 0x9:
2313 gen_op_iwmmxt_subul_M0_wRn(rd1);
2314 break;
2315 case 0xb:
2316 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2317 break;
2318 default:
2319 return 1;
2320 }
2321 gen_op_iwmmxt_movq_wRn_M0(wrd);
2322 gen_op_iwmmxt_set_mup();
2323 gen_op_iwmmxt_set_cup();
2324 break;
2325 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2326 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2327 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2328 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2329 wrd = (insn >> 12) & 0xf;
2330 rd0 = (insn >> 16) & 0xf;
2331 gen_op_iwmmxt_movq_M0_wRn(rd0);
2332 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2333 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2334 gen_op_iwmmxt_movq_wRn_M0(wrd);
2335 gen_op_iwmmxt_set_mup();
2336 gen_op_iwmmxt_set_cup();
2337 break;
2338 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2339 case 0x418: case 0x518: case 0x618: case 0x718:
2340 case 0x818: case 0x918: case 0xa18: case 0xb18:
2341 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2342 wrd = (insn >> 12) & 0xf;
2343 rd0 = (insn >> 16) & 0xf;
2344 rd1 = (insn >> 0) & 0xf;
2345 gen_op_iwmmxt_movq_M0_wRn(rd0);
2346 switch ((insn >> 20) & 0xf) {
2347 case 0x0:
2348 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2349 break;
2350 case 0x1:
2351 gen_op_iwmmxt_addub_M0_wRn(rd1);
2352 break;
2353 case 0x3:
2354 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2355 break;
2356 case 0x4:
2357 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2358 break;
2359 case 0x5:
2360 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2361 break;
2362 case 0x7:
2363 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2364 break;
2365 case 0x8:
2366 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2367 break;
2368 case 0x9:
2369 gen_op_iwmmxt_addul_M0_wRn(rd1);
2370 break;
2371 case 0xb:
2372 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2373 break;
2374 default:
2375 return 1;
2376 }
2377 gen_op_iwmmxt_movq_wRn_M0(wrd);
2378 gen_op_iwmmxt_set_mup();
2379 gen_op_iwmmxt_set_cup();
2380 break;
2381 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2382 case 0x408: case 0x508: case 0x608: case 0x708:
2383 case 0x808: case 0x908: case 0xa08: case 0xb08:
2384 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2385 wrd = (insn >> 12) & 0xf;
2386 rd0 = (insn >> 16) & 0xf;
2387 rd1 = (insn >> 0) & 0xf;
2388 gen_op_iwmmxt_movq_M0_wRn(rd0);
2389 if (!(insn & (1 << 20)))
2390 return 1;
2391 switch ((insn >> 22) & 3) {
2392 case 0:
2393 return 1;
2394 case 1:
2395 if (insn & (1 << 21))
2396 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2397 else
2398 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2399 break;
2400 case 2:
2401 if (insn & (1 << 21))
2402 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2403 else
2404 gen_op_iwmmxt_packul_M0_wRn(rd1);
2405 break;
2406 case 3:
2407 if (insn & (1 << 21))
2408 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2409 else
2410 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2411 break;
2412 }
2413 gen_op_iwmmxt_movq_wRn_M0(wrd);
2414 gen_op_iwmmxt_set_mup();
2415 gen_op_iwmmxt_set_cup();
2416 break;
2417 case 0x201: case 0x203: case 0x205: case 0x207:
2418 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2419 case 0x211: case 0x213: case 0x215: case 0x217:
2420 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2421 wrd = (insn >> 5) & 0xf;
2422 rd0 = (insn >> 12) & 0xf;
2423 rd1 = (insn >> 0) & 0xf;
2424 if (rd0 == 0xf || rd1 == 0xf)
2425 return 1;
2426 gen_op_iwmmxt_movq_M0_wRn(wrd);
2427 switch ((insn >> 16) & 0xf) {
2428 case 0x0: /* TMIA */
b26eefb6
PB
2429 gen_movl_T0_reg(s, rd0);
2430 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2431 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2432 break;
2433 case 0x8: /* TMIAPH */
b26eefb6
PB
2434 gen_movl_T0_reg(s, rd0);
2435 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2436 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2437 break;
2438 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2439 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2440 if (insn & (1 << 16))
2441 gen_op_shrl_T1_im(16);
2442 gen_op_movl_T0_T1();
b26eefb6 2443 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2444 if (insn & (1 << 17))
2445 gen_op_shrl_T1_im(16);
2446 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2447 break;
2448 default:
2449 return 1;
2450 }
2451 gen_op_iwmmxt_movq_wRn_M0(wrd);
2452 gen_op_iwmmxt_set_mup();
2453 break;
2454 default:
2455 return 1;
2456 }
2457
2458 return 0;
2459}
2460
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).
   Handles the two XScale DSP coprocessor (cp0) formats:
   - Multiply with Internal Accumulate (MIA/MIAPH/MIAxy), and
   - Internal Accumulator Access (MAR/MRA).
   Only accumulator 0 is implemented; any other acc number is UNDEF.  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* Only acc0 exists on XScale.  */
        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0: /* MIA: acc += rd0 * rd1 (32x32) */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8: /* MIAPH: packed halfword multiply-accumulate */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc: /* MIABB */
        case 0xd: /* MIABT */
        case 0xe: /* MIATB */
        case 0xf: /* MIATT */
            /* 16x16 multiply-accumulate; bits 16/17 select the top or
               bottom halfword of each source register.  */
            gen_movl_T1_reg(s, rd0);
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_movl_T1_reg(s, rd1);
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        /* Only acc0 exists on XScale.  */
        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) { /* MRA */
            gen_iwmmxt_movl_T0_T1_wRn(acc);
            gen_movl_reg_T0(s, rdlo);
            /* The accumulator is 40 bits wide: mask the high word down
               to its top 8 valid bits before writing rdhi.  */
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_movl_reg_T0(s, rdhi);
        } else { /* MAR */
            gen_movl_T0_reg(s, rdlo);
            gen_movl_T1_reg(s, rdhi);
            gen_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
2533
c1713132
AZ
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.
   Dispatches MRC/MCR for generic (board-registered) coprocessors via the
   env->cp[] read/write hooks; coprocessor accesses are privileged.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    /* All generic coprocessor accesses are privileged.  */
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        /* Read (MRC): UNDEF if no read hook is registered.  */
        if (!env->cp[cp].cp_read)
            return 1;
        /* Sync PC before calling out — NOTE(review): presumably so the
           helper sees the correct PC if it raises an exception; confirm.  */
        gen_set_pc_im(s->pc);
        tmp = new_tmp();
        gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
        store_reg(s, rd, tmp);
    } else {
        /* Write (MCR): UNDEF if no write hook is registered.  */
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
    }
    return 0;
}
2562
9ee6e8bb
PB
2563static int cp15_user_ok(uint32_t insn)
2564{
2565 int cpn = (insn >> 16) & 0xf;
2566 int cpm = insn & 0xf;
2567 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2568
2569 if (cpn == 13 && cpm == 0) {
2570 /* TLS register. */
2571 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2572 return 1;
2573 }
2574 if (cpn == 7) {
2575 /* ISB, DSB, DMB. */
2576 if ((cpm == 5 && op == 4)
2577 || (cpm == 10 && (op == 4 || op == 5)))
2578 return 1;
2579 }
2580 return 0;
2581}
2582
b5ff1b31
FB
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.
   Handles MRC/MCR to cp15: MCRR is treated as a cache-maintenance no-op,
   MRRC and CDP are UNDEF, WFI encodings end the TB in the WFI state, and
   everything else is routed through the get/set_cp15 helpers.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    /* User mode may only touch the handful of registers whitelisted by
       cp15_user_ok().  */
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
2638
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction word.
   VFP_REG_SHR shifts right for positive n, left for negative n.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Single-precision register: 4-bit field at 'bigbit' supplies the high
   bits, the single bit at 'smallbit' the low bit.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register: on VFP3 the bit at 'smallbit' extends the
   range to 32 registers; pre-VFP3 it must be clear or the enclosing
   function returns 1 (UNDEF) — note the macro contains a 'return 1'.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* D (destination), N and M operand fields in their standard positions.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2658
4373f3ce
PB
2659/* Move between integer and VFP cores. */
2660static TCGv gen_vfp_mrs(void)
2661{
2662 TCGv tmp = new_tmp();
2663 tcg_gen_mov_i32(tmp, cpu_F0s);
2664 return tmp;
2665}
2666
2667static void gen_vfp_msr(TCGv tmp)
2668{
2669 tcg_gen_mov_i32(cpu_F0s, tmp);
2670 dead_tmp(tmp);
2671}
2672
9ee6e8bb
PB
2673static inline int
2674vfp_enabled(CPUState * env)
2675{
2676 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2677}
2678
ad69471c
PB
2679static void gen_neon_dup_u8(TCGv var, int shift)
2680{
2681 TCGv tmp = new_tmp();
2682 if (shift)
2683 tcg_gen_shri_i32(var, var, shift);
86831435 2684 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2685 tcg_gen_shli_i32(tmp, var, 8);
2686 tcg_gen_or_i32(var, var, tmp);
2687 tcg_gen_shli_i32(tmp, var, 16);
2688 tcg_gen_or_i32(var, var, tmp);
2689 dead_tmp(tmp);
2690}
2691
2692static void gen_neon_dup_low16(TCGv var)
2693{
2694 TCGv tmp = new_tmp();
86831435 2695 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2696 tcg_gen_shli_i32(tmp, var, 16);
2697 tcg_gen_or_i32(var, var, tmp);
2698 dead_tmp(tmp);
2699}
2700
2701static void gen_neon_dup_high16(TCGv var)
2702{
2703 TCGv tmp = new_tmp();
2704 tcg_gen_andi_i32(var, var, 0xffff0000);
2705 tcg_gen_shri_i32(tmp, var, 16);
2706 tcg_gen_or_i32(var, var, tmp);
2707 dead_tmp(tmp);
2708}
2709
b7bcbe95
FB
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).
   Covers single register transfers (VMOV core<->VFP, VDUP, system regs),
   the data-processing group (including short-vector iteration driven by
   FPSCR vec_len/vec_stride), two-register transfers, and load/store
   (single and multiple).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    /* Bits [11:8] == 0xb selects double precision.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                /* Element/lane forms require Neon.  */
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                pass = (insn >> 21) & 1;
                /* Decode the element size and lane offset within the
                   32-bit pass word.  */
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm: extract one lane, sign- or zero-extend
                       (bit 23 selects unsigned).  */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP: broadcast the value into every pass word
                           up to and including 'pass * 2'.  */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = new_tmp();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV: insert the lane into the existing word.  */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            dead_tmp(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            dead_tmp(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            /* rd == 15 is the FMSTAT form: only the flag
                               bits are transferred.  */
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = new_tmp();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        dead_tmp(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            dead_tmp(tmp);
                            /* FPSCR changes vector length/stride, so end
                               the TB.  */
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                rm = VFP_SREG_M(insn);
            }

            veclen = env->vfp.vec_len;
            /* Compares and conversions are always scalar.  */
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    /* Expand the 8-bit immediate into a full float
                       constant (sign, biased exponent, 4-bit mantissa).  */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, 16 - rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, 32 - rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, 16 - rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, 32 - rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, 16 - rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, 32 - rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, 16 - rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, 32 - rm);
                        break;
                    default: /* undefined */
                        /* NOTE(review): debug printf on an undefined
                           encoding; consider removing or routing to the
                           qemu log.  */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    /* NOTE(review): debug printf, see above.  */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            /* PC-relative addresses in Thumb are word-aligned.  */
            if (s->thumb && rn == 15) {
                gen_op_movl_T1_im(s->pc & ~2);
            } else {
                gen_movl_T1_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
3371
6e256c93 3372static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3373{
6e256c93
FB
3374 TranslationBlock *tb;
3375
3376 tb = s->tb;
3377 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3378 tcg_gen_goto_tb(n);
8984bd2e 3379 gen_set_pc_im(dest);
57fec1fe 3380 tcg_gen_exit_tb((long)tb + n);
6e256c93 3381 } else {
8984bd2e 3382 gen_set_pc_im(dest);
57fec1fe 3383 tcg_gen_exit_tb(0);
6e256c93 3384 }
c53be334
FB
3385}
3386
8aaca4c0
FB
3387static inline void gen_jmp (DisasContext *s, uint32_t dest)
3388{
551bd27f 3389 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3390 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3391 if (s->thumb)
d9ba4830
PB
3392 dest |= 1;
3393 gen_bx_im(s, dest);
8aaca4c0 3394 } else {
6e256c93 3395 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3396 s->is_jmp = DISAS_TB_JUMP;
3397 }
3398}
3399
d9ba4830 3400static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3401{
ee097184 3402 if (x)
d9ba4830 3403 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3404 else
d9ba4830 3405 gen_sxth(t0);
ee097184 3406 if (y)
d9ba4830 3407 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3408 else
d9ba4830
PB
3409 gen_sxth(t1);
3410 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3411}
3412
3413/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3414static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3415 uint32_t mask;
3416
3417 mask = 0;
3418 if (flags & (1 << 0))
3419 mask |= 0xff;
3420 if (flags & (1 << 1))
3421 mask |= 0xff00;
3422 if (flags & (1 << 2))
3423 mask |= 0xff0000;
3424 if (flags & (1 << 3))
3425 mask |= 0xff000000;
9ee6e8bb 3426
2ae23e75 3427 /* Mask out undefined bits. */
9ee6e8bb
PB
3428 mask &= ~CPSR_RESERVED;
3429 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3430 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3431 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3432 mask &= ~CPSR_IT;
9ee6e8bb 3433 /* Mask out execution state bits. */
2ae23e75 3434 if (!spsr)
e160c51c 3435 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3436 /* Mask out privileged bits. */
3437 if (IS_USER(s))
9ee6e8bb 3438 mask &= CPSR_USER;
b5ff1b31
FB
3439 return mask;
3440}
3441
3442/* Returns nonzero if access to the PSR is not permitted. */
3443static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3444{
d9ba4830 3445 TCGv tmp;
b5ff1b31
FB
3446 if (spsr) {
3447 /* ??? This is also undefined in system mode. */
3448 if (IS_USER(s))
3449 return 1;
d9ba4830
PB
3450
3451 tmp = load_cpu_field(spsr);
3452 tcg_gen_andi_i32(tmp, tmp, ~mask);
3453 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3454 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3455 store_cpu_field(tmp, spsr);
b5ff1b31 3456 } else {
d9ba4830 3457 gen_set_cpsr(cpu_T[0], mask);
b5ff1b31
FB
3458 }
3459 gen_lookup_tb(s);
3460 return 0;
3461}
3462
e9bb4aa9
JR
3463/* Generate an old-style exception return. Marks pc as dead. */
3464static void gen_exception_return(DisasContext *s, TCGv pc)
b5ff1b31 3465{
d9ba4830 3466 TCGv tmp;
e9bb4aa9 3467 store_reg(s, 15, pc);
d9ba4830
PB
3468 tmp = load_cpu_field(spsr);
3469 gen_set_cpsr(tmp, 0xffffffff);
3470 dead_tmp(tmp);
b5ff1b31
FB
3471 s->is_jmp = DISAS_UPDATE;
3472}
3473
b0109805
PB
3474/* Generate a v6 exception return. Marks both values as dead. */
3475static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3476{
b0109805
PB
3477 gen_set_cpsr(cpsr, 0xffffffff);
3478 dead_tmp(cpsr);
3479 store_reg(s, 15, pc);
9ee6e8bb
PB
3480 s->is_jmp = DISAS_UPDATE;
3481}
3b46e624 3482
9ee6e8bb
PB
3483static inline void
3484gen_set_condexec (DisasContext *s)
3485{
3486 if (s->condexec_mask) {
8f01245e
PB
3487 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3488 TCGv tmp = new_tmp();
3489 tcg_gen_movi_i32(tmp, val);
d9ba4830 3490 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3491 }
3492}
3b46e624 3493
9ee6e8bb
PB
3494static void gen_nop_hint(DisasContext *s, int val)
3495{
3496 switch (val) {
3497 case 3: /* wfi */
8984bd2e 3498 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3499 s->is_jmp = DISAS_WFI;
3500 break;
3501 case 2: /* wfe */
3502 case 4: /* sev */
3503 /* TODO: Implement SEV and WFE. May help SMP performance. */
3504 default: /* nop */
3505 break;
3506 }
3507}
99c475ab 3508
ad69471c
PB
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
/* (dest, src1, src2) triple over the 32-bit T0/T1 temporaries.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
/* As above, with cpu_env inserted for helpers that take the CPU state.  */
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

/* (dest, src1, src2) triple over the 64-bit V0/V1 temporaries.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb
PB
3516
3517static inline int gen_neon_add(int size)
3518{
3519 switch (size) {
ad69471c
PB
3520 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3521 case 1: gen_helper_neon_add_u16(CPU_T001); break;
9ee6e8bb
PB
3522 case 2: gen_op_addl_T0_T1(); break;
3523 default: return 1;
3524 }
3525 return 0;
3526}
3527
ad69471c
PB
3528static inline void gen_neon_rsb(int size)
3529{
3530 switch (size) {
3531 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3532 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3533 case 2: gen_op_rsbl_T0_T1(); break;
3534 default: return;
3535 }
3536}
3537
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
/* Adapters mapping the env-taking Neon saturating-op signature onto the
   plain ARM saturating helpers (the env argument 'e' is dropped).  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)

/* Dispatch T0 = helper(env, T0, T1) on (size, u): size selects 8/16/32-bit
   elements, u selects the unsigned variant.  Expands to "return 1" (invalid
   instruction) for size == 3.  Variant for helpers that take cpu_env.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)

/* As GEN_NEON_INTEGER_OP_ENV, for helpers that do not take cpu_env.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
3595
3596static inline void
3597gen_neon_movl_scratch_T0(int scratch)
3598{
3599 uint32_t offset;
3600
3601 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3602 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3603}
3604
3605static inline void
3606gen_neon_movl_scratch_T1(int scratch)
3607{
3608 uint32_t offset;
3609
3610 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3611 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3612}
3613
3614static inline void
3615gen_neon_movl_T0_scratch(int scratch)
3616{
3617 uint32_t offset;
3618
3619 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3620 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3621}
3622
3623static inline void
3624gen_neon_movl_T1_scratch(int scratch)
3625{
3626 uint32_t offset;
3627
3628 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3629 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3630}
3631
3632static inline void gen_neon_get_scalar(int size, int reg)
3633{
3634 if (size == 1) {
3635 NEON_GET_REG(T0, reg >> 1, reg & 1);
3636 } else {
3637 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3638 if (reg & 1)
ad69471c 3639 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb 3640 else
ad69471c 3641 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb
PB
3642 }
3643}
3644
3645static void gen_neon_unzip(int reg, int q, int tmp, int size)
3646{
3647 int n;
3648
3649 for (n = 0; n < q + 1; n += 2) {
3650 NEON_GET_REG(T0, reg, n);
3651 NEON_GET_REG(T0, reg, n + n);
3652 switch (size) {
ad69471c
PB
3653 case 0: gen_helper_neon_unzip_u8(); break;
3654 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3655 case 2: /* no-op */; break;
3656 default: abort();
3657 }
3658 gen_neon_movl_scratch_T0(tmp + n);
3659 gen_neon_movl_scratch_T1(tmp + n + 1);
3660 }
3661}
3662
/* Transfer layout for the Neon "load/store multiple structures"
   encodings, indexed by the instruction's op field.  nregs is the number
   of D registers moved, interleave scales the per-element stride, and
   spacing is the D-register index step between structures.  */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3680
3681/* Translate a NEON load/store element instruction. Return nonzero if the
3682 instruction is invalid. */
3683static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3684{
3685 int rd, rn, rm;
3686 int op;
3687 int nregs;
3688 int interleave;
3689 int stride;
3690 int size;
3691 int reg;
3692 int pass;
3693 int load;
3694 int shift;
9ee6e8bb 3695 int n;
b0109805 3696 TCGv tmp;
8f8e3aa4 3697 TCGv tmp2;
9ee6e8bb
PB
3698
3699 if (!vfp_enabled(env))
3700 return 1;
3701 VFP_DREG_D(rd, insn);
3702 rn = (insn >> 16) & 0xf;
3703 rm = insn & 0xf;
3704 load = (insn & (1 << 21)) != 0;
3705 if ((insn & (1 << 23)) == 0) {
3706 /* Load store all elements. */
3707 op = (insn >> 8) & 0xf;
3708 size = (insn >> 6) & 3;
3709 if (op > 10 || size == 3)
3710 return 1;
3711 nregs = neon_ls_element_type[op].nregs;
3712 interleave = neon_ls_element_type[op].interleave;
3713 gen_movl_T1_reg(s, rn);
3714 stride = (1 << size) * interleave;
3715 for (reg = 0; reg < nregs; reg++) {
3716 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3717 gen_movl_T1_reg(s, rn);
3718 gen_op_addl_T1_im((1 << size) * reg);
3719 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3720 gen_movl_T1_reg(s, rn);
3721 gen_op_addl_T1_im(1 << size);
3722 }
3723 for (pass = 0; pass < 2; pass++) {
3724 if (size == 2) {
3725 if (load) {
b0109805 3726 tmp = gen_ld32(cpu_T[1], IS_USER(s));
ad69471c 3727 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3728 } else {
ad69471c 3729 tmp = neon_load_reg(rd, pass);
b0109805 3730 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3731 }
3732 gen_op_addl_T1_im(stride);
3733 } else if (size == 1) {
3734 if (load) {
b0109805 3735 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3736 gen_op_addl_T1_im(stride);
8f8e3aa4 3737 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3738 gen_op_addl_T1_im(stride);
8f8e3aa4
PB
3739 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3740 dead_tmp(tmp2);
3741 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3742 } else {
8f8e3aa4
PB
3743 tmp = neon_load_reg(rd, pass);
3744 tmp2 = new_tmp();
3745 tcg_gen_shri_i32(tmp2, tmp, 16);
b0109805 3746 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3747 gen_op_addl_T1_im(stride);
8f8e3aa4 3748 gen_st16(tmp2, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3749 gen_op_addl_T1_im(stride);
3750 }
3751 } else /* size == 0 */ {
3752 if (load) {
a50f5b91 3753 TCGV_UNUSED(tmp2);
9ee6e8bb 3754 for (n = 0; n < 4; n++) {
b0109805 3755 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3756 gen_op_addl_T1_im(stride);
3757 if (n == 0) {
8f8e3aa4 3758 tmp2 = tmp;
9ee6e8bb 3759 } else {
8f8e3aa4
PB
3760 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3761 dead_tmp(tmp);
9ee6e8bb 3762 }
9ee6e8bb 3763 }
8f8e3aa4 3764 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3765 } else {
8f8e3aa4 3766 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3767 for (n = 0; n < 4; n++) {
8f8e3aa4 3768 tmp = new_tmp();
9ee6e8bb 3769 if (n == 0) {
8f8e3aa4 3770 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3771 } else {
8f8e3aa4 3772 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3773 }
b0109805 3774 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3775 gen_op_addl_T1_im(stride);
9ee6e8bb 3776 }
8f8e3aa4 3777 dead_tmp(tmp2);
9ee6e8bb
PB
3778 }
3779 }
3780 }
3781 rd += neon_ls_element_type[op].spacing;
3782 }
3783 stride = nregs * 8;
3784 } else {
3785 size = (insn >> 10) & 3;
3786 if (size == 3) {
3787 /* Load single element to all lanes. */
3788 if (!load)
3789 return 1;
3790 size = (insn >> 6) & 3;
3791 nregs = ((insn >> 8) & 3) + 1;
3792 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3793 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3794 for (reg = 0; reg < nregs; reg++) {
3795 switch (size) {
3796 case 0:
b0109805 3797 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
ad69471c 3798 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3799 break;
3800 case 1:
b0109805 3801 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
ad69471c 3802 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3803 break;
3804 case 2:
b0109805 3805 tmp = gen_ld32(cpu_T[0], IS_USER(s));
9ee6e8bb
PB
3806 break;
3807 case 3:
3808 return 1;
a50f5b91
PB
3809 default: /* Avoid compiler warnings. */
3810 abort();
99c475ab 3811 }
9ee6e8bb 3812 gen_op_addl_T1_im(1 << size);
ad69471c
PB
3813 tmp2 = new_tmp();
3814 tcg_gen_mov_i32(tmp2, tmp);
3815 neon_store_reg(rd, 0, tmp2);
3018f259 3816 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3817 rd += stride;
3818 }
3819 stride = (1 << size) * nregs;
3820 } else {
3821 /* Single element. */
3822 pass = (insn >> 7) & 1;
3823 switch (size) {
3824 case 0:
3825 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3826 stride = 1;
3827 break;
3828 case 1:
3829 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3830 stride = (insn & (1 << 5)) ? 2 : 1;
3831 break;
3832 case 2:
3833 shift = 0;
9ee6e8bb
PB
3834 stride = (insn & (1 << 6)) ? 2 : 1;
3835 break;
3836 default:
3837 abort();
3838 }
3839 nregs = ((insn >> 8) & 3) + 1;
3840 gen_movl_T1_reg(s, rn);
3841 for (reg = 0; reg < nregs; reg++) {
3842 if (load) {
9ee6e8bb
PB
3843 switch (size) {
3844 case 0:
b0109805 3845 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3846 break;
3847 case 1:
b0109805 3848 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3849 break;
3850 case 2:
b0109805 3851 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb 3852 break;
a50f5b91
PB
3853 default: /* Avoid compiler warnings. */
3854 abort();
9ee6e8bb
PB
3855 }
3856 if (size != 2) {
8f8e3aa4
PB
3857 tmp2 = neon_load_reg(rd, pass);
3858 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3859 dead_tmp(tmp2);
9ee6e8bb 3860 }
8f8e3aa4 3861 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3862 } else { /* Store */
8f8e3aa4
PB
3863 tmp = neon_load_reg(rd, pass);
3864 if (shift)
3865 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3866 switch (size) {
3867 case 0:
b0109805 3868 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3869 break;
3870 case 1:
b0109805 3871 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3872 break;
3873 case 2:
b0109805 3874 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3875 break;
99c475ab 3876 }
99c475ab 3877 }
9ee6e8bb
PB
3878 rd += stride;
3879 gen_op_addl_T1_im(1 << size);
99c475ab 3880 }
9ee6e8bb 3881 stride = nregs * (1 << size);
99c475ab 3882 }
9ee6e8bb
PB
3883 }
3884 if (rm != 15) {
b26eefb6
PB
3885 TCGv base;
3886
3887 base = load_reg(s, rn);
9ee6e8bb 3888 if (rm == 13) {
b26eefb6 3889 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3890 } else {
b26eefb6
PB
3891 TCGv index;
3892 index = load_reg(s, rm);
3893 tcg_gen_add_i32(base, base, index);
3894 dead_tmp(index);
9ee6e8bb 3895 }
b26eefb6 3896 store_reg(s, rn, base);
9ee6e8bb
PB
3897 }
3898 return 0;
3899}
3b46e624 3900
8f8e3aa4
PB
3901/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3902static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3903{
3904 tcg_gen_and_i32(t, t, c);
3905 tcg_gen_bic_i32(f, f, c);
3906 tcg_gen_or_i32(dest, t, f);
3907}
3908
a7812ae4 3909static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3910{
3911 switch (size) {
3912 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3913 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3914 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3915 default: abort();
3916 }
3917}
3918
a7812ae4 3919static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3920{
3921 switch (size) {
3922 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3923 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3924 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3925 default: abort();
3926 }
3927}
3928
a7812ae4 3929static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3930{
3931 switch (size) {
3932 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3933 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3934 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3935 default: abort();
3936 }
3937}
3938
3939static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3940 int q, int u)
3941{
3942 if (q) {
3943 if (u) {
3944 switch (size) {
3945 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3946 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3947 default: abort();
3948 }
3949 } else {
3950 switch (size) {
3951 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3952 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3953 default: abort();
3954 }
3955 }
3956 } else {
3957 if (u) {
3958 switch (size) {
3959 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3960 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3961 default: abort();
3962 }
3963 } else {
3964 switch (size) {
3965 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3966 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3967 default: abort();
3968 }
3969 }
3970 }
3971}
3972
a7812ae4 3973static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
3974{
3975 if (u) {
3976 switch (size) {
3977 case 0: gen_helper_neon_widen_u8(dest, src); break;
3978 case 1: gen_helper_neon_widen_u16(dest, src); break;
3979 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3980 default: abort();
3981 }
3982 } else {
3983 switch (size) {
3984 case 0: gen_helper_neon_widen_s8(dest, src); break;
3985 case 1: gen_helper_neon_widen_s16(dest, src); break;
3986 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3987 default: abort();
3988 }
3989 }
3990 dead_tmp(src);
3991}
3992
3993static inline void gen_neon_addl(int size)
3994{
3995 switch (size) {
3996 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3997 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3998 case 2: tcg_gen_add_i64(CPU_V001); break;
3999 default: abort();
4000 }
4001}
4002
4003static inline void gen_neon_subl(int size)
4004{
4005 switch (size) {
4006 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4007 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4008 case 2: tcg_gen_sub_i64(CPU_V001); break;
4009 default: abort();
4010 }
4011}
4012
a7812ae4 4013static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4014{
4015 switch (size) {
4016 case 0: gen_helper_neon_negl_u16(var, var); break;
4017 case 1: gen_helper_neon_negl_u32(var, var); break;
4018 case 2: gen_helper_neon_negl_u64(var, var); break;
4019 default: abort();
4020 }
4021}
4022
a7812ae4 4023static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4024{
4025 switch (size) {
4026 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4027 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4028 default: abort();
4029 }
4030}
4031
a7812ae4 4032static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
ad69471c 4033{
a7812ae4 4034 TCGv_i64 tmp;
ad69471c
PB
4035
4036 switch ((size << 1) | u) {
4037 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4038 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4039 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4040 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4041 case 4:
4042 tmp = gen_muls_i64_i32(a, b);
4043 tcg_gen_mov_i64(dest, tmp);
4044 break;
4045 case 5:
4046 tmp = gen_mulu_i64_i32(a, b);
4047 tcg_gen_mov_i64(dest, tmp);
4048 break;
4049 default: abort();
4050 }
4051 if (size < 2) {
4052 dead_tmp(b);
4053 dead_tmp(a);
4054 }
4055}
4056
9ee6e8bb
PB
4057/* Translate a NEON data processing instruction. Return nonzero if the
4058 instruction is invalid.
ad69471c
PB
4059 We process data in a mixture of 32-bit and 64-bit chunks.
4060 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4061
9ee6e8bb
PB
4062static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4063{
4064 int op;
4065 int q;
4066 int rd, rn, rm;
4067 int size;
4068 int shift;
4069 int pass;
4070 int count;
4071 int pairwise;
4072 int u;
4073 int n;
4074 uint32_t imm;
8f8e3aa4
PB
4075 TCGv tmp;
4076 TCGv tmp2;
4077 TCGv tmp3;
a7812ae4 4078 TCGv_i64 tmp64;
9ee6e8bb
PB
4079
4080 if (!vfp_enabled(env))
4081 return 1;
4082 q = (insn & (1 << 6)) != 0;
4083 u = (insn >> 24) & 1;
4084 VFP_DREG_D(rd, insn);
4085 VFP_DREG_N(rn, insn);
4086 VFP_DREG_M(rm, insn);
4087 size = (insn >> 20) & 3;
4088 if ((insn & (1 << 23)) == 0) {
4089 /* Three register same length. */
4090 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4091 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4092 || op == 10 || op == 11 || op == 16)) {
4093 /* 64-bit element instructions. */
9ee6e8bb 4094 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4095 neon_load_reg64(cpu_V0, rn + pass);
4096 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4097 switch (op) {
4098 case 1: /* VQADD */
4099 if (u) {
ad69471c 4100 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4101 } else {
ad69471c 4102 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4103 }
9ee6e8bb
PB
4104 break;
4105 case 5: /* VQSUB */
4106 if (u) {
ad69471c
PB
4107 gen_helper_neon_sub_saturate_u64(CPU_V001);
4108 } else {
4109 gen_helper_neon_sub_saturate_s64(CPU_V001);
4110 }
4111 break;
4112 case 8: /* VSHL */
4113 if (u) {
4114 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4115 } else {
4116 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4117 }
4118 break;
4119 case 9: /* VQSHL */
4120 if (u) {
4121 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4122 cpu_V0, cpu_V0);
4123 } else {
4124 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4125 cpu_V1, cpu_V0);
4126 }
4127 break;
4128 case 10: /* VRSHL */
4129 if (u) {
4130 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4131 } else {
ad69471c
PB
4132 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4133 }
4134 break;
4135 case 11: /* VQRSHL */
4136 if (u) {
4137 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4138 cpu_V1, cpu_V0);
4139 } else {
4140 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4141 cpu_V1, cpu_V0);
1e8d4eec 4142 }
9ee6e8bb
PB
4143 break;
4144 case 16:
4145 if (u) {
ad69471c 4146 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4147 } else {
ad69471c 4148 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4149 }
4150 break;
4151 default:
4152 abort();
2c0262af 4153 }
ad69471c 4154 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4155 }
9ee6e8bb 4156 return 0;
2c0262af 4157 }
9ee6e8bb
PB
4158 switch (op) {
4159 case 8: /* VSHL */
4160 case 9: /* VQSHL */
4161 case 10: /* VRSHL */
ad69471c 4162 case 11: /* VQRSHL */
9ee6e8bb 4163 {
ad69471c
PB
4164 int rtmp;
4165 /* Shift instruction operands are reversed. */
4166 rtmp = rn;
9ee6e8bb 4167 rn = rm;
ad69471c 4168 rm = rtmp;
9ee6e8bb
PB
4169 pairwise = 0;
4170 }
2c0262af 4171 break;
9ee6e8bb
PB
4172 case 20: /* VPMAX */
4173 case 21: /* VPMIN */
4174 case 23: /* VPADD */
4175 pairwise = 1;
2c0262af 4176 break;
9ee6e8bb
PB
4177 case 26: /* VPADD (float) */
4178 pairwise = (u && size < 2);
2c0262af 4179 break;
9ee6e8bb
PB
4180 case 30: /* VPMIN/VPMAX (float) */
4181 pairwise = u;
2c0262af 4182 break;
9ee6e8bb
PB
4183 default:
4184 pairwise = 0;
2c0262af 4185 break;
9ee6e8bb
PB
4186 }
4187 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4188
4189 if (pairwise) {
4190 /* Pairwise. */
4191 if (q)
4192 n = (pass & 1) * 2;
2c0262af 4193 else
9ee6e8bb
PB
4194 n = 0;
4195 if (pass < q + 1) {
4196 NEON_GET_REG(T0, rn, n);
4197 NEON_GET_REG(T1, rn, n + 1);
4198 } else {
4199 NEON_GET_REG(T0, rm, n);
4200 NEON_GET_REG(T1, rm, n + 1);
4201 }
4202 } else {
4203 /* Elementwise. */
4204 NEON_GET_REG(T0, rn, pass);
4205 NEON_GET_REG(T1, rm, pass);
4206 }
4207 switch (op) {
4208 case 0: /* VHADD */
4209 GEN_NEON_INTEGER_OP(hadd);
4210 break;
4211 case 1: /* VQADD */
ad69471c 4212 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4213 break;
9ee6e8bb
PB
4214 case 2: /* VRHADD */
4215 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4216 break;
9ee6e8bb
PB
4217 case 3: /* Logic ops. */
4218 switch ((u << 2) | size) {
4219 case 0: /* VAND */
2c0262af 4220 gen_op_andl_T0_T1();
9ee6e8bb
PB
4221 break;
4222 case 1: /* BIC */
4223 gen_op_bicl_T0_T1();
4224 break;
4225 case 2: /* VORR */
4226 gen_op_orl_T0_T1();
4227 break;
4228 case 3: /* VORN */
4229 gen_op_notl_T1();
4230 gen_op_orl_T0_T1();
4231 break;
4232 case 4: /* VEOR */
4233 gen_op_xorl_T0_T1();
4234 break;
4235 case 5: /* VBSL */
8f8e3aa4
PB
4236 tmp = neon_load_reg(rd, pass);
4237 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4238 dead_tmp(tmp);
9ee6e8bb
PB
4239 break;
4240 case 6: /* VBIT */
8f8e3aa4
PB
4241 tmp = neon_load_reg(rd, pass);
4242 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4243 dead_tmp(tmp);
9ee6e8bb
PB
4244 break;
4245 case 7: /* VBIF */
8f8e3aa4
PB
4246 tmp = neon_load_reg(rd, pass);
4247 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4248 dead_tmp(tmp);
9ee6e8bb 4249 break;
2c0262af
FB
4250 }
4251 break;
9ee6e8bb
PB
4252 case 4: /* VHSUB */
4253 GEN_NEON_INTEGER_OP(hsub);
4254 break;
4255 case 5: /* VQSUB */
ad69471c 4256 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4257 break;
9ee6e8bb
PB
4258 case 6: /* VCGT */
4259 GEN_NEON_INTEGER_OP(cgt);
4260 break;
4261 case 7: /* VCGE */
4262 GEN_NEON_INTEGER_OP(cge);
4263 break;
4264 case 8: /* VSHL */
ad69471c 4265 GEN_NEON_INTEGER_OP(shl);
2c0262af 4266 break;
9ee6e8bb 4267 case 9: /* VQSHL */
ad69471c 4268 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4269 break;
9ee6e8bb 4270 case 10: /* VRSHL */
ad69471c 4271 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4272 break;
9ee6e8bb 4273 case 11: /* VQRSHL */
ad69471c 4274 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4275 break;
4276 case 12: /* VMAX */
4277 GEN_NEON_INTEGER_OP(max);
4278 break;
4279 case 13: /* VMIN */
4280 GEN_NEON_INTEGER_OP(min);
4281 break;
4282 case 14: /* VABD */
4283 GEN_NEON_INTEGER_OP(abd);
4284 break;
4285 case 15: /* VABA */
4286 GEN_NEON_INTEGER_OP(abd);
4287 NEON_GET_REG(T1, rd, pass);
4288 gen_neon_add(size);
4289 break;
4290 case 16:
4291 if (!u) { /* VADD */
4292 if (gen_neon_add(size))
4293 return 1;
4294 } else { /* VSUB */
4295 switch (size) {
ad69471c
PB
4296 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4297 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4298 case 2: gen_op_subl_T0_T1(); break;
4299 default: return 1;
4300 }
4301 }
4302 break;
4303 case 17:
4304 if (!u) { /* VTST */
4305 switch (size) {
ad69471c
PB
4306 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4307 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4308 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4309 default: return 1;
4310 }
4311 } else { /* VCEQ */
4312 switch (size) {
ad69471c
PB
4313 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4314 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4315 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4316 default: return 1;
4317 }
4318 }
4319 break;
4320 case 18: /* Multiply. */
4321 switch (size) {
ad69471c
PB
4322 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4323 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4324 case 2: gen_op_mul_T0_T1(); break;
4325 default: return 1;
4326 }
4327 NEON_GET_REG(T1, rd, pass);
4328 if (u) { /* VMLS */
ad69471c 4329 gen_neon_rsb(size);
9ee6e8bb
PB
4330 } else { /* VMLA */
4331 gen_neon_add(size);
4332 }
4333 break;
4334 case 19: /* VMUL */
4335 if (u) { /* polynomial */
ad69471c 4336 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4337 } else { /* Integer */
4338 switch (size) {
ad69471c
PB
4339 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4340 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4341 case 2: gen_op_mul_T0_T1(); break;
4342 default: return 1;
4343 }
4344 }
4345 break;
4346 case 20: /* VPMAX */
4347 GEN_NEON_INTEGER_OP(pmax);
4348 break;
4349 case 21: /* VPMIN */
4350 GEN_NEON_INTEGER_OP(pmin);
4351 break;
4352 case 22: /* Hultiply high. */
4353 if (!u) { /* VQDMULH */
4354 switch (size) {
ad69471c
PB
4355 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4356 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4357 default: return 1;
4358 }
4359 } else { /* VQRDHMUL */
4360 switch (size) {
ad69471c
PB
4361 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4362 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4363 default: return 1;
4364 }
4365 }
4366 break;
4367 case 23: /* VPADD */
4368 if (u)
4369 return 1;
4370 switch (size) {
ad69471c
PB
4371 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4372 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4373 case 2: gen_op_addl_T0_T1(); break;
4374 default: return 1;
4375 }
4376 break;
4377 case 26: /* Floating point arithnetic. */
4378 switch ((u << 2) | size) {
4379 case 0: /* VADD */
ad69471c 4380 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4381 break;
4382 case 2: /* VSUB */
ad69471c 4383 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4384 break;
4385 case 4: /* VPADD */
ad69471c 4386 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4387 break;
4388 case 6: /* VABD */
ad69471c 4389 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4390 break;
4391 default:
4392 return 1;
4393 }
4394 break;
4395 case 27: /* Float multiply. */
ad69471c 4396 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4397 if (!u) {
4398 NEON_GET_REG(T1, rd, pass);
4399 if (size == 0) {
ad69471c 4400 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4401 } else {
ad69471c 4402 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4403 }
4404 }
4405 break;
4406 case 28: /* Float compare. */
4407 if (!u) {
ad69471c 4408 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4409 } else {
9ee6e8bb 4410 if (size == 0)
ad69471c 4411 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4412 else
ad69471c 4413 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4414 }
2c0262af 4415 break;
9ee6e8bb
PB
4416 case 29: /* Float compare absolute. */
4417 if (!u)
4418 return 1;
4419 if (size == 0)
ad69471c 4420 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4421 else
ad69471c 4422 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4423 break;
9ee6e8bb
PB
4424 case 30: /* Float min/max. */
4425 if (size == 0)
ad69471c 4426 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4427 else
ad69471c 4428 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4429 break;
4430 case 31:
4431 if (size == 0)
4373f3ce 4432 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4433 else
4373f3ce 4434 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4435 break;
9ee6e8bb
PB
4436 default:
4437 abort();
2c0262af 4438 }
9ee6e8bb
PB
4439 /* Save the result. For elementwise operations we can put it
4440 straight into the destination register. For pairwise operations
4441 we have to be careful to avoid clobbering the source operands. */
4442 if (pairwise && rd == rm) {
4443 gen_neon_movl_scratch_T0(pass);
4444 } else {
4445 NEON_SET_REG(T0, rd, pass);
4446 }
4447
4448 } /* for pass */
4449 if (pairwise && rd == rm) {
4450 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4451 gen_neon_movl_T0_scratch(pass);
4452 NEON_SET_REG(T0, rd, pass);
4453 }
4454 }
ad69471c 4455 /* End of 3 register same size operations. */
9ee6e8bb
PB
4456 } else if (insn & (1 << 4)) {
4457 if ((insn & 0x00380080) != 0) {
4458 /* Two registers and shift. */
4459 op = (insn >> 8) & 0xf;
4460 if (insn & (1 << 7)) {
4461 /* 64-bit shift. */
4462 size = 3;
4463 } else {
4464 size = 2;
4465 while ((insn & (1 << (size + 19))) == 0)
4466 size--;
4467 }
4468 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
 4469 /* To avoid excessive duplication of ops we implement shift
4470 by immediate using the variable shift operations. */
4471 if (op < 8) {
4472 /* Shift by immediate:
4473 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4474 /* Right shifts are encoded as N - shift, where N is the
4475 element size in bits. */
4476 if (op <= 4)
4477 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4478 if (size == 3) {
4479 count = q + 1;
4480 } else {
4481 count = q ? 4: 2;
4482 }
4483 switch (size) {
4484 case 0:
4485 imm = (uint8_t) shift;
4486 imm |= imm << 8;
4487 imm |= imm << 16;
4488 break;
4489 case 1:
4490 imm = (uint16_t) shift;
4491 imm |= imm << 16;
4492 break;
4493 case 2:
4494 case 3:
4495 imm = shift;
4496 break;
4497 default:
4498 abort();
4499 }
4500
4501 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4502 if (size == 3) {
4503 neon_load_reg64(cpu_V0, rm + pass);
4504 tcg_gen_movi_i64(cpu_V1, imm);
4505 switch (op) {
4506 case 0: /* VSHR */
4507 case 1: /* VSRA */
4508 if (u)
4509 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4510 else
ad69471c 4511 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4512 break;
ad69471c
PB
4513 case 2: /* VRSHR */
4514 case 3: /* VRSRA */
4515 if (u)
4516 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4517 else
ad69471c 4518 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4519 break;
ad69471c
PB
4520 case 4: /* VSRI */
4521 if (!u)
4522 return 1;
4523 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4524 break;
4525 case 5: /* VSHL, VSLI */
4526 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4527 break;
4528 case 6: /* VQSHL */
4529 if (u)
4530 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4531 else
ad69471c
PB
4532 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4533 break;
4534 case 7: /* VQSHLU */
4535 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4536 break;
9ee6e8bb 4537 }
ad69471c
PB
4538 if (op == 1 || op == 3) {
4539 /* Accumulate. */
4540 neon_load_reg64(cpu_V0, rd + pass);
4541 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4542 } else if (op == 4 || (op == 5 && u)) {
4543 /* Insert */
4544 cpu_abort(env, "VS[LR]I.64 not implemented");
4545 }
4546 neon_store_reg64(cpu_V0, rd + pass);
4547 } else { /* size < 3 */
4548 /* Operands in T0 and T1. */
4549 gen_op_movl_T1_im(imm);
4550 NEON_GET_REG(T0, rm, pass);
4551 switch (op) {
4552 case 0: /* VSHR */
4553 case 1: /* VSRA */
4554 GEN_NEON_INTEGER_OP(shl);
4555 break;
4556 case 2: /* VRSHR */
4557 case 3: /* VRSRA */
4558 GEN_NEON_INTEGER_OP(rshl);
4559 break;
4560 case 4: /* VSRI */
4561 if (!u)
4562 return 1;
4563 GEN_NEON_INTEGER_OP(shl);
4564 break;
4565 case 5: /* VSHL, VSLI */
4566 switch (size) {
4567 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4568 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4569 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4570 default: return 1;
4571 }
4572 break;
4573 case 6: /* VQSHL */
4574 GEN_NEON_INTEGER_OP_ENV(qshl);
4575 break;
4576 case 7: /* VQSHLU */
4577 switch (size) {
4578 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4579 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4580 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4581 default: return 1;
4582 }
4583 break;
4584 }
4585
4586 if (op == 1 || op == 3) {
4587 /* Accumulate. */
4588 NEON_GET_REG(T1, rd, pass);
4589 gen_neon_add(size);
4590 } else if (op == 4 || (op == 5 && u)) {
4591 /* Insert */
4592 switch (size) {
4593 case 0:
4594 if (op == 4)
4595 imm = 0xff >> -shift;
4596 else
4597 imm = (uint8_t)(0xff << shift);
4598 imm |= imm << 8;
4599 imm |= imm << 16;
4600 break;
4601 case 1:
4602 if (op == 4)
4603 imm = 0xffff >> -shift;
4604 else
4605 imm = (uint16_t)(0xffff << shift);
4606 imm |= imm << 16;
4607 break;
4608 case 2:
4609 if (op == 4)
4610 imm = 0xffffffffu >> -shift;
4611 else
4612 imm = 0xffffffffu << shift;
4613 break;
4614 default:
4615 abort();
4616 }
4617 tmp = neon_load_reg(rd, pass);
4618 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4619 tcg_gen_andi_i32(tmp, tmp, ~imm);
4620 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4621 }
9ee6e8bb
PB
4622 NEON_SET_REG(T0, rd, pass);
4623 }
4624 } /* for pass */
4625 } else if (op < 10) {
ad69471c 4626 /* Shift by immediate and narrow:
9ee6e8bb
PB
4627 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4628 shift = shift - (1 << (size + 3));
4629 size++;
9ee6e8bb
PB
4630 switch (size) {
4631 case 1:
ad69471c 4632 imm = (uint16_t)shift;
9ee6e8bb 4633 imm |= imm << 16;
ad69471c 4634 tmp2 = tcg_const_i32(imm);
a7812ae4 4635 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4636 break;
4637 case 2:
ad69471c
PB
4638 imm = (uint32_t)shift;
4639 tmp2 = tcg_const_i32(imm);
a7812ae4 4640 TCGV_UNUSED_I64(tmp64);
4cc633c3 4641 break;
9ee6e8bb 4642 case 3:
a7812ae4
PB
4643 tmp64 = tcg_const_i64(shift);
4644 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4645 break;
4646 default:
4647 abort();
4648 }
4649
ad69471c
PB
4650 for (pass = 0; pass < 2; pass++) {
4651 if (size == 3) {
4652 neon_load_reg64(cpu_V0, rm + pass);
4653 if (q) {
4654 if (u)
a7812ae4 4655 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4656 else
a7812ae4 4657 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4658 } else {
4659 if (u)
a7812ae4 4660 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4661 else
a7812ae4 4662 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4663 }
2c0262af 4664 } else {
ad69471c
PB
4665 tmp = neon_load_reg(rm + pass, 0);
4666 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4667 tmp3 = neon_load_reg(rm + pass, 1);
4668 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4669 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4670 dead_tmp(tmp);
36aa55dc 4671 dead_tmp(tmp3);
9ee6e8bb 4672 }
ad69471c
PB
4673 tmp = new_tmp();
4674 if (op == 8 && !u) {
4675 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4676 } else {
ad69471c
PB
4677 if (op == 8)
4678 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4679 else
ad69471c
PB
4680 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4681 }
4682 if (pass == 0) {
4683 tmp2 = tmp;
4684 } else {
4685 neon_store_reg(rd, 0, tmp2);
4686 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4687 }
4688 } /* for pass */
4689 } else if (op == 10) {
4690 /* VSHLL */
ad69471c 4691 if (q || size == 3)
9ee6e8bb 4692 return 1;
ad69471c
PB
4693 tmp = neon_load_reg(rm, 0);
4694 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4695 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4696 if (pass == 1)
4697 tmp = tmp2;
4698
4699 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4700
9ee6e8bb
PB
4701 if (shift != 0) {
4702 /* The shift is less than the width of the source
ad69471c
PB
4703 type, so we can just shift the whole register. */
4704 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4705 if (size < 2 || !u) {
4706 uint64_t imm64;
4707 if (size == 0) {
4708 imm = (0xffu >> (8 - shift));
4709 imm |= imm << 16;
4710 } else {
4711 imm = 0xffff >> (16 - shift);
9ee6e8bb 4712 }
ad69471c
PB
4713 imm64 = imm | (((uint64_t)imm) << 32);
4714 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4715 }
4716 }
ad69471c 4717 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4718 }
4719 } else if (op == 15 || op == 16) {
4720 /* VCVT fixed-point. */
4721 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4722 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4723 if (op & 1) {
4724 if (u)
4373f3ce 4725 gen_vfp_ulto(0, shift);
9ee6e8bb 4726 else
4373f3ce 4727 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4728 } else {
4729 if (u)
4373f3ce 4730 gen_vfp_toul(0, shift);
9ee6e8bb 4731 else
4373f3ce 4732 gen_vfp_tosl(0, shift);
2c0262af 4733 }
4373f3ce 4734 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4735 }
4736 } else {
9ee6e8bb
PB
4737 return 1;
4738 }
4739 } else { /* (insn & 0x00380080) == 0 */
4740 int invert;
4741
4742 op = (insn >> 8) & 0xf;
4743 /* One register and immediate. */
4744 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4745 invert = (insn & (1 << 5)) != 0;
4746 switch (op) {
4747 case 0: case 1:
4748 /* no-op */
4749 break;
4750 case 2: case 3:
4751 imm <<= 8;
4752 break;
4753 case 4: case 5:
4754 imm <<= 16;
4755 break;
4756 case 6: case 7:
4757 imm <<= 24;
4758 break;
4759 case 8: case 9:
4760 imm |= imm << 16;
4761 break;
4762 case 10: case 11:
4763 imm = (imm << 8) | (imm << 24);
4764 break;
4765 case 12:
4766 imm = (imm < 8) | 0xff;
4767 break;
4768 case 13:
4769 imm = (imm << 16) | 0xffff;
4770 break;
4771 case 14:
4772 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4773 if (invert)
4774 imm = ~imm;
4775 break;
4776 case 15:
4777 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4778 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4779 break;
4780 }
4781 if (invert)
4782 imm = ~imm;
4783
4784 if (op != 14 || !invert)
4785 gen_op_movl_T1_im(imm);
4786
4787 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4788 if (op & 1 && op < 12) {
ad69471c 4789 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4790 if (invert) {
4791 /* The immediate value has already been inverted, so
4792 BIC becomes AND. */
ad69471c 4793 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4794 } else {
ad69471c 4795 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4796 }
9ee6e8bb 4797 } else {
ad69471c
PB
4798 /* VMOV, VMVN. */
4799 tmp = new_tmp();
9ee6e8bb 4800 if (op == 14 && invert) {
ad69471c
PB
4801 uint32_t val;
4802 val = 0;
9ee6e8bb
PB
4803 for (n = 0; n < 4; n++) {
4804 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4805 val |= 0xff << (n * 8);
9ee6e8bb 4806 }
ad69471c
PB
4807 tcg_gen_movi_i32(tmp, val);
4808 } else {
4809 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4810 }
9ee6e8bb 4811 }
ad69471c 4812 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4813 }
4814 }
e4b3861d 4815 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4816 if (size != 3) {
4817 op = (insn >> 8) & 0xf;
4818 if ((insn & (1 << 6)) == 0) {
4819 /* Three registers of different lengths. */
4820 int src1_wide;
4821 int src2_wide;
4822 int prewiden;
4823 /* prewiden, src1_wide, src2_wide */
4824 static const int neon_3reg_wide[16][3] = {
4825 {1, 0, 0}, /* VADDL */
4826 {1, 1, 0}, /* VADDW */
4827 {1, 0, 0}, /* VSUBL */
4828 {1, 1, 0}, /* VSUBW */
4829 {0, 1, 1}, /* VADDHN */
4830 {0, 0, 0}, /* VABAL */
4831 {0, 1, 1}, /* VSUBHN */
4832 {0, 0, 0}, /* VABDL */
4833 {0, 0, 0}, /* VMLAL */
4834 {0, 0, 0}, /* VQDMLAL */
4835 {0, 0, 0}, /* VMLSL */
4836 {0, 0, 0}, /* VQDMLSL */
4837 {0, 0, 0}, /* Integer VMULL */
4838 {0, 0, 0}, /* VQDMULL */
4839 {0, 0, 0} /* Polynomial VMULL */
4840 };
4841
4842 prewiden = neon_3reg_wide[op][0];
4843 src1_wide = neon_3reg_wide[op][1];
4844 src2_wide = neon_3reg_wide[op][2];
4845
ad69471c
PB
4846 if (size == 0 && (op == 9 || op == 11 || op == 13))
4847 return 1;
4848
9ee6e8bb
PB
4849 /* Avoid overlapping operands. Wide source operands are
4850 always aligned so will never overlap with wide
4851 destinations in problematic ways. */
8f8e3aa4
PB
4852 if (rd == rm && !src2_wide) {
4853 NEON_GET_REG(T0, rm, 1);
4854 gen_neon_movl_scratch_T0(2);
4855 } else if (rd == rn && !src1_wide) {
4856 NEON_GET_REG(T0, rn, 1);
4857 gen_neon_movl_scratch_T0(2);
9ee6e8bb 4858 }
a50f5b91 4859 TCGV_UNUSED(tmp3);
9ee6e8bb 4860 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4861 if (src1_wide) {
4862 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4863 TCGV_UNUSED(tmp);
9ee6e8bb 4864 } else {
ad69471c
PB
4865 if (pass == 1 && rd == rn) {
4866 gen_neon_movl_T0_scratch(2);
4867 tmp = new_tmp();
4868 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4869 } else {
ad69471c
PB
4870 tmp = neon_load_reg(rn, pass);
4871 }
4872 if (prewiden) {
4873 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4874 }
4875 }
ad69471c
PB
4876 if (src2_wide) {
4877 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4878 TCGV_UNUSED(tmp2);
9ee6e8bb 4879 } else {
ad69471c 4880 if (pass == 1 && rd == rm) {
8f8e3aa4 4881 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4882 tmp2 = new_tmp();
4883 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4884 } else {
ad69471c
PB
4885 tmp2 = neon_load_reg(rm, pass);
4886 }
4887 if (prewiden) {
4888 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4889 }
9ee6e8bb
PB
4890 }
4891 switch (op) {
4892 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4893 gen_neon_addl(size);
9ee6e8bb
PB
4894 break;
4895 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
ad69471c 4896 gen_neon_subl(size);
9ee6e8bb
PB
4897 break;
4898 case 5: case 7: /* VABAL, VABDL */
4899 switch ((size << 1) | u) {
ad69471c
PB
4900 case 0:
4901 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4902 break;
4903 case 1:
4904 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4905 break;
4906 case 2:
4907 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4908 break;
4909 case 3:
4910 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4911 break;
4912 case 4:
4913 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4914 break;
4915 case 5:
4916 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4917 break;
9ee6e8bb
PB
4918 default: abort();
4919 }
ad69471c
PB
4920 dead_tmp(tmp2);
4921 dead_tmp(tmp);
9ee6e8bb
PB
4922 break;
4923 case 8: case 9: case 10: case 11: case 12: case 13:
4924 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4925 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
4926 break;
4927 case 14: /* Polynomial VMULL */
4928 cpu_abort(env, "Polynomial VMULL not implemented");
4929
4930 default: /* 15 is RESERVED. */
4931 return 1;
4932 }
4933 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4934 /* Accumulate. */
4935 if (op == 10 || op == 11) {
ad69471c 4936 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4937 }
4938
9ee6e8bb 4939 if (op != 13) {
ad69471c 4940 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4941 }
4942
4943 switch (op) {
4944 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4945 gen_neon_addl(size);
9ee6e8bb
PB
4946 break;
4947 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4948 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4949 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4950 break;
9ee6e8bb
PB
4951 /* Fall through. */
4952 case 13: /* VQDMULL */
ad69471c 4953 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4954 break;
4955 default:
4956 abort();
4957 }
ad69471c 4958 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4959 } else if (op == 4 || op == 6) {
4960 /* Narrowing operation. */
ad69471c 4961 tmp = new_tmp();
9ee6e8bb
PB
4962 if (u) {
4963 switch (size) {
ad69471c
PB
4964 case 0:
4965 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4966 break;
4967 case 1:
4968 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4969 break;
4970 case 2:
4971 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4972 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4973 break;
9ee6e8bb
PB
4974 default: abort();
4975 }
4976 } else {
4977 switch (size) {
ad69471c
PB
4978 case 0:
4979 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4980 break;
4981 case 1:
4982 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4983 break;
4984 case 2:
4985 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4986 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4987 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4988 break;
9ee6e8bb
PB
4989 default: abort();
4990 }
4991 }
ad69471c
PB
4992 if (pass == 0) {
4993 tmp3 = tmp;
4994 } else {
4995 neon_store_reg(rd, 0, tmp3);
4996 neon_store_reg(rd, 1, tmp);
4997 }
9ee6e8bb
PB
4998 } else {
4999 /* Write back the result. */
ad69471c 5000 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5001 }
5002 }
5003 } else {
5004 /* Two registers and a scalar. */
5005 switch (op) {
5006 case 0: /* Integer VMLA scalar */
5007 case 1: /* Float VMLA scalar */
5008 case 4: /* Integer VMLS scalar */
5009 case 5: /* Floating point VMLS scalar */
5010 case 8: /* Integer VMUL scalar */
5011 case 9: /* Floating point VMUL scalar */
5012 case 12: /* VQDMULH scalar */
5013 case 13: /* VQRDMULH scalar */
5014 gen_neon_get_scalar(size, rm);
8f8e3aa4 5015 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
5016 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5017 if (pass != 0)
8f8e3aa4 5018 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
5019 NEON_GET_REG(T1, rn, pass);
5020 if (op == 12) {
5021 if (size == 1) {
ad69471c 5022 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 5023 } else {
ad69471c 5024 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5025 }
5026 } else if (op == 13) {
5027 if (size == 1) {
ad69471c 5028 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5029 } else {
ad69471c 5030 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5031 }
5032 } else if (op & 1) {
ad69471c 5033 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5034 } else {
5035 switch (size) {
ad69471c
PB
5036 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5037 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5038 case 2: gen_op_mul_T0_T1(); break;
5039 default: return 1;
5040 }
5041 }
5042 if (op < 8) {
5043 /* Accumulate. */
5044 NEON_GET_REG(T1, rd, pass);
5045 switch (op) {
5046 case 0:
5047 gen_neon_add(size);
5048 break;
5049 case 1:
ad69471c 5050 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5051 break;
5052 case 4:
ad69471c 5053 gen_neon_rsb(size);
9ee6e8bb
PB
5054 break;
5055 case 5:
ad69471c 5056 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5057 break;
5058 default:
5059 abort();
5060 }
5061 }
5062 NEON_SET_REG(T0, rd, pass);
5063 }
5064 break;
 5065 case 2: /* VMLAL scalar */
5066 case 3: /* VQDMLAL scalar */
5067 case 6: /* VMLSL scalar */
5068 case 7: /* VQDMLSL scalar */
5069 case 10: /* VMULL scalar */
5070 case 11: /* VQDMULL scalar */
ad69471c
PB
5071 if (size == 0 && (op == 3 || op == 7 || op == 11))
5072 return 1;
5073
9ee6e8bb 5074 gen_neon_get_scalar(size, rm);
ad69471c
PB
5075 NEON_GET_REG(T1, rn, 1);
5076
9ee6e8bb 5077 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5078 if (pass == 0) {
5079 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5080 } else {
ad69471c
PB
5081 tmp = new_tmp();
5082 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5083 }
ad69471c
PB
5084 tmp2 = new_tmp();
5085 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5086 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5087 if (op == 6 || op == 7) {
ad69471c
PB
5088 gen_neon_negl(cpu_V0, size);
5089 }
5090 if (op != 11) {
5091 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5092 }
9ee6e8bb
PB
5093 switch (op) {
5094 case 2: case 6:
ad69471c 5095 gen_neon_addl(size);
9ee6e8bb
PB
5096 break;
5097 case 3: case 7:
ad69471c
PB
5098 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5099 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5100 break;
5101 case 10:
5102 /* no-op */
5103 break;
5104 case 11:
ad69471c 5105 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5106 break;
5107 default:
5108 abort();
5109 }
ad69471c 5110 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5111 }
5112 break;
5113 default: /* 14 and 15 are RESERVED */
5114 return 1;
5115 }
5116 }
5117 } else { /* size == 3 */
5118 if (!u) {
5119 /* Extract. */
9ee6e8bb 5120 imm = (insn >> 8) & 0xf;
ad69471c
PB
5121 count = q + 1;
5122
5123 if (imm > 7 && !q)
5124 return 1;
5125
5126 if (imm == 0) {
5127 neon_load_reg64(cpu_V0, rn);
5128 if (q) {
5129 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5130 }
ad69471c
PB
5131 } else if (imm == 8) {
5132 neon_load_reg64(cpu_V0, rn + 1);
5133 if (q) {
5134 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5135 }
ad69471c 5136 } else if (q) {
a7812ae4 5137 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5138 if (imm < 8) {
5139 neon_load_reg64(cpu_V0, rn);
a7812ae4 5140 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5141 } else {
5142 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5143 neon_load_reg64(tmp64, rm);
ad69471c
PB
5144 }
5145 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5146 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5147 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5148 if (imm < 8) {
5149 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5150 } else {
ad69471c
PB
5151 neon_load_reg64(cpu_V1, rm + 1);
5152 imm -= 8;
9ee6e8bb 5153 }
ad69471c 5154 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5155 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5156 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5157 } else {
a7812ae4 5158 /* BUGFIX */
ad69471c 5159 neon_load_reg64(cpu_V0, rn);
a7812ae4 5160 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5161 neon_load_reg64(cpu_V1, rm);
a7812ae4 5162 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5163 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5164 }
5165 neon_store_reg64(cpu_V0, rd);
5166 if (q) {
5167 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5168 }
5169 } else if ((insn & (1 << 11)) == 0) {
5170 /* Two register misc. */
5171 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5172 size = (insn >> 18) & 3;
5173 switch (op) {
5174 case 0: /* VREV64 */
5175 if (size == 3)
5176 return 1;
5177 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5178 NEON_GET_REG(T0, rm, pass * 2);
5179 NEON_GET_REG(T1, rm, pass * 2 + 1);
5180 switch (size) {
66896cb8 5181 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5182 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5183 case 2: /* no-op */ break;
5184 default: abort();
5185 }
5186 NEON_SET_REG(T0, rd, pass * 2 + 1);
5187 if (size == 2) {
5188 NEON_SET_REG(T1, rd, pass * 2);
5189 } else {
5190 gen_op_movl_T0_T1();
5191 switch (size) {
66896cb8 5192 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5193 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5194 default: abort();
5195 }
5196 NEON_SET_REG(T0, rd, pass * 2);
5197 }
5198 }
5199 break;
5200 case 4: case 5: /* VPADDL */
5201 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5202 if (size == 3)
5203 return 1;
ad69471c
PB
5204 for (pass = 0; pass < q + 1; pass++) {
5205 tmp = neon_load_reg(rm, pass * 2);
5206 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5207 tmp = neon_load_reg(rm, pass * 2 + 1);
5208 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5209 switch (size) {
5210 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5211 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5212 case 2: tcg_gen_add_i64(CPU_V001); break;
5213 default: abort();
5214 }
9ee6e8bb
PB
5215 if (op >= 12) {
5216 /* Accumulate. */
ad69471c
PB
5217 neon_load_reg64(cpu_V1, rd + pass);
5218 gen_neon_addl(size);
9ee6e8bb 5219 }
ad69471c 5220 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5221 }
5222 break;
5223 case 33: /* VTRN */
5224 if (size == 2) {
5225 for (n = 0; n < (q ? 4 : 2); n += 2) {
5226 NEON_GET_REG(T0, rm, n);
5227 NEON_GET_REG(T1, rd, n + 1);
5228 NEON_SET_REG(T1, rm, n);
5229 NEON_SET_REG(T0, rd, n + 1);
5230 }
5231 } else {
5232 goto elementwise;
5233 }
5234 break;
5235 case 34: /* VUZP */
5236 /* Reg Before After
5237 Rd A3 A2 A1 A0 B2 B0 A2 A0
5238 Rm B3 B2 B1 B0 B3 B1 A3 A1
5239 */
5240 if (size == 3)
5241 return 1;
5242 gen_neon_unzip(rd, q, 0, size);
5243 gen_neon_unzip(rm, q, 4, size);
5244 if (q) {
5245 static int unzip_order_q[8] =
5246 {0, 2, 4, 6, 1, 3, 5, 7};
5247 for (n = 0; n < 8; n++) {
5248 int reg = (n < 4) ? rd : rm;
5249 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5250 NEON_SET_REG(T0, reg, n % 4);
5251 }
5252 } else {
5253 static int unzip_order[4] =
5254 {0, 4, 1, 5};
5255 for (n = 0; n < 4; n++) {
5256 int reg = (n < 2) ? rd : rm;
5257 gen_neon_movl_T0_scratch(unzip_order[n]);
5258 NEON_SET_REG(T0, reg, n % 2);
5259 }
5260 }
5261 break;
5262 case 35: /* VZIP */
5263 /* Reg Before After
5264 Rd A3 A2 A1 A0 B1 A1 B0 A0
5265 Rm B3 B2 B1 B0 B3 A3 B2 A2
5266 */
5267 if (size == 3)
5268 return 1;
5269 count = (q ? 4 : 2);
5270 for (n = 0; n < count; n++) {
5271 NEON_GET_REG(T0, rd, n);
5272 NEON_GET_REG(T1, rd, n);
5273 switch (size) {
ad69471c
PB
5274 case 0: gen_helper_neon_zip_u8(); break;
5275 case 1: gen_helper_neon_zip_u16(); break;
9ee6e8bb
PB
5276 case 2: /* no-op */; break;
5277 default: abort();
5278 }
5279 gen_neon_movl_scratch_T0(n * 2);
5280 gen_neon_movl_scratch_T1(n * 2 + 1);
5281 }
5282 for (n = 0; n < count * 2; n++) {
5283 int reg = (n < count) ? rd : rm;
5284 gen_neon_movl_T0_scratch(n);
5285 NEON_SET_REG(T0, reg, n % count);
5286 }
5287 break;
5288 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5289 if (size == 3)
5290 return 1;
a50f5b91 5291 TCGV_UNUSED(tmp2);
9ee6e8bb 5292 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5293 neon_load_reg64(cpu_V0, rm + pass);
5294 tmp = new_tmp();
9ee6e8bb 5295 if (op == 36 && q == 0) {
ad69471c 5296 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5297 } else if (q) {
ad69471c 5298 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5299 } else {
ad69471c
PB
5300 gen_neon_narrow_sats(size, tmp, cpu_V0);
5301 }
5302 if (pass == 0) {
5303 tmp2 = tmp;
5304 } else {
5305 neon_store_reg(rd, 0, tmp2);
5306 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5307 }
9ee6e8bb
PB
5308 }
5309 break;
5310 case 38: /* VSHLL */
ad69471c 5311 if (q || size == 3)
9ee6e8bb 5312 return 1;
ad69471c
PB
5313 tmp = neon_load_reg(rm, 0);
5314 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5315 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5316 if (pass == 1)
5317 tmp = tmp2;
5318 gen_neon_widen(cpu_V0, tmp, size, 1);
5319 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5320 }
5321 break;
5322 default:
5323 elementwise:
5324 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5325 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5326 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5327 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5328 } else {
5329 NEON_GET_REG(T0, rm, pass);
5330 }
5331 switch (op) {
5332 case 1: /* VREV32 */
5333 switch (size) {
66896cb8 5334 case 0: tcg_gen_bswap32_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5335 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5336 default: return 1;
5337 }
5338 break;
5339 case 2: /* VREV16 */
5340 if (size != 0)
5341 return 1;
3670669c 5342 gen_rev16(cpu_T[0]);
9ee6e8bb 5343 break;
9ee6e8bb
PB
5344 case 8: /* CLS */
5345 switch (size) {
ad69471c
PB
5346 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5347 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5348 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5349 default: return 1;
5350 }
5351 break;
5352 case 9: /* CLZ */
5353 switch (size) {
ad69471c
PB
5354 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5355 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5356 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5357 default: return 1;
5358 }
5359 break;
5360 case 10: /* CNT */
5361 if (size != 0)
5362 return 1;
ad69471c 5363 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5364 break;
5365 case 11: /* VNOT */
5366 if (size != 0)
5367 return 1;
5368 gen_op_notl_T0();
5369 break;
5370 case 14: /* VQABS */
5371 switch (size) {
ad69471c
PB
5372 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5373 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5374 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5375 default: return 1;
5376 }
5377 break;
5378 case 15: /* VQNEG */
5379 switch (size) {
ad69471c
PB
5380 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5381 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5382 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5383 default: return 1;
5384 }
5385 break;
5386 case 16: case 19: /* VCGT #0, VCLE #0 */
5387 gen_op_movl_T1_im(0);
5388 switch(size) {
ad69471c
PB
5389 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5390 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5391 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5392 default: return 1;
5393 }
5394 if (op == 19)
5395 gen_op_notl_T0();
5396 break;
5397 case 17: case 20: /* VCGE #0, VCLT #0 */
5398 gen_op_movl_T1_im(0);
5399 switch(size) {
ad69471c
PB
5400 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5401 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5402 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5403 default: return 1;
5404 }
5405 if (op == 20)
5406 gen_op_notl_T0();
5407 break;
5408 case 18: /* VCEQ #0 */
5409 gen_op_movl_T1_im(0);
5410 switch(size) {
ad69471c
PB
5411 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5412 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5413 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5414 default: return 1;
5415 }
5416 break;
5417 case 22: /* VABS */
5418 switch(size) {
ad69471c
PB
5419 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5420 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5421 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5422 default: return 1;
5423 }
5424 break;
5425 case 23: /* VNEG */
5426 gen_op_movl_T1_im(0);
ad69471c
PB
5427 if (size == 3)
5428 return 1;
5429 gen_neon_rsb(size);
9ee6e8bb
PB
5430 break;
5431 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5432 gen_op_movl_T1_im(0);
ad69471c 5433 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5434 if (op == 27)
5435 gen_op_notl_T0();
5436 break;
5437 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5438 gen_op_movl_T1_im(0);
ad69471c 5439 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5440 if (op == 28)
5441 gen_op_notl_T0();
5442 break;
5443 case 26: /* Float VCEQ #0 */
5444 gen_op_movl_T1_im(0);
ad69471c 5445 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5446 break;
5447 case 30: /* Float VABS */
4373f3ce 5448 gen_vfp_abs(0);
9ee6e8bb
PB
5449 break;
5450 case 31: /* Float VNEG */
4373f3ce 5451 gen_vfp_neg(0);
9ee6e8bb
PB
5452 break;
5453 case 32: /* VSWP */
5454 NEON_GET_REG(T1, rd, pass);
5455 NEON_SET_REG(T1, rm, pass);
5456 break;
5457 case 33: /* VTRN */
5458 NEON_GET_REG(T1, rd, pass);
5459 switch (size) {
ad69471c
PB
5460 case 0: gen_helper_neon_trn_u8(); break;
5461 case 1: gen_helper_neon_trn_u16(); break;
9ee6e8bb
PB
5462 case 2: abort();
5463 default: return 1;
5464 }
5465 NEON_SET_REG(T1, rm, pass);
5466 break;
5467 case 56: /* Integer VRECPE */
4373f3ce 5468 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5469 break;
5470 case 57: /* Integer VRSQRTE */
4373f3ce 5471 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5472 break;
5473 case 58: /* Float VRECPE */
4373f3ce 5474 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5475 break;
5476 case 59: /* Float VRSQRTE */
4373f3ce 5477 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5478 break;
5479 case 60: /* VCVT.F32.S32 */
4373f3ce 5480 gen_vfp_tosiz(0);
9ee6e8bb
PB
5481 break;
5482 case 61: /* VCVT.F32.U32 */
4373f3ce 5483 gen_vfp_touiz(0);
9ee6e8bb
PB
5484 break;
5485 case 62: /* VCVT.S32.F32 */
4373f3ce 5486 gen_vfp_sito(0);
9ee6e8bb
PB
5487 break;
5488 case 63: /* VCVT.U32.F32 */
4373f3ce 5489 gen_vfp_uito(0);
9ee6e8bb
PB
5490 break;
5491 default:
5492 /* Reserved: 21, 29, 39-56 */
5493 return 1;
5494 }
5495 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5496 tcg_gen_st_f32(cpu_F0s, cpu_env,
5497 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5498 } else {
5499 NEON_SET_REG(T0, rd, pass);
5500 }
5501 }
5502 break;
5503 }
5504 } else if ((insn & (1 << 10)) == 0) {
5505 /* VTBL, VTBX. */
3018f259 5506 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5507 if (insn & (1 << 6)) {
8f8e3aa4 5508 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5509 } else {
8f8e3aa4
PB
5510 tmp = new_tmp();
5511 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5512 }
8f8e3aa4
PB
5513 tmp2 = neon_load_reg(rm, 0);
5514 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5515 tcg_const_i32(n));
3018f259 5516 dead_tmp(tmp);
9ee6e8bb 5517 if (insn & (1 << 6)) {
8f8e3aa4 5518 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5519 } else {
8f8e3aa4
PB
5520 tmp = new_tmp();
5521 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5522 }
8f8e3aa4
PB
5523 tmp3 = neon_load_reg(rm, 1);
5524 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5525 tcg_const_i32(n));
5526 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5527 neon_store_reg(rd, 1, tmp3);
5528 dead_tmp(tmp);
9ee6e8bb
PB
5529 } else if ((insn & 0x380) == 0) {
5530 /* VDUP */
5531 if (insn & (1 << 19)) {
5532 NEON_SET_REG(T0, rm, 1);
5533 } else {
5534 NEON_SET_REG(T0, rm, 0);
5535 }
5536 if (insn & (1 << 16)) {
ad69471c 5537 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5538 } else if (insn & (1 << 17)) {
5539 if ((insn >> 18) & 1)
ad69471c 5540 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5541 else
ad69471c 5542 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5543 }
5544 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5545 NEON_SET_REG(T0, rd, pass);
5546 }
5547 } else {
5548 return 1;
5549 }
5550 }
5551 }
5552 return 0;
5553}
5554
fe1479c3
PB
5555static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5556{
5557 int crn = (insn >> 16) & 0xf;
5558 int crm = insn & 0xf;
5559 int op1 = (insn >> 21) & 7;
5560 int op2 = (insn >> 5) & 7;
5561 int rt = (insn >> 12) & 0xf;
5562 TCGv tmp;
5563
5564 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5565 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5566 /* TEECR */
5567 if (IS_USER(s))
5568 return 1;
5569 tmp = load_cpu_field(teecr);
5570 store_reg(s, rt, tmp);
5571 return 0;
5572 }
5573 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5574 /* TEEHBR */
5575 if (IS_USER(s) && (env->teecr & 1))
5576 return 1;
5577 tmp = load_cpu_field(teehbr);
5578 store_reg(s, rt, tmp);
5579 return 0;
5580 }
5581 }
5582 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5583 op1, crn, crm, op2);
5584 return 1;
5585}
5586
5587static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5588{
5589 int crn = (insn >> 16) & 0xf;
5590 int crm = insn & 0xf;
5591 int op1 = (insn >> 21) & 7;
5592 int op2 = (insn >> 5) & 7;
5593 int rt = (insn >> 12) & 0xf;
5594 TCGv tmp;
5595
5596 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5597 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5598 /* TEECR */
5599 if (IS_USER(s))
5600 return 1;
5601 tmp = load_reg(s, rt);
5602 gen_helper_set_teecr(cpu_env, tmp);
5603 dead_tmp(tmp);
5604 return 0;
5605 }
5606 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5607 /* TEEHBR */
5608 if (IS_USER(s) && (env->teecr & 1))
5609 return 1;
5610 tmp = load_reg(s, rt);
5611 store_cpu_field(tmp, teehbr);
5612 return 0;
5613 }
5614 }
5615 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5616 op1, crn, crm, op2);
5617 return 1;
5618}
5619
9ee6e8bb
PB
5620static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5621{
5622 int cpnum;
5623
5624 cpnum = (insn >> 8) & 0xf;
5625 if (arm_feature(env, ARM_FEATURE_XSCALE)
5626 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5627 return 1;
5628
5629 switch (cpnum) {
5630 case 0:
5631 case 1:
5632 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5633 return disas_iwmmxt_insn(env, s, insn);
5634 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5635 return disas_dsp_insn(env, s, insn);
5636 }
5637 return 1;
5638 case 10:
5639 case 11:
5640 return disas_vfp_insn (env, s, insn);
fe1479c3
PB
5641 case 14:
5642 /* Coprocessors 7-15 are architecturally reserved by ARM.
5643 Unfortunately Intel decided to ignore this. */
5644 if (arm_feature(env, ARM_FEATURE_XSCALE))
5645 goto board;
5646 if (insn & (1 << 20))
5647 return disas_cp14_read(env, s, insn);
5648 else
5649 return disas_cp14_write(env, s, insn);
9ee6e8bb
PB
5650 case 15:
5651 return disas_cp15_insn (env, s, insn);
5652 default:
fe1479c3 5653 board:
9ee6e8bb
PB
5654 /* Unknown coprocessor. See if the board has hooked it. */
5655 return disas_cp_insn (env, s, insn);
5656 }
5657}
5658
5e3f878a
PB
5659
5660/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5661static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5662{
5663 TCGv tmp;
5664 tmp = new_tmp();
5665 tcg_gen_trunc_i64_i32(tmp, val);
5666 store_reg(s, rlow, tmp);
5667 tmp = new_tmp();
5668 tcg_gen_shri_i64(val, val, 32);
5669 tcg_gen_trunc_i64_i32(tmp, val);
5670 store_reg(s, rhigh, tmp);
5671}
5672
5673/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5674static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5675{
a7812ae4 5676 TCGv_i64 tmp;
5e3f878a
PB
5677 TCGv tmp2;
5678
36aa55dc 5679 /* Load value and extend to 64 bits. */
a7812ae4 5680 tmp = tcg_temp_new_i64();
5e3f878a
PB
5681 tmp2 = load_reg(s, rlow);
5682 tcg_gen_extu_i32_i64(tmp, tmp2);
5683 dead_tmp(tmp2);
5684 tcg_gen_add_i64(val, val, tmp);
5685}
5686
5687/* load and add a 64-bit value from a register pair. */
a7812ae4 5688static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5689{
a7812ae4 5690 TCGv_i64 tmp;
36aa55dc
PB
5691 TCGv tmpl;
5692 TCGv tmph;
5e3f878a
PB
5693
5694 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5695 tmpl = load_reg(s, rlow);
5696 tmph = load_reg(s, rhigh);
a7812ae4 5697 tmp = tcg_temp_new_i64();
36aa55dc
PB
5698 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5699 dead_tmp(tmpl);
5700 dead_tmp(tmph);
5e3f878a
PB
5701 tcg_gen_add_i64(val, val, tmp);
5702}
5703
5704/* Set N and Z flags from a 64-bit value. */
a7812ae4 5705static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5706{
5707 TCGv tmp = new_tmp();
5708 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5709 gen_logic_CC(tmp);
5710 dead_tmp(tmp);
5e3f878a
PB
5711}
5712
9ee6e8bb
PB
5713static void disas_arm_insn(CPUState * env, DisasContext *s)
5714{
5715 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5716 TCGv tmp;
3670669c 5717 TCGv tmp2;
6ddbc6e4 5718 TCGv tmp3;
b0109805 5719 TCGv addr;
a7812ae4 5720 TCGv_i64 tmp64;
9ee6e8bb
PB
5721
5722 insn = ldl_code(s->pc);
5723 s->pc += 4;
5724
5725 /* M variants do not implement ARM mode. */
5726 if (IS_M(env))
5727 goto illegal_op;
5728 cond = insn >> 28;
5729 if (cond == 0xf){
5730 /* Unconditional instructions. */
5731 if (((insn >> 25) & 7) == 1) {
5732 /* NEON Data processing. */
5733 if (!arm_feature(env, ARM_FEATURE_NEON))
5734 goto illegal_op;
5735
5736 if (disas_neon_data_insn(env, s, insn))
5737 goto illegal_op;
5738 return;
5739 }
5740 if ((insn & 0x0f100000) == 0x04000000) {
5741 /* NEON load/store. */
5742 if (!arm_feature(env, ARM_FEATURE_NEON))
5743 goto illegal_op;
5744
5745 if (disas_neon_ls_insn(env, s, insn))
5746 goto illegal_op;
5747 return;
5748 }
5749 if ((insn & 0x0d70f000) == 0x0550f000)
5750 return; /* PLD */
5751 else if ((insn & 0x0ffffdff) == 0x01010000) {
5752 ARCH(6);
5753 /* setend */
5754 if (insn & (1 << 9)) {
5755 /* BE8 mode not implemented. */
5756 goto illegal_op;
5757 }
5758 return;
5759 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5760 switch ((insn >> 4) & 0xf) {
5761 case 1: /* clrex */
5762 ARCH(6K);
8f8e3aa4 5763 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5764 return;
5765 case 4: /* dsb */
5766 case 5: /* dmb */
5767 case 6: /* isb */
5768 ARCH(7);
5769 /* We don't emulate caches so these are a no-op. */
5770 return;
5771 default:
5772 goto illegal_op;
5773 }
5774 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5775 /* srs */
5776 uint32_t offset;
5777 if (IS_USER(s))
5778 goto illegal_op;
5779 ARCH(6);
5780 op1 = (insn & 0x1f);
5781 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5782 addr = load_reg(s, 13);
9ee6e8bb 5783 } else {
b0109805
PB
5784 addr = new_tmp();
5785 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5786 }
5787 i = (insn >> 23) & 3;
5788 switch (i) {
5789 case 0: offset = -4; break; /* DA */
5790 case 1: offset = -8; break; /* DB */
5791 case 2: offset = 0; break; /* IA */
5792 case 3: offset = 4; break; /* IB */
5793 default: abort();
5794 }
5795 if (offset)
b0109805
PB
5796 tcg_gen_addi_i32(addr, addr, offset);
5797 tmp = load_reg(s, 14);
5798 gen_st32(tmp, addr, 0);
5799 tmp = new_tmp();
5800 gen_helper_cpsr_read(tmp);
5801 tcg_gen_addi_i32(addr, addr, 4);
5802 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5803 if (insn & (1 << 21)) {
5804 /* Base writeback. */
5805 switch (i) {
5806 case 0: offset = -8; break;
5807 case 1: offset = -4; break;
5808 case 2: offset = 4; break;
5809 case 3: offset = 0; break;
5810 default: abort();
5811 }
5812 if (offset)
b0109805 5813 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5814 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5815 gen_movl_reg_T1(s, 13);
5816 } else {
b0109805 5817 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5818 }
b0109805
PB
5819 } else {
5820 dead_tmp(addr);
9ee6e8bb
PB
5821 }
5822 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5823 /* rfe */
5824 uint32_t offset;
5825 if (IS_USER(s))
5826 goto illegal_op;
5827 ARCH(6);
5828 rn = (insn >> 16) & 0xf;
b0109805 5829 addr = load_reg(s, rn);
9ee6e8bb
PB
5830 i = (insn >> 23) & 3;
5831 switch (i) {
b0109805
PB
5832 case 0: offset = -4; break; /* DA */
5833 case 1: offset = -8; break; /* DB */
5834 case 2: offset = 0; break; /* IA */
5835 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5836 default: abort();
5837 }
5838 if (offset)
b0109805
PB
5839 tcg_gen_addi_i32(addr, addr, offset);
5840 /* Load PC into tmp and CPSR into tmp2. */
5841 tmp = gen_ld32(addr, 0);
5842 tcg_gen_addi_i32(addr, addr, 4);
5843 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5844 if (insn & (1 << 21)) {
5845 /* Base writeback. */
5846 switch (i) {
b0109805
PB
5847 case 0: offset = -8; break;
5848 case 1: offset = -4; break;
5849 case 2: offset = 4; break;
5850 case 3: offset = 0; break;
9ee6e8bb
PB
5851 default: abort();
5852 }
5853 if (offset)
b0109805
PB
5854 tcg_gen_addi_i32(addr, addr, offset);
5855 store_reg(s, rn, addr);
5856 } else {
5857 dead_tmp(addr);
9ee6e8bb 5858 }
b0109805 5859 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5860 } else if ((insn & 0x0e000000) == 0x0a000000) {
5861 /* branch link and change to thumb (blx <offset>) */
5862 int32_t offset;
5863
5864 val = (uint32_t)s->pc;
d9ba4830
PB
5865 tmp = new_tmp();
5866 tcg_gen_movi_i32(tmp, val);
5867 store_reg(s, 14, tmp);
9ee6e8bb
PB
5868 /* Sign-extend the 24-bit offset */
5869 offset = (((int32_t)insn) << 8) >> 8;
5870 /* offset * 4 + bit24 * 2 + (thumb bit) */
5871 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5872 /* pipeline offset */
5873 val += 4;
d9ba4830 5874 gen_bx_im(s, val);
9ee6e8bb
PB
5875 return;
5876 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5877 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5878 /* iWMMXt register transfer. */
5879 if (env->cp15.c15_cpar & (1 << 1))
5880 if (!disas_iwmmxt_insn(env, s, insn))
5881 return;
5882 }
5883 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5884 /* Coprocessor double register transfer. */
5885 } else if ((insn & 0x0f000010) == 0x0e000010) {
5886 /* Additional coprocessor register transfer. */
7997d92f 5887 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5888 uint32_t mask;
5889 uint32_t val;
5890 /* cps (privileged) */
5891 if (IS_USER(s))
5892 return;
5893 mask = val = 0;
5894 if (insn & (1 << 19)) {
5895 if (insn & (1 << 8))
5896 mask |= CPSR_A;
5897 if (insn & (1 << 7))
5898 mask |= CPSR_I;
5899 if (insn & (1 << 6))
5900 mask |= CPSR_F;
5901 if (insn & (1 << 18))
5902 val |= mask;
5903 }
7997d92f 5904 if (insn & (1 << 17)) {
9ee6e8bb
PB
5905 mask |= CPSR_M;
5906 val |= (insn & 0x1f);
5907 }
5908 if (mask) {
5909 gen_op_movl_T0_im(val);
5910 gen_set_psr_T0(s, mask, 0);
5911 }
5912 return;
5913 }
5914 goto illegal_op;
5915 }
5916 if (cond != 0xe) {
5917 /* if not always execute, we generate a conditional jump to
5918 next instruction */
5919 s->condlabel = gen_new_label();
d9ba4830 5920 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5921 s->condjmp = 1;
5922 }
5923 if ((insn & 0x0f900000) == 0x03000000) {
5924 if ((insn & (1 << 21)) == 0) {
5925 ARCH(6T2);
5926 rd = (insn >> 12) & 0xf;
5927 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5928 if ((insn & (1 << 22)) == 0) {
5929 /* MOVW */
5e3f878a
PB
5930 tmp = new_tmp();
5931 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5932 } else {
5933 /* MOVT */
5e3f878a 5934 tmp = load_reg(s, rd);
86831435 5935 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5936 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5937 }
5e3f878a 5938 store_reg(s, rd, tmp);
9ee6e8bb
PB
5939 } else {
5940 if (((insn >> 12) & 0xf) != 0xf)
5941 goto illegal_op;
5942 if (((insn >> 16) & 0xf) == 0) {
5943 gen_nop_hint(s, insn & 0xff);
5944 } else {
5945 /* CPSR = immediate */
5946 val = insn & 0xff;
5947 shift = ((insn >> 8) & 0xf) * 2;
5948 if (shift)
5949 val = (val >> shift) | (val << (32 - shift));
5950 gen_op_movl_T0_im(val);
5951 i = ((insn & (1 << 22)) != 0);
5952 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5953 goto illegal_op;
5954 }
5955 }
5956 } else if ((insn & 0x0f900000) == 0x01000000
5957 && (insn & 0x00000090) != 0x00000090) {
5958 /* miscellaneous instructions */
5959 op1 = (insn >> 21) & 3;
5960 sh = (insn >> 4) & 0xf;
5961 rm = insn & 0xf;
5962 switch (sh) {
5963 case 0x0: /* move program status register */
5964 if (op1 & 1) {
5965 /* PSR = reg */
5966 gen_movl_T0_reg(s, rm);
5967 i = ((op1 & 2) != 0);
5968 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5969 goto illegal_op;
5970 } else {
5971 /* reg = PSR */
5972 rd = (insn >> 12) & 0xf;
5973 if (op1 & 2) {
5974 if (IS_USER(s))
5975 goto illegal_op;
d9ba4830 5976 tmp = load_cpu_field(spsr);
9ee6e8bb 5977 } else {
d9ba4830
PB
5978 tmp = new_tmp();
5979 gen_helper_cpsr_read(tmp);
9ee6e8bb 5980 }
d9ba4830 5981 store_reg(s, rd, tmp);
9ee6e8bb
PB
5982 }
5983 break;
5984 case 0x1:
5985 if (op1 == 1) {
5986 /* branch/exchange thumb (bx). */
d9ba4830
PB
5987 tmp = load_reg(s, rm);
5988 gen_bx(s, tmp);
9ee6e8bb
PB
5989 } else if (op1 == 3) {
5990 /* clz */
5991 rd = (insn >> 12) & 0xf;
1497c961
PB
5992 tmp = load_reg(s, rm);
5993 gen_helper_clz(tmp, tmp);
5994 store_reg(s, rd, tmp);
9ee6e8bb
PB
5995 } else {
5996 goto illegal_op;
5997 }
5998 break;
5999 case 0x2:
6000 if (op1 == 1) {
6001 ARCH(5J); /* bxj */
6002 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6003 tmp = load_reg(s, rm);
6004 gen_bx(s, tmp);
9ee6e8bb
PB
6005 } else {
6006 goto illegal_op;
6007 }
6008 break;
6009 case 0x3:
6010 if (op1 != 1)
6011 goto illegal_op;
6012
6013 /* branch link/exchange thumb (blx) */
d9ba4830
PB
6014 tmp = load_reg(s, rm);
6015 tmp2 = new_tmp();
6016 tcg_gen_movi_i32(tmp2, s->pc);
6017 store_reg(s, 14, tmp2);
6018 gen_bx(s, tmp);
9ee6e8bb
PB
6019 break;
6020 case 0x5: /* saturating add/subtract */
6021 rd = (insn >> 12) & 0xf;
6022 rn = (insn >> 16) & 0xf;
b40d0353 6023 tmp = load_reg(s, rm);
5e3f878a 6024 tmp2 = load_reg(s, rn);
9ee6e8bb 6025 if (op1 & 2)
5e3f878a 6026 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 6027 if (op1 & 1)
5e3f878a 6028 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 6029 else
5e3f878a
PB
6030 gen_helper_add_saturate(tmp, tmp, tmp2);
6031 dead_tmp(tmp2);
6032 store_reg(s, rd, tmp);
9ee6e8bb
PB
6033 break;
6034 case 7: /* bkpt */
6035 gen_set_condexec(s);
5e3f878a 6036 gen_set_pc_im(s->pc - 4);
d9ba4830 6037 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
6038 s->is_jmp = DISAS_JUMP;
6039 break;
6040 case 0x8: /* signed multiply */
6041 case 0xa:
6042 case 0xc:
6043 case 0xe:
6044 rs = (insn >> 8) & 0xf;
6045 rn = (insn >> 12) & 0xf;
6046 rd = (insn >> 16) & 0xf;
6047 if (op1 == 1) {
6048 /* (32 * 16) >> 16 */
5e3f878a
PB
6049 tmp = load_reg(s, rm);
6050 tmp2 = load_reg(s, rs);
9ee6e8bb 6051 if (sh & 4)
5e3f878a 6052 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6053 else
5e3f878a 6054 gen_sxth(tmp2);
a7812ae4
PB
6055 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6056 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 6057 tmp = new_tmp();
a7812ae4 6058 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6059 if ((sh & 2) == 0) {
5e3f878a
PB
6060 tmp2 = load_reg(s, rn);
6061 gen_helper_add_setq(tmp, tmp, tmp2);
6062 dead_tmp(tmp2);
9ee6e8bb 6063 }
5e3f878a 6064 store_reg(s, rd, tmp);
9ee6e8bb
PB
6065 } else {
6066 /* 16 * 16 */
5e3f878a
PB
6067 tmp = load_reg(s, rm);
6068 tmp2 = load_reg(s, rs);
6069 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6070 dead_tmp(tmp2);
9ee6e8bb 6071 if (op1 == 2) {
a7812ae4
PB
6072 tmp64 = tcg_temp_new_i64();
6073 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6074 dead_tmp(tmp);
a7812ae4
PB
6075 gen_addq(s, tmp64, rn, rd);
6076 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6077 } else {
6078 if (op1 == 0) {
5e3f878a
PB
6079 tmp2 = load_reg(s, rn);
6080 gen_helper_add_setq(tmp, tmp, tmp2);
6081 dead_tmp(tmp2);
9ee6e8bb 6082 }
5e3f878a 6083 store_reg(s, rd, tmp);
9ee6e8bb
PB
6084 }
6085 }
6086 break;
6087 default:
6088 goto illegal_op;
6089 }
6090 } else if (((insn & 0x0e000000) == 0 &&
6091 (insn & 0x00000090) != 0x90) ||
6092 ((insn & 0x0e000000) == (1 << 25))) {
6093 int set_cc, logic_cc, shiftop;
6094
6095 op1 = (insn >> 21) & 0xf;
6096 set_cc = (insn >> 20) & 1;
6097 logic_cc = table_logic_cc[op1] & set_cc;
6098
6099 /* data processing instruction */
6100 if (insn & (1 << 25)) {
6101 /* immediate operand */
6102 val = insn & 0xff;
6103 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 6104 if (shift) {
9ee6e8bb 6105 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9
JR
6106 }
6107 tmp2 = new_tmp();
6108 tcg_gen_movi_i32(tmp2, val);
6109 if (logic_cc && shift) {
6110 gen_set_CF_bit31(tmp2);
6111 }
9ee6e8bb
PB
6112 } else {
6113 /* register */
6114 rm = (insn) & 0xf;
e9bb4aa9 6115 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6116 shiftop = (insn >> 5) & 3;
6117 if (!(insn & (1 << 4))) {
6118 shift = (insn >> 7) & 0x1f;
e9bb4aa9 6119 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
6120 } else {
6121 rs = (insn >> 8) & 0xf;
8984bd2e 6122 tmp = load_reg(s, rs);
e9bb4aa9 6123 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
6124 }
6125 }
6126 if (op1 != 0x0f && op1 != 0x0d) {
6127 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
6128 tmp = load_reg(s, rn);
6129 } else {
6130 TCGV_UNUSED(tmp);
9ee6e8bb
PB
6131 }
6132 rd = (insn >> 12) & 0xf;
6133 switch(op1) {
6134 case 0x00:
e9bb4aa9
JR
6135 tcg_gen_and_i32(tmp, tmp, tmp2);
6136 if (logic_cc) {
6137 gen_logic_CC(tmp);
6138 }
21aeb343 6139 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6140 break;
6141 case 0x01:
e9bb4aa9
JR
6142 tcg_gen_xor_i32(tmp, tmp, tmp2);
6143 if (logic_cc) {
6144 gen_logic_CC(tmp);
6145 }
21aeb343 6146 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6147 break;
6148 case 0x02:
6149 if (set_cc && rd == 15) {
6150 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 6151 if (IS_USER(s)) {
9ee6e8bb 6152 goto illegal_op;
e9bb4aa9
JR
6153 }
6154 gen_helper_sub_cc(tmp, tmp, tmp2);
6155 gen_exception_return(s, tmp);
9ee6e8bb 6156 } else {
e9bb4aa9
JR
6157 if (set_cc) {
6158 gen_helper_sub_cc(tmp, tmp, tmp2);
6159 } else {
6160 tcg_gen_sub_i32(tmp, tmp, tmp2);
6161 }
21aeb343 6162 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6163 }
6164 break;
6165 case 0x03:
e9bb4aa9
JR
6166 if (set_cc) {
6167 gen_helper_sub_cc(tmp, tmp2, tmp);
6168 } else {
6169 tcg_gen_sub_i32(tmp, tmp2, tmp);
6170 }
21aeb343 6171 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6172 break;
6173 case 0x04:
e9bb4aa9
JR
6174 if (set_cc) {
6175 gen_helper_add_cc(tmp, tmp, tmp2);
6176 } else {
6177 tcg_gen_add_i32(tmp, tmp, tmp2);
6178 }
21aeb343 6179 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6180 break;
6181 case 0x05:
e9bb4aa9
JR
6182 if (set_cc) {
6183 gen_helper_adc_cc(tmp, tmp, tmp2);
6184 } else {
6185 gen_add_carry(tmp, tmp, tmp2);
6186 }
21aeb343 6187 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6188 break;
6189 case 0x06:
e9bb4aa9
JR
6190 if (set_cc) {
6191 gen_helper_sbc_cc(tmp, tmp, tmp2);
6192 } else {
6193 gen_sub_carry(tmp, tmp, tmp2);
6194 }
21aeb343 6195 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6196 break;
6197 case 0x07:
e9bb4aa9
JR
6198 if (set_cc) {
6199 gen_helper_sbc_cc(tmp, tmp2, tmp);
6200 } else {
6201 gen_sub_carry(tmp, tmp2, tmp);
6202 }
21aeb343 6203 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6204 break;
6205 case 0x08:
6206 if (set_cc) {
e9bb4aa9
JR
6207 tcg_gen_and_i32(tmp, tmp, tmp2);
6208 gen_logic_CC(tmp);
9ee6e8bb 6209 }
e9bb4aa9 6210 dead_tmp(tmp);
9ee6e8bb
PB
6211 break;
6212 case 0x09:
6213 if (set_cc) {
e9bb4aa9
JR
6214 tcg_gen_xor_i32(tmp, tmp, tmp2);
6215 gen_logic_CC(tmp);
9ee6e8bb 6216 }
e9bb4aa9 6217 dead_tmp(tmp);
9ee6e8bb
PB
6218 break;
6219 case 0x0a:
6220 if (set_cc) {
e9bb4aa9 6221 gen_helper_sub_cc(tmp, tmp, tmp2);
9ee6e8bb 6222 }
e9bb4aa9 6223 dead_tmp(tmp);
9ee6e8bb
PB
6224 break;
6225 case 0x0b:
6226 if (set_cc) {
e9bb4aa9 6227 gen_helper_add_cc(tmp, tmp, tmp2);
9ee6e8bb 6228 }
e9bb4aa9 6229 dead_tmp(tmp);
9ee6e8bb
PB
6230 break;
6231 case 0x0c:
e9bb4aa9
JR
6232 tcg_gen_or_i32(tmp, tmp, tmp2);
6233 if (logic_cc) {
6234 gen_logic_CC(tmp);
6235 }
21aeb343 6236 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6237 break;
6238 case 0x0d:
6239 if (logic_cc && rd == 15) {
6240 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 6241 if (IS_USER(s)) {
9ee6e8bb 6242 goto illegal_op;
e9bb4aa9
JR
6243 }
6244 gen_exception_return(s, tmp2);
9ee6e8bb 6245 } else {
e9bb4aa9
JR
6246 if (logic_cc) {
6247 gen_logic_CC(tmp2);
6248 }
21aeb343 6249 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6250 }
6251 break;
6252 case 0x0e:
e9bb4aa9
JR
6253 tcg_gen_bic_i32(tmp, tmp, tmp2);
6254 if (logic_cc) {
6255 gen_logic_CC(tmp);
6256 }
21aeb343 6257 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
6258 break;
6259 default:
6260 case 0x0f:
e9bb4aa9
JR
6261 tcg_gen_not_i32(tmp2, tmp2);
6262 if (logic_cc) {
6263 gen_logic_CC(tmp2);
6264 }
21aeb343 6265 store_reg_bx(env, s, rd, tmp2);
9ee6e8bb
PB
6266 break;
6267 }
e9bb4aa9
JR
6268 if (op1 != 0x0f && op1 != 0x0d) {
6269 dead_tmp(tmp2);
6270 }
9ee6e8bb
PB
6271 } else {
6272 /* other instructions */
6273 op1 = (insn >> 24) & 0xf;
6274 switch(op1) {
6275 case 0x0:
6276 case 0x1:
6277 /* multiplies, extra load/stores */
6278 sh = (insn >> 5) & 3;
6279 if (sh == 0) {
6280 if (op1 == 0x0) {
6281 rd = (insn >> 16) & 0xf;
6282 rn = (insn >> 12) & 0xf;
6283 rs = (insn >> 8) & 0xf;
6284 rm = (insn) & 0xf;
6285 op1 = (insn >> 20) & 0xf;
6286 switch (op1) {
6287 case 0: case 1: case 2: case 3: case 6:
6288 /* 32 bit mul */
5e3f878a
PB
6289 tmp = load_reg(s, rs);
6290 tmp2 = load_reg(s, rm);
6291 tcg_gen_mul_i32(tmp, tmp, tmp2);
6292 dead_tmp(tmp2);
9ee6e8bb
PB
6293 if (insn & (1 << 22)) {
6294 /* Subtract (mls) */
6295 ARCH(6T2);
5e3f878a
PB
6296 tmp2 = load_reg(s, rn);
6297 tcg_gen_sub_i32(tmp, tmp2, tmp);
6298 dead_tmp(tmp2);
9ee6e8bb
PB
6299 } else if (insn & (1 << 21)) {
6300 /* Add */
5e3f878a
PB
6301 tmp2 = load_reg(s, rn);
6302 tcg_gen_add_i32(tmp, tmp, tmp2);
6303 dead_tmp(tmp2);
9ee6e8bb
PB
6304 }
6305 if (insn & (1 << 20))
5e3f878a
PB
6306 gen_logic_CC(tmp);
6307 store_reg(s, rd, tmp);
9ee6e8bb
PB
6308 break;
6309 default:
6310 /* 64 bit mul */
5e3f878a
PB
6311 tmp = load_reg(s, rs);
6312 tmp2 = load_reg(s, rm);
9ee6e8bb 6313 if (insn & (1 << 22))
a7812ae4 6314 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6315 else
a7812ae4 6316 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6317 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6318 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6319 if (!(insn & (1 << 23))) { /* double accumulate */
6320 ARCH(6);
a7812ae4
PB
6321 gen_addq_lo(s, tmp64, rn);
6322 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6323 }
6324 if (insn & (1 << 20))
a7812ae4
PB
6325 gen_logicq_cc(tmp64);
6326 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6327 break;
6328 }
6329 } else {
6330 rn = (insn >> 16) & 0xf;
6331 rd = (insn >> 12) & 0xf;
6332 if (insn & (1 << 23)) {
6333 /* load/store exclusive */
86753403
PB
6334 op1 = (insn >> 21) & 0x3;
6335 if (op1)
a47f43d2 6336 ARCH(6K);
86753403
PB
6337 else
6338 ARCH(6);
9ee6e8bb 6339 gen_movl_T1_reg(s, rn);
72f1c62f 6340 addr = cpu_T[1];
9ee6e8bb 6341 if (insn & (1 << 20)) {
8f8e3aa4 6342 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
86753403
PB
6343 switch (op1) {
6344 case 0: /* ldrex */
6345 tmp = gen_ld32(addr, IS_USER(s));
6346 break;
6347 case 1: /* ldrexd */
6348 tmp = gen_ld32(addr, IS_USER(s));
6349 store_reg(s, rd, tmp);
6350 tcg_gen_addi_i32(addr, addr, 4);
6351 tmp = gen_ld32(addr, IS_USER(s));
6352 rd++;
6353 break;
6354 case 2: /* ldrexb */
6355 tmp = gen_ld8u(addr, IS_USER(s));
6356 break;
6357 case 3: /* ldrexh */
6358 tmp = gen_ld16u(addr, IS_USER(s));
6359 break;
6360 default:
6361 abort();
6362 }
8f8e3aa4 6363 store_reg(s, rd, tmp);
9ee6e8bb 6364 } else {
8f8e3aa4 6365 int label = gen_new_label();
9ee6e8bb 6366 rm = insn & 0xf;
8f8e3aa4 6367 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
6368 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6369 0, label);
8f8e3aa4 6370 tmp = load_reg(s,rm);
86753403
PB
6371 switch (op1) {
6372 case 0: /* strex */
6373 gen_st32(tmp, addr, IS_USER(s));
6374 break;
6375 case 1: /* strexd */
6376 gen_st32(tmp, addr, IS_USER(s));
6377 tcg_gen_addi_i32(addr, addr, 4);
6378 tmp = load_reg(s, rm + 1);
6379 gen_st32(tmp, addr, IS_USER(s));
6380 break;
6381 case 2: /* strexb */
6382 gen_st8(tmp, addr, IS_USER(s));
6383 break;
6384 case 3: /* strexh */
6385 gen_st16(tmp, addr, IS_USER(s));
6386 break;
6387 default:
6388 abort();
6389 }
2637a3be 6390 gen_set_label(label);
8f8e3aa4 6391 gen_movl_reg_T0(s, rd);
9ee6e8bb 6392 }
9ee6e8bb
PB
6393 } else {
6394 /* SWP instruction */
6395 rm = (insn) & 0xf;
6396
8984bd2e
PB
6397 /* ??? This is not really atomic. However we know
6398 we never have multiple CPUs running in parallel,
6399 so it is good enough. */
6400 addr = load_reg(s, rn);
6401 tmp = load_reg(s, rm);
9ee6e8bb 6402 if (insn & (1 << 22)) {
8984bd2e
PB
6403 tmp2 = gen_ld8u(addr, IS_USER(s));
6404 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6405 } else {
8984bd2e
PB
6406 tmp2 = gen_ld32(addr, IS_USER(s));
6407 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6408 }
8984bd2e
PB
6409 dead_tmp(addr);
6410 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6411 }
6412 }
6413 } else {
6414 int address_offset;
6415 int load;
6416 /* Misc load/store */
6417 rn = (insn >> 16) & 0xf;
6418 rd = (insn >> 12) & 0xf;
b0109805 6419 addr = load_reg(s, rn);
9ee6e8bb 6420 if (insn & (1 << 24))
b0109805 6421 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6422 address_offset = 0;
6423 if (insn & (1 << 20)) {
6424 /* load */
6425 switch(sh) {
6426 case 1:
b0109805 6427 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6428 break;
6429 case 2:
b0109805 6430 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6431 break;
6432 default:
6433 case 3:
b0109805 6434 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6435 break;
6436 }
6437 load = 1;
6438 } else if (sh & 2) {
6439 /* doubleword */
6440 if (sh & 1) {
6441 /* store */
b0109805
PB
6442 tmp = load_reg(s, rd);
6443 gen_st32(tmp, addr, IS_USER(s));
6444 tcg_gen_addi_i32(addr, addr, 4);
6445 tmp = load_reg(s, rd + 1);
6446 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6447 load = 0;
6448 } else {
6449 /* load */
b0109805
PB
6450 tmp = gen_ld32(addr, IS_USER(s));
6451 store_reg(s, rd, tmp);
6452 tcg_gen_addi_i32(addr, addr, 4);
6453 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6454 rd++;
6455 load = 1;
6456 }
6457 address_offset = -4;
6458 } else {
6459 /* store */
b0109805
PB
6460 tmp = load_reg(s, rd);
6461 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6462 load = 0;
6463 }
6464 /* Perform base writeback before the loaded value to
6465 ensure correct behavior with overlapping index registers.
6466 ldrd with base writeback is is undefined if the
6467 destination and index registers overlap. */
6468 if (!(insn & (1 << 24))) {
b0109805
PB
6469 gen_add_datah_offset(s, insn, address_offset, addr);
6470 store_reg(s, rn, addr);
9ee6e8bb
PB
6471 } else if (insn & (1 << 21)) {
6472 if (address_offset)
b0109805
PB
6473 tcg_gen_addi_i32(addr, addr, address_offset);
6474 store_reg(s, rn, addr);
6475 } else {
6476 dead_tmp(addr);
9ee6e8bb
PB
6477 }
6478 if (load) {
6479 /* Complete the load. */
b0109805 6480 store_reg(s, rd, tmp);
9ee6e8bb
PB
6481 }
6482 }
6483 break;
6484 case 0x4:
6485 case 0x5:
6486 goto do_ldst;
6487 case 0x6:
6488 case 0x7:
6489 if (insn & (1 << 4)) {
6490 ARCH(6);
6491 /* Armv6 Media instructions. */
6492 rm = insn & 0xf;
6493 rn = (insn >> 16) & 0xf;
2c0262af 6494 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6495 rs = (insn >> 8) & 0xf;
6496 switch ((insn >> 23) & 3) {
6497 case 0: /* Parallel add/subtract. */
6498 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6499 tmp = load_reg(s, rn);
6500 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6501 sh = (insn >> 5) & 7;
6502 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6503 goto illegal_op;
6ddbc6e4
PB
6504 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6505 dead_tmp(tmp2);
6506 store_reg(s, rd, tmp);
9ee6e8bb
PB
6507 break;
6508 case 1:
6509 if ((insn & 0x00700020) == 0) {
6c95676b 6510 /* Halfword pack. */
3670669c
PB
6511 tmp = load_reg(s, rn);
6512 tmp2 = load_reg(s, rm);
9ee6e8bb 6513 shift = (insn >> 7) & 0x1f;
3670669c
PB
6514 if (insn & (1 << 6)) {
6515 /* pkhtb */
22478e79
AZ
6516 if (shift == 0)
6517 shift = 31;
6518 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6519 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6520 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6521 } else {
6522 /* pkhbt */
22478e79
AZ
6523 if (shift)
6524 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6525 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6526 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6527 }
6528 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6529 dead_tmp(tmp2);
3670669c 6530 store_reg(s, rd, tmp);
9ee6e8bb
PB
6531 } else if ((insn & 0x00200020) == 0x00200000) {
6532 /* [us]sat */
6ddbc6e4 6533 tmp = load_reg(s, rm);
9ee6e8bb
PB
6534 shift = (insn >> 7) & 0x1f;
6535 if (insn & (1 << 6)) {
6536 if (shift == 0)
6537 shift = 31;
6ddbc6e4 6538 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6539 } else {
6ddbc6e4 6540 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6541 }
6542 sh = (insn >> 16) & 0x1f;
6543 if (sh != 0) {
6544 if (insn & (1 << 22))
6ddbc6e4 6545 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6546 else
6ddbc6e4 6547 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6548 }
6ddbc6e4 6549 store_reg(s, rd, tmp);
9ee6e8bb
PB
6550 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6551 /* [us]sat16 */
6ddbc6e4 6552 tmp = load_reg(s, rm);
9ee6e8bb
PB
6553 sh = (insn >> 16) & 0x1f;
6554 if (sh != 0) {
6555 if (insn & (1 << 22))
6ddbc6e4 6556 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6557 else
6ddbc6e4 6558 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6559 }
6ddbc6e4 6560 store_reg(s, rd, tmp);
9ee6e8bb
PB
6561 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6562 /* Select bytes. */
6ddbc6e4
PB
6563 tmp = load_reg(s, rn);
6564 tmp2 = load_reg(s, rm);
6565 tmp3 = new_tmp();
6566 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6567 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6568 dead_tmp(tmp3);
6569 dead_tmp(tmp2);
6570 store_reg(s, rd, tmp);
9ee6e8bb 6571 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6572 tmp = load_reg(s, rm);
9ee6e8bb
PB
6573 shift = (insn >> 10) & 3;
6574 /* ??? In many cases it's not neccessary to do a
6575 rotate, a shift is sufficient. */
6576 if (shift != 0)
5e3f878a 6577 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6578 op1 = (insn >> 20) & 7;
6579 switch (op1) {
5e3f878a
PB
6580 case 0: gen_sxtb16(tmp); break;
6581 case 2: gen_sxtb(tmp); break;
6582 case 3: gen_sxth(tmp); break;
6583 case 4: gen_uxtb16(tmp); break;
6584 case 6: gen_uxtb(tmp); break;
6585 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6586 default: goto illegal_op;
6587 }
6588 if (rn != 15) {
5e3f878a 6589 tmp2 = load_reg(s, rn);
9ee6e8bb 6590 if ((op1 & 3) == 0) {
5e3f878a 6591 gen_add16(tmp, tmp2);
9ee6e8bb 6592 } else {
5e3f878a
PB
6593 tcg_gen_add_i32(tmp, tmp, tmp2);
6594 dead_tmp(tmp2);
9ee6e8bb
PB
6595 }
6596 }
6c95676b 6597 store_reg(s, rd, tmp);
9ee6e8bb
PB
6598 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6599 /* rev */
b0109805 6600 tmp = load_reg(s, rm);
9ee6e8bb
PB
6601 if (insn & (1 << 22)) {
6602 if (insn & (1 << 7)) {
b0109805 6603 gen_revsh(tmp);
9ee6e8bb
PB
6604 } else {
6605 ARCH(6T2);
b0109805 6606 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6607 }
6608 } else {
6609 if (insn & (1 << 7))
b0109805 6610 gen_rev16(tmp);
9ee6e8bb 6611 else
66896cb8 6612 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 6613 }
b0109805 6614 store_reg(s, rd, tmp);
9ee6e8bb
PB
6615 } else {
6616 goto illegal_op;
6617 }
6618 break;
6619 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6620 tmp = load_reg(s, rm);
6621 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6622 if (insn & (1 << 20)) {
6623 /* Signed multiply most significant [accumulate]. */
a7812ae4 6624 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6625 if (insn & (1 << 5))
a7812ae4
PB
6626 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6627 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6628 tmp = new_tmp();
a7812ae4 6629 tcg_gen_trunc_i64_i32(tmp, tmp64);
955a7dd5
AZ
6630 if (rd != 15) {
6631 tmp2 = load_reg(s, rd);
9ee6e8bb 6632 if (insn & (1 << 6)) {
5e3f878a 6633 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6634 } else {
5e3f878a 6635 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6636 }
5e3f878a 6637 dead_tmp(tmp2);
9ee6e8bb 6638 }
955a7dd5 6639 store_reg(s, rn, tmp);
9ee6e8bb
PB
6640 } else {
6641 if (insn & (1 << 5))
5e3f878a
PB
6642 gen_swap_half(tmp2);
6643 gen_smul_dual(tmp, tmp2);
6644 /* This addition cannot overflow. */
6645 if (insn & (1 << 6)) {
6646 tcg_gen_sub_i32(tmp, tmp, tmp2);
6647 } else {
6648 tcg_gen_add_i32(tmp, tmp, tmp2);
6649 }
6650 dead_tmp(tmp2);
9ee6e8bb 6651 if (insn & (1 << 22)) {
5e3f878a 6652 /* smlald, smlsld */
a7812ae4
PB
6653 tmp64 = tcg_temp_new_i64();
6654 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6655 dead_tmp(tmp);
a7812ae4
PB
6656 gen_addq(s, tmp64, rd, rn);
6657 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6658 } else {
5e3f878a 6659 /* smuad, smusd, smlad, smlsd */
22478e79 6660 if (rd != 15)
9ee6e8bb 6661 {
22478e79 6662 tmp2 = load_reg(s, rd);
5e3f878a
PB
6663 gen_helper_add_setq(tmp, tmp, tmp2);
6664 dead_tmp(tmp2);
9ee6e8bb 6665 }
22478e79 6666 store_reg(s, rn, tmp);
9ee6e8bb
PB
6667 }
6668 }
6669 break;
6670 case 3:
6671 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6672 switch (op1) {
6673 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6674 ARCH(6);
6675 tmp = load_reg(s, rm);
6676 tmp2 = load_reg(s, rs);
6677 gen_helper_usad8(tmp, tmp, tmp2);
6678 dead_tmp(tmp2);
ded9d295
AZ
6679 if (rd != 15) {
6680 tmp2 = load_reg(s, rd);
6ddbc6e4
PB
6681 tcg_gen_add_i32(tmp, tmp, tmp2);
6682 dead_tmp(tmp2);
9ee6e8bb 6683 }
ded9d295 6684 store_reg(s, rn, tmp);
9ee6e8bb
PB
6685 break;
6686 case 0x20: case 0x24: case 0x28: case 0x2c:
6687 /* Bitfield insert/clear. */
6688 ARCH(6T2);
6689 shift = (insn >> 7) & 0x1f;
6690 i = (insn >> 16) & 0x1f;
6691 i = i + 1 - shift;
6692 if (rm == 15) {
5e3f878a
PB
6693 tmp = new_tmp();
6694 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6695 } else {
5e3f878a 6696 tmp = load_reg(s, rm);
9ee6e8bb
PB
6697 }
6698 if (i != 32) {
5e3f878a 6699 tmp2 = load_reg(s, rd);
8f8e3aa4 6700 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6701 dead_tmp(tmp2);
9ee6e8bb 6702 }
5e3f878a 6703 store_reg(s, rd, tmp);
9ee6e8bb
PB
6704 break;
6705 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6706 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 6707 ARCH(6T2);
5e3f878a 6708 tmp = load_reg(s, rm);
9ee6e8bb
PB
6709 shift = (insn >> 7) & 0x1f;
6710 i = ((insn >> 16) & 0x1f) + 1;
6711 if (shift + i > 32)
6712 goto illegal_op;
6713 if (i < 32) {
6714 if (op1 & 0x20) {
5e3f878a 6715 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6716 } else {
5e3f878a 6717 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6718 }
6719 }
5e3f878a 6720 store_reg(s, rd, tmp);
9ee6e8bb
PB
6721 break;
6722 default:
6723 goto illegal_op;
6724 }
6725 break;
6726 }
6727 break;
6728 }
6729 do_ldst:
6730 /* Check for undefined extension instructions
6731 * per the ARM Bible IE:
6732 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6733 */
6734 sh = (0xf << 20) | (0xf << 4);
6735 if (op1 == 0x7 && ((insn & sh) == sh))
6736 {
6737 goto illegal_op;
6738 }
6739 /* load/store byte/word */
6740 rn = (insn >> 16) & 0xf;
6741 rd = (insn >> 12) & 0xf;
b0109805 6742 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6743 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6744 if (insn & (1 << 24))
b0109805 6745 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6746 if (insn & (1 << 20)) {
6747 /* load */
9ee6e8bb 6748 if (insn & (1 << 22)) {
b0109805 6749 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6750 } else {
b0109805 6751 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6752 }
9ee6e8bb
PB
6753 } else {
6754 /* store */
b0109805 6755 tmp = load_reg(s, rd);
9ee6e8bb 6756 if (insn & (1 << 22))
b0109805 6757 gen_st8(tmp, tmp2, i);
9ee6e8bb 6758 else
b0109805 6759 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6760 }
6761 if (!(insn & (1 << 24))) {
b0109805
PB
6762 gen_add_data_offset(s, insn, tmp2);
6763 store_reg(s, rn, tmp2);
6764 } else if (insn & (1 << 21)) {
6765 store_reg(s, rn, tmp2);
6766 } else {
6767 dead_tmp(tmp2);
9ee6e8bb
PB
6768 }
6769 if (insn & (1 << 20)) {
6770 /* Complete the load. */
6771 if (rd == 15)
b0109805 6772 gen_bx(s, tmp);
9ee6e8bb 6773 else
b0109805 6774 store_reg(s, rd, tmp);
9ee6e8bb
PB
6775 }
6776 break;
6777 case 0x08:
6778 case 0x09:
6779 {
6780 int j, n, user, loaded_base;
b0109805 6781 TCGv loaded_var;
9ee6e8bb
PB
6782 /* load/store multiple words */
6783 /* XXX: store correct base if write back */
6784 user = 0;
6785 if (insn & (1 << 22)) {
6786 if (IS_USER(s))
6787 goto illegal_op; /* only usable in supervisor mode */
6788
6789 if ((insn & (1 << 15)) == 0)
6790 user = 1;
6791 }
6792 rn = (insn >> 16) & 0xf;
b0109805 6793 addr = load_reg(s, rn);
9ee6e8bb
PB
6794
6795 /* compute total size */
6796 loaded_base = 0;
a50f5b91 6797 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6798 n = 0;
6799 for(i=0;i<16;i++) {
6800 if (insn & (1 << i))
6801 n++;
6802 }
6803 /* XXX: test invalid n == 0 case ? */
6804 if (insn & (1 << 23)) {
6805 if (insn & (1 << 24)) {
6806 /* pre increment */
b0109805 6807 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6808 } else {
6809 /* post increment */
6810 }
6811 } else {
6812 if (insn & (1 << 24)) {
6813 /* pre decrement */
b0109805 6814 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6815 } else {
6816 /* post decrement */
6817 if (n != 1)
b0109805 6818 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6819 }
6820 }
6821 j = 0;
6822 for(i=0;i<16;i++) {
6823 if (insn & (1 << i)) {
6824 if (insn & (1 << 20)) {
6825 /* load */
b0109805 6826 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6827 if (i == 15) {
b0109805 6828 gen_bx(s, tmp);
9ee6e8bb 6829 } else if (user) {
b0109805
PB
6830 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6831 dead_tmp(tmp);
9ee6e8bb 6832 } else if (i == rn) {
b0109805 6833 loaded_var = tmp;
9ee6e8bb
PB
6834 loaded_base = 1;
6835 } else {
b0109805 6836 store_reg(s, i, tmp);
9ee6e8bb
PB
6837 }
6838 } else {
6839 /* store */
6840 if (i == 15) {
6841 /* special case: r15 = PC + 8 */
6842 val = (long)s->pc + 4;
b0109805
PB
6843 tmp = new_tmp();
6844 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6845 } else if (user) {
b0109805
PB
6846 tmp = new_tmp();
6847 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6848 } else {
b0109805 6849 tmp = load_reg(s, i);
9ee6e8bb 6850 }
b0109805 6851 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6852 }
6853 j++;
6854 /* no need to add after the last transfer */
6855 if (j != n)
b0109805 6856 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6857 }
6858 }
6859 if (insn & (1 << 21)) {
6860 /* write back */
6861 if (insn & (1 << 23)) {
6862 if (insn & (1 << 24)) {
6863 /* pre increment */
6864 } else {
6865 /* post increment */
b0109805 6866 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6867 }
6868 } else {
6869 if (insn & (1 << 24)) {
6870 /* pre decrement */
6871 if (n != 1)
b0109805 6872 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6873 } else {
6874 /* post decrement */
b0109805 6875 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6876 }
6877 }
b0109805
PB
6878 store_reg(s, rn, addr);
6879 } else {
6880 dead_tmp(addr);
9ee6e8bb
PB
6881 }
6882 if (loaded_base) {
b0109805 6883 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6884 }
6885 if ((insn & (1 << 22)) && !user) {
6886 /* Restore CPSR from SPSR. */
d9ba4830
PB
6887 tmp = load_cpu_field(spsr);
6888 gen_set_cpsr(tmp, 0xffffffff);
6889 dead_tmp(tmp);
9ee6e8bb
PB
6890 s->is_jmp = DISAS_UPDATE;
6891 }
6892 }
6893 break;
6894 case 0xa:
6895 case 0xb:
6896 {
6897 int32_t offset;
6898
6899 /* branch (and link) */
6900 val = (int32_t)s->pc;
6901 if (insn & (1 << 24)) {
5e3f878a
PB
6902 tmp = new_tmp();
6903 tcg_gen_movi_i32(tmp, val);
6904 store_reg(s, 14, tmp);
9ee6e8bb
PB
6905 }
6906 offset = (((int32_t)insn << 8) >> 8);
6907 val += (offset << 2) + 4;
6908 gen_jmp(s, val);
6909 }
6910 break;
6911 case 0xc:
6912 case 0xd:
6913 case 0xe:
6914 /* Coprocessor. */
6915 if (disas_coproc_insn(env, s, insn))
6916 goto illegal_op;
6917 break;
6918 case 0xf:
6919 /* swi */
5e3f878a 6920 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6921 s->is_jmp = DISAS_SWI;
6922 break;
6923 default:
6924 illegal_op:
6925 gen_set_condexec(s);
5e3f878a 6926 gen_set_pc_im(s->pc - 4);
d9ba4830 6927 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6928 s->is_jmp = DISAS_JUMP;
6929 break;
6930 }
6931 }
6932}
6933
/* True (nonzero) when OP encodes a Thumb-2 logical operation.
   Opcodes 0..7 form the logical group; everything above is
   arithmetic or invalid. */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
6940
6941/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6942 then set condition code flags based on the result of the operation.
6943 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6944 to the high bit of T1.
6945 Returns zero if the opcode is valid. */
6946
6947static int
6948gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6949{
6950 int logic_cc;
6951
6952 logic_cc = 0;
6953 switch (op) {
6954 case 0: /* and */
6955 gen_op_andl_T0_T1();
6956 logic_cc = conds;
6957 break;
6958 case 1: /* bic */
6959 gen_op_bicl_T0_T1();
6960 logic_cc = conds;
6961 break;
6962 case 2: /* orr */
6963 gen_op_orl_T0_T1();
6964 logic_cc = conds;
6965 break;
6966 case 3: /* orn */
6967 gen_op_notl_T1();
6968 gen_op_orl_T0_T1();
6969 logic_cc = conds;
6970 break;
6971 case 4: /* eor */
6972 gen_op_xorl_T0_T1();
6973 logic_cc = conds;
6974 break;
6975 case 8: /* add */
6976 if (conds)
6977 gen_op_addl_T0_T1_cc();
6978 else
6979 gen_op_addl_T0_T1();
6980 break;
6981 case 10: /* adc */
6982 if (conds)
6983 gen_op_adcl_T0_T1_cc();
6984 else
b26eefb6 6985 gen_adc_T0_T1();
9ee6e8bb
PB
6986 break;
6987 case 11: /* sbc */
6988 if (conds)
6989 gen_op_sbcl_T0_T1_cc();
6990 else
3670669c 6991 gen_sbc_T0_T1();
9ee6e8bb
PB
6992 break;
6993 case 13: /* sub */
6994 if (conds)
6995 gen_op_subl_T0_T1_cc();
6996 else
6997 gen_op_subl_T0_T1();
6998 break;
6999 case 14: /* rsb */
7000 if (conds)
7001 gen_op_rsbl_T0_T1_cc();
7002 else
7003 gen_op_rsbl_T0_T1();
7004 break;
7005 default: /* 5, 6, 7, 9, 12, 15. */
7006 return 1;
7007 }
7008 if (logic_cc) {
7009 gen_op_logic_T0_cc();
7010 if (shifter_out)
b26eefb6 7011 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
7012 }
7013 return 0;
7014}
7015
7016/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7017 is not legal. */
7018static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7019{
b0109805 7020 uint32_t insn, imm, shift, offset;
9ee6e8bb 7021 uint32_t rd, rn, rm, rs;
b26eefb6 7022 TCGv tmp;
6ddbc6e4
PB
7023 TCGv tmp2;
7024 TCGv tmp3;
b0109805 7025 TCGv addr;
a7812ae4 7026 TCGv_i64 tmp64;
9ee6e8bb
PB
7027 int op;
7028 int shiftop;
7029 int conds;
7030 int logic_cc;
7031
7032 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7033 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 7034 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
7035 16-bit instructions to get correct prefetch abort behavior. */
7036 insn = insn_hw1;
7037 if ((insn & (1 << 12)) == 0) {
7038 /* Second half of blx. */
7039 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
7040 tmp = load_reg(s, 14);
7041 tcg_gen_addi_i32(tmp, tmp, offset);
7042 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 7043
d9ba4830 7044 tmp2 = new_tmp();
b0109805 7045 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7046 store_reg(s, 14, tmp2);
7047 gen_bx(s, tmp);
9ee6e8bb
PB
7048 return 0;
7049 }
7050 if (insn & (1 << 11)) {
7051 /* Second half of bl. */
7052 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 7053 tmp = load_reg(s, 14);
6a0d8a1d 7054 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 7055
d9ba4830 7056 tmp2 = new_tmp();
b0109805 7057 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
7058 store_reg(s, 14, tmp2);
7059 gen_bx(s, tmp);
9ee6e8bb
PB
7060 return 0;
7061 }
7062 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7063 /* Instruction spans a page boundary. Implement it as two
7064 16-bit instructions in case the second half causes an
7065 prefetch abort. */
7066 offset = ((int32_t)insn << 21) >> 9;
b0109805 7067 gen_op_movl_T0_im(s->pc + 2 + offset);
9ee6e8bb
PB
7068 gen_movl_reg_T0(s, 14);
7069 return 0;
7070 }
7071 /* Fall through to 32-bit decode. */
7072 }
7073
7074 insn = lduw_code(s->pc);
7075 s->pc += 2;
7076 insn |= (uint32_t)insn_hw1 << 16;
7077
7078 if ((insn & 0xf800e800) != 0xf000e800) {
7079 ARCH(6T2);
7080 }
7081
7082 rn = (insn >> 16) & 0xf;
7083 rs = (insn >> 12) & 0xf;
7084 rd = (insn >> 8) & 0xf;
7085 rm = insn & 0xf;
7086 switch ((insn >> 25) & 0xf) {
7087 case 0: case 1: case 2: case 3:
7088 /* 16-bit instructions. Should never happen. */
7089 abort();
7090 case 4:
7091 if (insn & (1 << 22)) {
7092 /* Other load/store, table branch. */
7093 if (insn & 0x01200000) {
7094 /* Load/store doubleword. */
7095 if (rn == 15) {
b0109805
PB
7096 addr = new_tmp();
7097 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7098 } else {
b0109805 7099 addr = load_reg(s, rn);
9ee6e8bb
PB
7100 }
7101 offset = (insn & 0xff) * 4;
7102 if ((insn & (1 << 23)) == 0)
7103 offset = -offset;
7104 if (insn & (1 << 24)) {
b0109805 7105 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7106 offset = 0;
7107 }
7108 if (insn & (1 << 20)) {
7109 /* ldrd */
b0109805
PB
7110 tmp = gen_ld32(addr, IS_USER(s));
7111 store_reg(s, rs, tmp);
7112 tcg_gen_addi_i32(addr, addr, 4);
7113 tmp = gen_ld32(addr, IS_USER(s));
7114 store_reg(s, rd, tmp);
9ee6e8bb
PB
7115 } else {
7116 /* strd */
b0109805
PB
7117 tmp = load_reg(s, rs);
7118 gen_st32(tmp, addr, IS_USER(s));
7119 tcg_gen_addi_i32(addr, addr, 4);
7120 tmp = load_reg(s, rd);
7121 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7122 }
7123 if (insn & (1 << 21)) {
7124 /* Base writeback. */
7125 if (rn == 15)
7126 goto illegal_op;
b0109805
PB
7127 tcg_gen_addi_i32(addr, addr, offset - 4);
7128 store_reg(s, rn, addr);
7129 } else {
7130 dead_tmp(addr);
9ee6e8bb
PB
7131 }
7132 } else if ((insn & (1 << 23)) == 0) {
7133 /* Load/store exclusive word. */
2c0262af 7134 gen_movl_T1_reg(s, rn);
72f1c62f 7135 addr = cpu_T[1];
2c0262af 7136 if (insn & (1 << 20)) {
8f8e3aa4
PB
7137 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7138 tmp = gen_ld32(addr, IS_USER(s));
7139 store_reg(s, rd, tmp);
9ee6e8bb 7140 } else {
8f8e3aa4
PB
7141 int label = gen_new_label();
7142 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
7143 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7144 0, label);
8f8e3aa4
PB
7145 tmp = load_reg(s, rs);
7146 gen_st32(tmp, cpu_T[1], IS_USER(s));
7147 gen_set_label(label);
7148 gen_movl_reg_T0(s, rd);
9ee6e8bb 7149 }
9ee6e8bb
PB
7150 } else if ((insn & (1 << 6)) == 0) {
7151 /* Table Branch. */
7152 if (rn == 15) {
b0109805
PB
7153 addr = new_tmp();
7154 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7155 } else {
b0109805 7156 addr = load_reg(s, rn);
9ee6e8bb 7157 }
b26eefb6 7158 tmp = load_reg(s, rm);
b0109805 7159 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7160 if (insn & (1 << 4)) {
7161 /* tbh */
b0109805 7162 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7163 dead_tmp(tmp);
b0109805 7164 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7165 } else { /* tbb */
b26eefb6 7166 dead_tmp(tmp);
b0109805 7167 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7168 }
b0109805
PB
7169 dead_tmp(addr);
7170 tcg_gen_shli_i32(tmp, tmp, 1);
7171 tcg_gen_addi_i32(tmp, tmp, s->pc);
7172 store_reg(s, 15, tmp);
9ee6e8bb
PB
7173 } else {
7174 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7175 /* ??? These are not really atomic. However we know
7176 we never have multiple CPUs running in parallel,
7177 so it is good enough. */
9ee6e8bb 7178 op = (insn >> 4) & 0x3;
8f8e3aa4
PB
7179 /* Must use a global reg for the address because we have
7180 a conditional branch in the store instruction. */
9ee6e8bb 7181 gen_movl_T1_reg(s, rn);
8f8e3aa4 7182 addr = cpu_T[1];
9ee6e8bb 7183 if (insn & (1 << 20)) {
8f8e3aa4 7184 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7185 switch (op) {
7186 case 0:
8f8e3aa4 7187 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7188 break;
2c0262af 7189 case 1:
8f8e3aa4 7190 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7191 break;
9ee6e8bb 7192 case 3:
8f8e3aa4
PB
7193 tmp = gen_ld32(addr, IS_USER(s));
7194 tcg_gen_addi_i32(addr, addr, 4);
7195 tmp2 = gen_ld32(addr, IS_USER(s));
7196 store_reg(s, rd, tmp2);
2c0262af
FB
7197 break;
7198 default:
9ee6e8bb
PB
7199 goto illegal_op;
7200 }
8f8e3aa4 7201 store_reg(s, rs, tmp);
9ee6e8bb 7202 } else {
8f8e3aa4
PB
7203 int label = gen_new_label();
7204 /* Must use a global that is not killed by the branch. */
7205 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a 7206 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
8f8e3aa4 7207 tmp = load_reg(s, rs);
9ee6e8bb
PB
7208 switch (op) {
7209 case 0:
8f8e3aa4 7210 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7211 break;
7212 case 1:
8f8e3aa4 7213 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7214 break;
2c0262af 7215 case 3:
8f8e3aa4
PB
7216 gen_st32(tmp, addr, IS_USER(s));
7217 tcg_gen_addi_i32(addr, addr, 4);
7218 tmp = load_reg(s, rd);
7219 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7220 break;
9ee6e8bb
PB
7221 default:
7222 goto illegal_op;
2c0262af 7223 }
8f8e3aa4 7224 gen_set_label(label);
9ee6e8bb
PB
7225 gen_movl_reg_T0(s, rm);
7226 }
7227 }
7228 } else {
7229 /* Load/store multiple, RFE, SRS. */
7230 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7231 /* Not available in user mode. */
b0109805 7232 if (IS_USER(s))
9ee6e8bb
PB
7233 goto illegal_op;
7234 if (insn & (1 << 20)) {
7235 /* rfe */
b0109805
PB
7236 addr = load_reg(s, rn);
7237 if ((insn & (1 << 24)) == 0)
7238 tcg_gen_addi_i32(addr, addr, -8);
7239 /* Load PC into tmp and CPSR into tmp2. */
7240 tmp = gen_ld32(addr, 0);
7241 tcg_gen_addi_i32(addr, addr, 4);
7242 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7243 if (insn & (1 << 21)) {
7244 /* Base writeback. */
b0109805
PB
7245 if (insn & (1 << 24)) {
7246 tcg_gen_addi_i32(addr, addr, 4);
7247 } else {
7248 tcg_gen_addi_i32(addr, addr, -4);
7249 }
7250 store_reg(s, rn, addr);
7251 } else {
7252 dead_tmp(addr);
9ee6e8bb 7253 }
b0109805 7254 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7255 } else {
7256 /* srs */
7257 op = (insn & 0x1f);
7258 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7259 addr = load_reg(s, 13);
9ee6e8bb 7260 } else {
b0109805
PB
7261 addr = new_tmp();
7262 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7263 }
7264 if ((insn & (1 << 24)) == 0) {
b0109805 7265 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7266 }
b0109805
PB
7267 tmp = load_reg(s, 14);
7268 gen_st32(tmp, addr, 0);
7269 tcg_gen_addi_i32(addr, addr, 4);
7270 tmp = new_tmp();
7271 gen_helper_cpsr_read(tmp);
7272 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7273 if (insn & (1 << 21)) {
7274 if ((insn & (1 << 24)) == 0) {
b0109805 7275 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7276 } else {
b0109805 7277 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7278 }
7279 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7280 store_reg(s, 13, addr);
9ee6e8bb 7281 } else {
b0109805
PB
7282 gen_helper_set_r13_banked(cpu_env,
7283 tcg_const_i32(op), addr);
9ee6e8bb 7284 }
b0109805
PB
7285 } else {
7286 dead_tmp(addr);
9ee6e8bb
PB
7287 }
7288 }
7289 } else {
7290 int i;
7291 /* Load/store multiple. */
b0109805 7292 addr = load_reg(s, rn);
9ee6e8bb
PB
7293 offset = 0;
7294 for (i = 0; i < 16; i++) {
7295 if (insn & (1 << i))
7296 offset += 4;
7297 }
7298 if (insn & (1 << 24)) {
b0109805 7299 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7300 }
7301
7302 for (i = 0; i < 16; i++) {
7303 if ((insn & (1 << i)) == 0)
7304 continue;
7305 if (insn & (1 << 20)) {
7306 /* Load. */
b0109805 7307 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7308 if (i == 15) {
b0109805 7309 gen_bx(s, tmp);
9ee6e8bb 7310 } else {
b0109805 7311 store_reg(s, i, tmp);
9ee6e8bb
PB
7312 }
7313 } else {
7314 /* Store. */
b0109805
PB
7315 tmp = load_reg(s, i);
7316 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7317 }
b0109805 7318 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7319 }
7320 if (insn & (1 << 21)) {
7321 /* Base register writeback. */
7322 if (insn & (1 << 24)) {
b0109805 7323 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7324 }
7325 /* Fault if writeback register is in register list. */
7326 if (insn & (1 << rn))
7327 goto illegal_op;
b0109805
PB
7328 store_reg(s, rn, addr);
7329 } else {
7330 dead_tmp(addr);
9ee6e8bb
PB
7331 }
7332 }
7333 }
7334 break;
7335 case 5: /* Data processing register constant shift. */
7336 if (rn == 15)
7337 gen_op_movl_T0_im(0);
7338 else
7339 gen_movl_T0_reg(s, rn);
7340 gen_movl_T1_reg(s, rm);
7341 op = (insn >> 21) & 0xf;
7342 shiftop = (insn >> 4) & 3;
7343 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7344 conds = (insn & (1 << 20)) != 0;
7345 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7346 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
7347 if (gen_thumb2_data_op(s, op, conds, 0))
7348 goto illegal_op;
7349 if (rd != 15)
7350 gen_movl_reg_T0(s, rd);
7351 break;
7352 case 13: /* Misc data processing. */
7353 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7354 if (op < 4 && (insn & 0xf000) != 0xf000)
7355 goto illegal_op;
7356 switch (op) {
7357 case 0: /* Register controlled shift. */
8984bd2e
PB
7358 tmp = load_reg(s, rn);
7359 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7360 if ((insn & 0x70) != 0)
7361 goto illegal_op;
7362 op = (insn >> 21) & 3;
8984bd2e
PB
7363 logic_cc = (insn & (1 << 20)) != 0;
7364 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7365 if (logic_cc)
7366 gen_logic_CC(tmp);
21aeb343 7367 store_reg_bx(env, s, rd, tmp);
9ee6e8bb
PB
7368 break;
7369 case 1: /* Sign/zero extend. */
5e3f878a 7370 tmp = load_reg(s, rm);
9ee6e8bb
PB
7371 shift = (insn >> 4) & 3;
7372 /* ??? In many cases it's not neccessary to do a
7373 rotate, a shift is sufficient. */
7374 if (shift != 0)
5e3f878a 7375 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7376 op = (insn >> 20) & 7;
7377 switch (op) {
5e3f878a
PB
7378 case 0: gen_sxth(tmp); break;
7379 case 1: gen_uxth(tmp); break;
7380 case 2: gen_sxtb16(tmp); break;
7381 case 3: gen_uxtb16(tmp); break;
7382 case 4: gen_sxtb(tmp); break;
7383 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7384 default: goto illegal_op;
7385 }
7386 if (rn != 15) {
5e3f878a 7387 tmp2 = load_reg(s, rn);
9ee6e8bb 7388 if ((op >> 1) == 1) {
5e3f878a 7389 gen_add16(tmp, tmp2);
9ee6e8bb 7390 } else {
5e3f878a
PB
7391 tcg_gen_add_i32(tmp, tmp, tmp2);
7392 dead_tmp(tmp2);
9ee6e8bb
PB
7393 }
7394 }
5e3f878a 7395 store_reg(s, rd, tmp);
9ee6e8bb
PB
7396 break;
7397 case 2: /* SIMD add/subtract. */
7398 op = (insn >> 20) & 7;
7399 shift = (insn >> 4) & 7;
7400 if ((op & 3) == 3 || (shift & 3) == 3)
7401 goto illegal_op;
6ddbc6e4
PB
7402 tmp = load_reg(s, rn);
7403 tmp2 = load_reg(s, rm);
7404 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7405 dead_tmp(tmp2);
7406 store_reg(s, rd, tmp);
9ee6e8bb
PB
7407 break;
7408 case 3: /* Other data processing. */
7409 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7410 if (op < 4) {
7411 /* Saturating add/subtract. */
d9ba4830
PB
7412 tmp = load_reg(s, rn);
7413 tmp2 = load_reg(s, rm);
9ee6e8bb 7414 if (op & 2)
d9ba4830 7415 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7416 if (op & 1)
d9ba4830 7417 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7418 else
d9ba4830
PB
7419 gen_helper_add_saturate(tmp, tmp, tmp2);
7420 dead_tmp(tmp2);
9ee6e8bb 7421 } else {
d9ba4830 7422 tmp = load_reg(s, rn);
9ee6e8bb
PB
7423 switch (op) {
7424 case 0x0a: /* rbit */
d9ba4830 7425 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7426 break;
7427 case 0x08: /* rev */
66896cb8 7428 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
7429 break;
7430 case 0x09: /* rev16 */
d9ba4830 7431 gen_rev16(tmp);
9ee6e8bb
PB
7432 break;
7433 case 0x0b: /* revsh */
d9ba4830 7434 gen_revsh(tmp);
9ee6e8bb
PB
7435 break;
7436 case 0x10: /* sel */
d9ba4830 7437 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7438 tmp3 = new_tmp();
7439 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7440 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7441 dead_tmp(tmp3);
d9ba4830 7442 dead_tmp(tmp2);
9ee6e8bb
PB
7443 break;
7444 case 0x18: /* clz */
d9ba4830 7445 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7446 break;
7447 default:
7448 goto illegal_op;
7449 }
7450 }
d9ba4830 7451 store_reg(s, rd, tmp);
9ee6e8bb
PB
7452 break;
7453 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7454 op = (insn >> 4) & 0xf;
d9ba4830
PB
7455 tmp = load_reg(s, rn);
7456 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7457 switch ((insn >> 20) & 7) {
7458 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7459 tcg_gen_mul_i32(tmp, tmp, tmp2);
7460 dead_tmp(tmp2);
9ee6e8bb 7461 if (rs != 15) {
d9ba4830 7462 tmp2 = load_reg(s, rs);
9ee6e8bb 7463 if (op)
d9ba4830 7464 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7465 else
d9ba4830
PB
7466 tcg_gen_add_i32(tmp, tmp, tmp2);
7467 dead_tmp(tmp2);
9ee6e8bb 7468 }
9ee6e8bb
PB
7469 break;
7470 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7471 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7472 dead_tmp(tmp2);
9ee6e8bb 7473 if (rs != 15) {
d9ba4830
PB
7474 tmp2 = load_reg(s, rs);
7475 gen_helper_add_setq(tmp, tmp, tmp2);
7476 dead_tmp(tmp2);
9ee6e8bb 7477 }
9ee6e8bb
PB
7478 break;
7479 case 2: /* Dual multiply add. */
7480 case 4: /* Dual multiply subtract. */
7481 if (op)
d9ba4830
PB
7482 gen_swap_half(tmp2);
7483 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7484 /* This addition cannot overflow. */
7485 if (insn & (1 << 22)) {
d9ba4830 7486 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7487 } else {
d9ba4830 7488 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7489 }
d9ba4830 7490 dead_tmp(tmp2);
9ee6e8bb
PB
7491 if (rs != 15)
7492 {
d9ba4830
PB
7493 tmp2 = load_reg(s, rs);
7494 gen_helper_add_setq(tmp, tmp, tmp2);
7495 dead_tmp(tmp2);
9ee6e8bb 7496 }
9ee6e8bb
PB
7497 break;
7498 case 3: /* 32 * 16 -> 32msb */
7499 if (op)
d9ba4830 7500 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7501 else
d9ba4830 7502 gen_sxth(tmp2);
a7812ae4
PB
7503 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7504 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7505 tmp = new_tmp();
a7812ae4 7506 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb
PB
7507 if (rs != 15)
7508 {
d9ba4830
PB
7509 tmp2 = load_reg(s, rs);
7510 gen_helper_add_setq(tmp, tmp, tmp2);
7511 dead_tmp(tmp2);
9ee6e8bb 7512 }
9ee6e8bb
PB
7513 break;
7514 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7515 gen_imull(tmp, tmp2);
7516 if (insn & (1 << 5)) {
7517 gen_roundqd(tmp, tmp2);
7518 dead_tmp(tmp2);
7519 } else {
7520 dead_tmp(tmp);
7521 tmp = tmp2;
7522 }
9ee6e8bb 7523 if (rs != 15) {
d9ba4830 7524 tmp2 = load_reg(s, rs);
9ee6e8bb 7525 if (insn & (1 << 21)) {
d9ba4830 7526 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7527 } else {
d9ba4830 7528 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7529 }
d9ba4830 7530 dead_tmp(tmp2);
2c0262af 7531 }
9ee6e8bb
PB
7532 break;
7533 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7534 gen_helper_usad8(tmp, tmp, tmp2);
7535 dead_tmp(tmp2);
9ee6e8bb 7536 if (rs != 15) {
d9ba4830
PB
7537 tmp2 = load_reg(s, rs);
7538 tcg_gen_add_i32(tmp, tmp, tmp2);
7539 dead_tmp(tmp2);
5fd46862 7540 }
9ee6e8bb 7541 break;
2c0262af 7542 }
d9ba4830 7543 store_reg(s, rd, tmp);
2c0262af 7544 break;
9ee6e8bb
PB
7545 case 6: case 7: /* 64-bit multiply, Divide. */
7546 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7547 tmp = load_reg(s, rn);
7548 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7549 if ((op & 0x50) == 0x10) {
7550 /* sdiv, udiv */
7551 if (!arm_feature(env, ARM_FEATURE_DIV))
7552 goto illegal_op;
7553 if (op & 0x20)
5e3f878a 7554 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7555 else
5e3f878a
PB
7556 gen_helper_sdiv(tmp, tmp, tmp2);
7557 dead_tmp(tmp2);
7558 store_reg(s, rd, tmp);
9ee6e8bb
PB
7559 } else if ((op & 0xe) == 0xc) {
7560 /* Dual multiply accumulate long. */
7561 if (op & 1)
5e3f878a
PB
7562 gen_swap_half(tmp2);
7563 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7564 if (op & 0x10) {
5e3f878a 7565 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7566 } else {
5e3f878a 7567 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7568 }
5e3f878a 7569 dead_tmp(tmp2);
a7812ae4
PB
7570 /* BUGFIX */
7571 tmp64 = tcg_temp_new_i64();
7572 tcg_gen_ext_i32_i64(tmp64, tmp);
7573 dead_tmp(tmp);
7574 gen_addq(s, tmp64, rs, rd);
7575 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7576 } else {
9ee6e8bb
PB
7577 if (op & 0x20) {
7578 /* Unsigned 64-bit multiply */
a7812ae4 7579 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7580 } else {
9ee6e8bb
PB
7581 if (op & 8) {
7582 /* smlalxy */
5e3f878a
PB
7583 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7584 dead_tmp(tmp2);
a7812ae4
PB
7585 tmp64 = tcg_temp_new_i64();
7586 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7587 dead_tmp(tmp);
9ee6e8bb
PB
7588 } else {
7589 /* Signed 64-bit multiply */
a7812ae4 7590 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7591 }
b5ff1b31 7592 }
9ee6e8bb
PB
7593 if (op & 4) {
7594 /* umaal */
a7812ae4
PB
7595 gen_addq_lo(s, tmp64, rs);
7596 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7597 } else if (op & 0x40) {
7598 /* 64-bit accumulate. */
a7812ae4 7599 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7600 }
a7812ae4 7601 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7602 }
2c0262af 7603 break;
9ee6e8bb
PB
7604 }
7605 break;
7606 case 6: case 7: case 14: case 15:
7607 /* Coprocessor. */
7608 if (((insn >> 24) & 3) == 3) {
7609 /* Translate into the equivalent ARM encoding. */
7610 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7611 if (disas_neon_data_insn(env, s, insn))
7612 goto illegal_op;
7613 } else {
7614 if (insn & (1 << 28))
7615 goto illegal_op;
7616 if (disas_coproc_insn (env, s, insn))
7617 goto illegal_op;
7618 }
7619 break;
7620 case 8: case 9: case 10: case 11:
7621 if (insn & (1 << 15)) {
7622 /* Branches, misc control. */
7623 if (insn & 0x5000) {
7624 /* Unconditional branch. */
7625 /* signextend(hw1[10:0]) -> offset[:12]. */
7626 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7627 /* hw1[10:0] -> offset[11:1]. */
7628 offset |= (insn & 0x7ff) << 1;
7629 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7630 offset[24:22] already have the same value because of the
7631 sign extension above. */
7632 offset ^= ((~insn) & (1 << 13)) << 10;
7633 offset ^= ((~insn) & (1 << 11)) << 11;
7634
9ee6e8bb
PB
7635 if (insn & (1 << 14)) {
7636 /* Branch and link. */
b0109805 7637 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7638 gen_movl_reg_T1(s, 14);
b5ff1b31 7639 }
3b46e624 7640
b0109805 7641 offset += s->pc;
9ee6e8bb
PB
7642 if (insn & (1 << 12)) {
7643 /* b/bl */
b0109805 7644 gen_jmp(s, offset);
9ee6e8bb
PB
7645 } else {
7646 /* blx */
b0109805
PB
7647 offset &= ~(uint32_t)2;
7648 gen_bx_im(s, offset);
2c0262af 7649 }
9ee6e8bb
PB
7650 } else if (((insn >> 23) & 7) == 7) {
7651 /* Misc control */
7652 if (insn & (1 << 13))
7653 goto illegal_op;
7654
7655 if (insn & (1 << 26)) {
7656 /* Secure monitor call (v6Z) */
7657 goto illegal_op; /* not implemented. */
2c0262af 7658 } else {
9ee6e8bb
PB
7659 op = (insn >> 20) & 7;
7660 switch (op) {
7661 case 0: /* msr cpsr. */
7662 if (IS_M(env)) {
8984bd2e
PB
7663 tmp = load_reg(s, rn);
7664 addr = tcg_const_i32(insn & 0xff);
7665 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7666 gen_lookup_tb(s);
7667 break;
7668 }
7669 /* fall through */
7670 case 1: /* msr spsr. */
7671 if (IS_M(env))
7672 goto illegal_op;
7673 gen_movl_T0_reg(s, rn);
7674 if (gen_set_psr_T0(s,
7675 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7676 op == 1))
7677 goto illegal_op;
7678 break;
7679 case 2: /* cps, nop-hint. */
7680 if (((insn >> 8) & 7) == 0) {
7681 gen_nop_hint(s, insn & 0xff);
7682 }
7683 /* Implemented as NOP in user mode. */
7684 if (IS_USER(s))
7685 break;
7686 offset = 0;
7687 imm = 0;
7688 if (insn & (1 << 10)) {
7689 if (insn & (1 << 7))
7690 offset |= CPSR_A;
7691 if (insn & (1 << 6))
7692 offset |= CPSR_I;
7693 if (insn & (1 << 5))
7694 offset |= CPSR_F;
7695 if (insn & (1 << 9))
7696 imm = CPSR_A | CPSR_I | CPSR_F;
7697 }
7698 if (insn & (1 << 8)) {
7699 offset |= 0x1f;
7700 imm |= (insn & 0x1f);
7701 }
7702 if (offset) {
7703 gen_op_movl_T0_im(imm);
7704 gen_set_psr_T0(s, offset, 0);
7705 }
7706 break;
7707 case 3: /* Special control operations. */
7708 op = (insn >> 4) & 0xf;
7709 switch (op) {
7710 case 2: /* clrex */
8f8e3aa4 7711 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7712 break;
7713 case 4: /* dsb */
7714 case 5: /* dmb */
7715 case 6: /* isb */
7716 /* These execute as NOPs. */
7717 ARCH(7);
7718 break;
7719 default:
7720 goto illegal_op;
7721 }
7722 break;
7723 case 4: /* bxj */
7724 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7725 tmp = load_reg(s, rn);
7726 gen_bx(s, tmp);
9ee6e8bb
PB
7727 break;
7728 case 5: /* Exception return. */
7729 /* Unpredictable in user mode. */
7730 goto illegal_op;
7731 case 6: /* mrs cpsr. */
8984bd2e 7732 tmp = new_tmp();
9ee6e8bb 7733 if (IS_M(env)) {
8984bd2e
PB
7734 addr = tcg_const_i32(insn & 0xff);
7735 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7736 } else {
8984bd2e 7737 gen_helper_cpsr_read(tmp);
9ee6e8bb 7738 }
8984bd2e 7739 store_reg(s, rd, tmp);
9ee6e8bb
PB
7740 break;
7741 case 7: /* mrs spsr. */
7742 /* Not accessible in user mode. */
7743 if (IS_USER(s) || IS_M(env))
7744 goto illegal_op;
d9ba4830
PB
7745 tmp = load_cpu_field(spsr);
7746 store_reg(s, rd, tmp);
9ee6e8bb 7747 break;
2c0262af
FB
7748 }
7749 }
9ee6e8bb
PB
7750 } else {
7751 /* Conditional branch. */
7752 op = (insn >> 22) & 0xf;
7753 /* Generate a conditional jump to next instruction. */
7754 s->condlabel = gen_new_label();
d9ba4830 7755 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7756 s->condjmp = 1;
7757
7758 /* offset[11:1] = insn[10:0] */
7759 offset = (insn & 0x7ff) << 1;
7760 /* offset[17:12] = insn[21:16]. */
7761 offset |= (insn & 0x003f0000) >> 4;
7762 /* offset[31:20] = insn[26]. */
7763 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7764 /* offset[18] = insn[13]. */
7765 offset |= (insn & (1 << 13)) << 5;
7766 /* offset[19] = insn[11]. */
7767 offset |= (insn & (1 << 11)) << 8;
7768
7769 /* jump to the offset */
b0109805 7770 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7771 }
7772 } else {
7773 /* Data processing immediate. */
7774 if (insn & (1 << 25)) {
7775 if (insn & (1 << 24)) {
7776 if (insn & (1 << 20))
7777 goto illegal_op;
7778 /* Bitfield/Saturate. */
7779 op = (insn >> 21) & 7;
7780 imm = insn & 0x1f;
7781 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7782 if (rn == 15) {
7783 tmp = new_tmp();
7784 tcg_gen_movi_i32(tmp, 0);
7785 } else {
7786 tmp = load_reg(s, rn);
7787 }
9ee6e8bb
PB
7788 switch (op) {
7789 case 2: /* Signed bitfield extract. */
7790 imm++;
7791 if (shift + imm > 32)
7792 goto illegal_op;
7793 if (imm < 32)
6ddbc6e4 7794 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7795 break;
7796 case 6: /* Unsigned bitfield extract. */
7797 imm++;
7798 if (shift + imm > 32)
7799 goto illegal_op;
7800 if (imm < 32)
6ddbc6e4 7801 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7802 break;
7803 case 3: /* Bitfield insert/clear. */
7804 if (imm < shift)
7805 goto illegal_op;
7806 imm = imm + 1 - shift;
7807 if (imm != 32) {
6ddbc6e4 7808 tmp2 = load_reg(s, rd);
8f8e3aa4 7809 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7810 dead_tmp(tmp2);
9ee6e8bb
PB
7811 }
7812 break;
7813 case 7:
7814 goto illegal_op;
7815 default: /* Saturate. */
9ee6e8bb
PB
7816 if (shift) {
7817 if (op & 1)
6ddbc6e4 7818 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7819 else
6ddbc6e4 7820 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7821 }
6ddbc6e4 7822 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7823 if (op & 4) {
7824 /* Unsigned. */
9ee6e8bb 7825 if ((op & 1) && shift == 0)
6ddbc6e4 7826 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7827 else
6ddbc6e4 7828 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7829 } else {
9ee6e8bb 7830 /* Signed. */
9ee6e8bb 7831 if ((op & 1) && shift == 0)
6ddbc6e4 7832 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7833 else
6ddbc6e4 7834 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7835 }
9ee6e8bb 7836 break;
2c0262af 7837 }
6ddbc6e4 7838 store_reg(s, rd, tmp);
9ee6e8bb
PB
7839 } else {
7840 imm = ((insn & 0x04000000) >> 15)
7841 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7842 if (insn & (1 << 22)) {
7843 /* 16-bit immediate. */
7844 imm |= (insn >> 4) & 0xf000;
7845 if (insn & (1 << 23)) {
7846 /* movt */
5e3f878a 7847 tmp = load_reg(s, rd);
86831435 7848 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7849 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7850 } else {
9ee6e8bb 7851 /* movw */
5e3f878a
PB
7852 tmp = new_tmp();
7853 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7854 }
7855 } else {
9ee6e8bb
PB
7856 /* Add/sub 12-bit immediate. */
7857 if (rn == 15) {
b0109805 7858 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7859 if (insn & (1 << 23))
b0109805 7860 offset -= imm;
9ee6e8bb 7861 else
b0109805 7862 offset += imm;
5e3f878a
PB
7863 tmp = new_tmp();
7864 tcg_gen_movi_i32(tmp, offset);
2c0262af 7865 } else {
5e3f878a 7866 tmp = load_reg(s, rn);
9ee6e8bb 7867 if (insn & (1 << 23))
5e3f878a 7868 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7869 else
5e3f878a 7870 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7871 }
9ee6e8bb 7872 }
5e3f878a 7873 store_reg(s, rd, tmp);
191abaa2 7874 }
9ee6e8bb
PB
7875 } else {
7876 int shifter_out = 0;
7877 /* modified 12-bit immediate. */
7878 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7879 imm = (insn & 0xff);
7880 switch (shift) {
7881 case 0: /* XY */
7882 /* Nothing to do. */
7883 break;
7884 case 1: /* 00XY00XY */
7885 imm |= imm << 16;
7886 break;
7887 case 2: /* XY00XY00 */
7888 imm |= imm << 16;
7889 imm <<= 8;
7890 break;
7891 case 3: /* XYXYXYXY */
7892 imm |= imm << 16;
7893 imm |= imm << 8;
7894 break;
7895 default: /* Rotated constant. */
7896 shift = (shift << 1) | (imm >> 7);
7897 imm |= 0x80;
7898 imm = imm << (32 - shift);
7899 shifter_out = 1;
7900 break;
b5ff1b31 7901 }
9ee6e8bb
PB
7902 gen_op_movl_T1_im(imm);
7903 rn = (insn >> 16) & 0xf;
7904 if (rn == 15)
7905 gen_op_movl_T0_im(0);
7906 else
7907 gen_movl_T0_reg(s, rn);
7908 op = (insn >> 21) & 0xf;
7909 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7910 shifter_out))
7911 goto illegal_op;
7912 rd = (insn >> 8) & 0xf;
7913 if (rd != 15) {
7914 gen_movl_reg_T0(s, rd);
2c0262af 7915 }
2c0262af 7916 }
9ee6e8bb
PB
7917 }
7918 break;
7919 case 12: /* Load/store single data item. */
7920 {
7921 int postinc = 0;
7922 int writeback = 0;
b0109805 7923 int user;
9ee6e8bb
PB
7924 if ((insn & 0x01100000) == 0x01000000) {
7925 if (disas_neon_ls_insn(env, s, insn))
c1713132 7926 goto illegal_op;
9ee6e8bb
PB
7927 break;
7928 }
b0109805 7929 user = IS_USER(s);
9ee6e8bb 7930 if (rn == 15) {
b0109805 7931 addr = new_tmp();
9ee6e8bb
PB
7932 /* PC relative. */
7933 /* s->pc has already been incremented by 4. */
7934 imm = s->pc & 0xfffffffc;
7935 if (insn & (1 << 23))
7936 imm += insn & 0xfff;
7937 else
7938 imm -= insn & 0xfff;
b0109805 7939 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7940 } else {
b0109805 7941 addr = load_reg(s, rn);
9ee6e8bb
PB
7942 if (insn & (1 << 23)) {
7943 /* Positive offset. */
7944 imm = insn & 0xfff;
b0109805 7945 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7946 } else {
7947 op = (insn >> 8) & 7;
7948 imm = insn & 0xff;
7949 switch (op) {
7950 case 0: case 8: /* Shifted Register. */
7951 shift = (insn >> 4) & 0xf;
7952 if (shift > 3)
18c9b560 7953 goto illegal_op;
b26eefb6 7954 tmp = load_reg(s, rm);
9ee6e8bb 7955 if (shift)
b26eefb6 7956 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7957 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7958 dead_tmp(tmp);
9ee6e8bb
PB
7959 break;
7960 case 4: /* Negative offset. */
b0109805 7961 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7962 break;
7963 case 6: /* User privilege. */
b0109805
PB
7964 tcg_gen_addi_i32(addr, addr, imm);
7965 user = 1;
9ee6e8bb
PB
7966 break;
7967 case 1: /* Post-decrement. */
7968 imm = -imm;
7969 /* Fall through. */
7970 case 3: /* Post-increment. */
9ee6e8bb
PB
7971 postinc = 1;
7972 writeback = 1;
7973 break;
7974 case 5: /* Pre-decrement. */
7975 imm = -imm;
7976 /* Fall through. */
7977 case 7: /* Pre-increment. */
b0109805 7978 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7979 writeback = 1;
7980 break;
7981 default:
b7bcbe95 7982 goto illegal_op;
9ee6e8bb
PB
7983 }
7984 }
7985 }
7986 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7987 if (insn & (1 << 20)) {
7988 /* Load. */
7989 if (rs == 15 && op != 2) {
7990 if (op & 2)
b5ff1b31 7991 goto illegal_op;
9ee6e8bb
PB
7992 /* Memory hint. Implemented as NOP. */
7993 } else {
7994 switch (op) {
b0109805
PB
7995 case 0: tmp = gen_ld8u(addr, user); break;
7996 case 4: tmp = gen_ld8s(addr, user); break;
7997 case 1: tmp = gen_ld16u(addr, user); break;
7998 case 5: tmp = gen_ld16s(addr, user); break;
7999 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
8000 default: goto illegal_op;
8001 }
8002 if (rs == 15) {
b0109805 8003 gen_bx(s, tmp);
9ee6e8bb 8004 } else {
b0109805 8005 store_reg(s, rs, tmp);
9ee6e8bb
PB
8006 }
8007 }
8008 } else {
8009 /* Store. */
8010 if (rs == 15)
b7bcbe95 8011 goto illegal_op;
b0109805 8012 tmp = load_reg(s, rs);
9ee6e8bb 8013 switch (op) {
b0109805
PB
8014 case 0: gen_st8(tmp, addr, user); break;
8015 case 1: gen_st16(tmp, addr, user); break;
8016 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 8017 default: goto illegal_op;
b7bcbe95 8018 }
2c0262af 8019 }
9ee6e8bb 8020 if (postinc)
b0109805
PB
8021 tcg_gen_addi_i32(addr, addr, imm);
8022 if (writeback) {
8023 store_reg(s, rn, addr);
8024 } else {
8025 dead_tmp(addr);
8026 }
9ee6e8bb
PB
8027 }
8028 break;
8029 default:
8030 goto illegal_op;
2c0262af 8031 }
9ee6e8bb
PB
8032 return 0;
8033illegal_op:
8034 return 1;
2c0262af
FB
8035}
8036
9ee6e8bb 8037static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
8038{
8039 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8040 int32_t offset;
8041 int i;
b26eefb6 8042 TCGv tmp;
d9ba4830 8043 TCGv tmp2;
b0109805 8044 TCGv addr;
99c475ab 8045
9ee6e8bb
PB
8046 if (s->condexec_mask) {
8047 cond = s->condexec_cond;
8048 s->condlabel = gen_new_label();
d9ba4830 8049 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
8050 s->condjmp = 1;
8051 }
8052
b5ff1b31 8053 insn = lduw_code(s->pc);
99c475ab 8054 s->pc += 2;
b5ff1b31 8055
99c475ab
FB
8056 switch (insn >> 12) {
8057 case 0: case 1:
8058 rd = insn & 7;
8059 op = (insn >> 11) & 3;
8060 if (op == 3) {
8061 /* add/subtract */
8062 rn = (insn >> 3) & 7;
8063 gen_movl_T0_reg(s, rn);
8064 if (insn & (1 << 10)) {
8065 /* immediate */
8066 gen_op_movl_T1_im((insn >> 6) & 7);
8067 } else {
8068 /* reg */
8069 rm = (insn >> 6) & 7;
8070 gen_movl_T1_reg(s, rm);
8071 }
9ee6e8bb
PB
8072 if (insn & (1 << 9)) {
8073 if (s->condexec_mask)
8074 gen_op_subl_T0_T1();
8075 else
8076 gen_op_subl_T0_T1_cc();
8077 } else {
8078 if (s->condexec_mask)
8079 gen_op_addl_T0_T1();
8080 else
8081 gen_op_addl_T0_T1_cc();
8082 }
99c475ab
FB
8083 gen_movl_reg_T0(s, rd);
8084 } else {
8085 /* shift immediate */
8086 rm = (insn >> 3) & 7;
8087 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
8088 tmp = load_reg(s, rm);
8089 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8090 if (!s->condexec_mask)
8091 gen_logic_CC(tmp);
8092 store_reg(s, rd, tmp);
99c475ab
FB
8093 }
8094 break;
8095 case 2: case 3:
8096 /* arithmetic large immediate */
8097 op = (insn >> 11) & 3;
8098 rd = (insn >> 8) & 0x7;
8099 if (op == 0) {
8100 gen_op_movl_T0_im(insn & 0xff);
8101 } else {
8102 gen_movl_T0_reg(s, rd);
8103 gen_op_movl_T1_im(insn & 0xff);
8104 }
8105 switch (op) {
8106 case 0: /* mov */
9ee6e8bb
PB
8107 if (!s->condexec_mask)
8108 gen_op_logic_T0_cc();
99c475ab
FB
8109 break;
8110 case 1: /* cmp */
8111 gen_op_subl_T0_T1_cc();
8112 break;
8113 case 2: /* add */
9ee6e8bb
PB
8114 if (s->condexec_mask)
8115 gen_op_addl_T0_T1();
8116 else
8117 gen_op_addl_T0_T1_cc();
99c475ab
FB
8118 break;
8119 case 3: /* sub */
9ee6e8bb
PB
8120 if (s->condexec_mask)
8121 gen_op_subl_T0_T1();
8122 else
8123 gen_op_subl_T0_T1_cc();
99c475ab
FB
8124 break;
8125 }
8126 if (op != 1)
8127 gen_movl_reg_T0(s, rd);
8128 break;
8129 case 4:
8130 if (insn & (1 << 11)) {
8131 rd = (insn >> 8) & 7;
5899f386
FB
8132 /* load pc-relative. Bit 1 of PC is ignored. */
8133 val = s->pc + 2 + ((insn & 0xff) * 4);
8134 val &= ~(uint32_t)2;
b0109805
PB
8135 addr = new_tmp();
8136 tcg_gen_movi_i32(addr, val);
8137 tmp = gen_ld32(addr, IS_USER(s));
8138 dead_tmp(addr);
8139 store_reg(s, rd, tmp);
99c475ab
FB
8140 break;
8141 }
8142 if (insn & (1 << 10)) {
8143 /* data processing extended or blx */
8144 rd = (insn & 7) | ((insn >> 4) & 8);
8145 rm = (insn >> 3) & 0xf;
8146 op = (insn >> 8) & 3;
8147 switch (op) {
8148 case 0: /* add */
8149 gen_movl_T0_reg(s, rd);
8150 gen_movl_T1_reg(s, rm);
8151 gen_op_addl_T0_T1();
8152 gen_movl_reg_T0(s, rd);
8153 break;
8154 case 1: /* cmp */
8155 gen_movl_T0_reg(s, rd);
8156 gen_movl_T1_reg(s, rm);
8157 gen_op_subl_T0_T1_cc();
8158 break;
8159 case 2: /* mov/cpy */
8160 gen_movl_T0_reg(s, rm);
8161 gen_movl_reg_T0(s, rd);
8162 break;
8163 case 3:/* branch [and link] exchange thumb register */
b0109805 8164 tmp = load_reg(s, rm);
99c475ab
FB
8165 if (insn & (1 << 7)) {
8166 val = (uint32_t)s->pc | 1;
b0109805
PB
8167 tmp2 = new_tmp();
8168 tcg_gen_movi_i32(tmp2, val);
8169 store_reg(s, 14, tmp2);
99c475ab 8170 }
d9ba4830 8171 gen_bx(s, tmp);
99c475ab
FB
8172 break;
8173 }
8174 break;
8175 }
8176
8177 /* data processing register */
8178 rd = insn & 7;
8179 rm = (insn >> 3) & 7;
8180 op = (insn >> 6) & 0xf;
8181 if (op == 2 || op == 3 || op == 4 || op == 7) {
8182 /* the shift/rotate ops want the operands backwards */
8183 val = rm;
8184 rm = rd;
8185 rd = val;
8186 val = 1;
8187 } else {
8188 val = 0;
8189 }
8190
8191 if (op == 9) /* neg */
8192 gen_op_movl_T0_im(0);
8193 else if (op != 0xf) /* mvn doesn't read its first operand */
8194 gen_movl_T0_reg(s, rd);
8195
8196 gen_movl_T1_reg(s, rm);
5899f386 8197 switch (op) {
99c475ab
FB
8198 case 0x0: /* and */
8199 gen_op_andl_T0_T1();
9ee6e8bb
PB
8200 if (!s->condexec_mask)
8201 gen_op_logic_T0_cc();
99c475ab
FB
8202 break;
8203 case 0x1: /* eor */
8204 gen_op_xorl_T0_T1();
9ee6e8bb
PB
8205 if (!s->condexec_mask)
8206 gen_op_logic_T0_cc();
99c475ab
FB
8207 break;
8208 case 0x2: /* lsl */
9ee6e8bb 8209 if (s->condexec_mask) {
8984bd2e 8210 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8211 } else {
8984bd2e 8212 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8213 gen_op_logic_T1_cc();
8214 }
99c475ab
FB
8215 break;
8216 case 0x3: /* lsr */
9ee6e8bb 8217 if (s->condexec_mask) {
8984bd2e 8218 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8219 } else {
8984bd2e 8220 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8221 gen_op_logic_T1_cc();
8222 }
99c475ab
FB
8223 break;
8224 case 0x4: /* asr */
9ee6e8bb 8225 if (s->condexec_mask) {
8984bd2e 8226 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8227 } else {
8984bd2e 8228 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8229 gen_op_logic_T1_cc();
8230 }
99c475ab
FB
8231 break;
8232 case 0x5: /* adc */
9ee6e8bb 8233 if (s->condexec_mask)
b26eefb6 8234 gen_adc_T0_T1();
9ee6e8bb
PB
8235 else
8236 gen_op_adcl_T0_T1_cc();
99c475ab
FB
8237 break;
8238 case 0x6: /* sbc */
9ee6e8bb 8239 if (s->condexec_mask)
3670669c 8240 gen_sbc_T0_T1();
9ee6e8bb
PB
8241 else
8242 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
8243 break;
8244 case 0x7: /* ror */
9ee6e8bb 8245 if (s->condexec_mask) {
8984bd2e 8246 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8247 } else {
8984bd2e 8248 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8249 gen_op_logic_T1_cc();
8250 }
99c475ab
FB
8251 break;
8252 case 0x8: /* tst */
8253 gen_op_andl_T0_T1();
8254 gen_op_logic_T0_cc();
8255 rd = 16;
5899f386 8256 break;
99c475ab 8257 case 0x9: /* neg */
9ee6e8bb 8258 if (s->condexec_mask)
390efc54 8259 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
8260 else
8261 gen_op_subl_T0_T1_cc();
99c475ab
FB
8262 break;
8263 case 0xa: /* cmp */
8264 gen_op_subl_T0_T1_cc();
8265 rd = 16;
8266 break;
8267 case 0xb: /* cmn */
8268 gen_op_addl_T0_T1_cc();
8269 rd = 16;
8270 break;
8271 case 0xc: /* orr */
8272 gen_op_orl_T0_T1();
9ee6e8bb
PB
8273 if (!s->condexec_mask)
8274 gen_op_logic_T0_cc();
99c475ab
FB
8275 break;
8276 case 0xd: /* mul */
8277 gen_op_mull_T0_T1();
9ee6e8bb
PB
8278 if (!s->condexec_mask)
8279 gen_op_logic_T0_cc();
99c475ab
FB
8280 break;
8281 case 0xe: /* bic */
8282 gen_op_bicl_T0_T1();
9ee6e8bb
PB
8283 if (!s->condexec_mask)
8284 gen_op_logic_T0_cc();
99c475ab
FB
8285 break;
8286 case 0xf: /* mvn */
8287 gen_op_notl_T1();
9ee6e8bb
PB
8288 if (!s->condexec_mask)
8289 gen_op_logic_T1_cc();
99c475ab 8290 val = 1;
5899f386 8291 rm = rd;
99c475ab
FB
8292 break;
8293 }
8294 if (rd != 16) {
8295 if (val)
5899f386 8296 gen_movl_reg_T1(s, rm);
99c475ab
FB
8297 else
8298 gen_movl_reg_T0(s, rd);
8299 }
8300 break;
8301
8302 case 5:
8303 /* load/store register offset. */
8304 rd = insn & 7;
8305 rn = (insn >> 3) & 7;
8306 rm = (insn >> 6) & 7;
8307 op = (insn >> 9) & 7;
b0109805 8308 addr = load_reg(s, rn);
b26eefb6 8309 tmp = load_reg(s, rm);
b0109805 8310 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8311 dead_tmp(tmp);
99c475ab
FB
8312
8313 if (op < 3) /* store */
b0109805 8314 tmp = load_reg(s, rd);
99c475ab
FB
8315
8316 switch (op) {
8317 case 0: /* str */
b0109805 8318 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8319 break;
8320 case 1: /* strh */
b0109805 8321 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8322 break;
8323 case 2: /* strb */
b0109805 8324 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8325 break;
8326 case 3: /* ldrsb */
b0109805 8327 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8328 break;
8329 case 4: /* ldr */
b0109805 8330 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8331 break;
8332 case 5: /* ldrh */
b0109805 8333 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8334 break;
8335 case 6: /* ldrb */
b0109805 8336 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8337 break;
8338 case 7: /* ldrsh */
b0109805 8339 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8340 break;
8341 }
8342 if (op >= 3) /* load */
b0109805
PB
8343 store_reg(s, rd, tmp);
8344 dead_tmp(addr);
99c475ab
FB
8345 break;
8346
8347 case 6:
8348 /* load/store word immediate offset */
8349 rd = insn & 7;
8350 rn = (insn >> 3) & 7;
b0109805 8351 addr = load_reg(s, rn);
99c475ab 8352 val = (insn >> 4) & 0x7c;
b0109805 8353 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8354
8355 if (insn & (1 << 11)) {
8356 /* load */
b0109805
PB
8357 tmp = gen_ld32(addr, IS_USER(s));
8358 store_reg(s, rd, tmp);
99c475ab
FB
8359 } else {
8360 /* store */
b0109805
PB
8361 tmp = load_reg(s, rd);
8362 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8363 }
b0109805 8364 dead_tmp(addr);
99c475ab
FB
8365 break;
8366
8367 case 7:
8368 /* load/store byte immediate offset */
8369 rd = insn & 7;
8370 rn = (insn >> 3) & 7;
b0109805 8371 addr = load_reg(s, rn);
99c475ab 8372 val = (insn >> 6) & 0x1f;
b0109805 8373 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8374
8375 if (insn & (1 << 11)) {
8376 /* load */
b0109805
PB
8377 tmp = gen_ld8u(addr, IS_USER(s));
8378 store_reg(s, rd, tmp);
99c475ab
FB
8379 } else {
8380 /* store */
b0109805
PB
8381 tmp = load_reg(s, rd);
8382 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8383 }
b0109805 8384 dead_tmp(addr);
99c475ab
FB
8385 break;
8386
8387 case 8:
8388 /* load/store halfword immediate offset */
8389 rd = insn & 7;
8390 rn = (insn >> 3) & 7;
b0109805 8391 addr = load_reg(s, rn);
99c475ab 8392 val = (insn >> 5) & 0x3e;
b0109805 8393 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8394
8395 if (insn & (1 << 11)) {
8396 /* load */
b0109805
PB
8397 tmp = gen_ld16u(addr, IS_USER(s));
8398 store_reg(s, rd, tmp);
99c475ab
FB
8399 } else {
8400 /* store */
b0109805
PB
8401 tmp = load_reg(s, rd);
8402 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8403 }
b0109805 8404 dead_tmp(addr);
99c475ab
FB
8405 break;
8406
8407 case 9:
8408 /* load/store from stack */
8409 rd = (insn >> 8) & 7;
b0109805 8410 addr = load_reg(s, 13);
99c475ab 8411 val = (insn & 0xff) * 4;
b0109805 8412 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8413
8414 if (insn & (1 << 11)) {
8415 /* load */
b0109805
PB
8416 tmp = gen_ld32(addr, IS_USER(s));
8417 store_reg(s, rd, tmp);
99c475ab
FB
8418 } else {
8419 /* store */
b0109805
PB
8420 tmp = load_reg(s, rd);
8421 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8422 }
b0109805 8423 dead_tmp(addr);
99c475ab
FB
8424 break;
8425
8426 case 10:
8427 /* add to high reg */
8428 rd = (insn >> 8) & 7;
5899f386
FB
8429 if (insn & (1 << 11)) {
8430 /* SP */
5e3f878a 8431 tmp = load_reg(s, 13);
5899f386
FB
8432 } else {
8433 /* PC. bit 1 is ignored. */
5e3f878a
PB
8434 tmp = new_tmp();
8435 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8436 }
99c475ab 8437 val = (insn & 0xff) * 4;
5e3f878a
PB
8438 tcg_gen_addi_i32(tmp, tmp, val);
8439 store_reg(s, rd, tmp);
99c475ab
FB
8440 break;
8441
8442 case 11:
8443 /* misc */
8444 op = (insn >> 8) & 0xf;
8445 switch (op) {
8446 case 0:
8447 /* adjust stack pointer */
b26eefb6 8448 tmp = load_reg(s, 13);
99c475ab
FB
8449 val = (insn & 0x7f) * 4;
8450 if (insn & (1 << 7))
6a0d8a1d 8451 val = -(int32_t)val;
b26eefb6
PB
8452 tcg_gen_addi_i32(tmp, tmp, val);
8453 store_reg(s, 13, tmp);
99c475ab
FB
8454 break;
8455
9ee6e8bb
PB
8456 case 2: /* sign/zero extend. */
8457 ARCH(6);
8458 rd = insn & 7;
8459 rm = (insn >> 3) & 7;
b0109805 8460 tmp = load_reg(s, rm);
9ee6e8bb 8461 switch ((insn >> 6) & 3) {
b0109805
PB
8462 case 0: gen_sxth(tmp); break;
8463 case 1: gen_sxtb(tmp); break;
8464 case 2: gen_uxth(tmp); break;
8465 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8466 }
b0109805 8467 store_reg(s, rd, tmp);
9ee6e8bb 8468 break;
99c475ab
FB
8469 case 4: case 5: case 0xc: case 0xd:
8470 /* push/pop */
b0109805 8471 addr = load_reg(s, 13);
5899f386
FB
8472 if (insn & (1 << 8))
8473 offset = 4;
99c475ab 8474 else
5899f386
FB
8475 offset = 0;
8476 for (i = 0; i < 8; i++) {
8477 if (insn & (1 << i))
8478 offset += 4;
8479 }
8480 if ((insn & (1 << 11)) == 0) {
b0109805 8481 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8482 }
99c475ab
FB
8483 for (i = 0; i < 8; i++) {
8484 if (insn & (1 << i)) {
8485 if (insn & (1 << 11)) {
8486 /* pop */
b0109805
PB
8487 tmp = gen_ld32(addr, IS_USER(s));
8488 store_reg(s, i, tmp);
99c475ab
FB
8489 } else {
8490 /* push */
b0109805
PB
8491 tmp = load_reg(s, i);
8492 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8493 }
5899f386 8494 /* advance to the next address. */
b0109805 8495 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8496 }
8497 }
a50f5b91 8498 TCGV_UNUSED(tmp);
99c475ab
FB
8499 if (insn & (1 << 8)) {
8500 if (insn & (1 << 11)) {
8501 /* pop pc */
b0109805 8502 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8503 /* don't set the pc until the rest of the instruction
8504 has completed */
8505 } else {
8506 /* push lr */
b0109805
PB
8507 tmp = load_reg(s, 14);
8508 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8509 }
b0109805 8510 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8511 }
5899f386 8512 if ((insn & (1 << 11)) == 0) {
b0109805 8513 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8514 }
99c475ab 8515 /* write back the new stack pointer */
b0109805 8516 store_reg(s, 13, addr);
99c475ab
FB
8517 /* set the new PC value */
8518 if ((insn & 0x0900) == 0x0900)
b0109805 8519 gen_bx(s, tmp);
99c475ab
FB
8520 break;
8521
9ee6e8bb
PB
8522 case 1: case 3: case 9: case 11: /* czb */
8523 rm = insn & 7;
d9ba4830 8524 tmp = load_reg(s, rm);
9ee6e8bb
PB
8525 s->condlabel = gen_new_label();
8526 s->condjmp = 1;
8527 if (insn & (1 << 11))
cb63669a 8528 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8529 else
cb63669a 8530 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8531 dead_tmp(tmp);
9ee6e8bb
PB
8532 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8533 val = (uint32_t)s->pc + 2;
8534 val += offset;
8535 gen_jmp(s, val);
8536 break;
8537
8538 case 15: /* IT, nop-hint. */
8539 if ((insn & 0xf) == 0) {
8540 gen_nop_hint(s, (insn >> 4) & 0xf);
8541 break;
8542 }
8543 /* If Then. */
8544 s->condexec_cond = (insn >> 4) & 0xe;
8545 s->condexec_mask = insn & 0x1f;
8546 /* No actual code generated for this insn, just setup state. */
8547 break;
8548
06c949e6 8549 case 0xe: /* bkpt */
9ee6e8bb 8550 gen_set_condexec(s);
5e3f878a 8551 gen_set_pc_im(s->pc - 2);
d9ba4830 8552 gen_exception(EXCP_BKPT);
06c949e6
PB
8553 s->is_jmp = DISAS_JUMP;
8554 break;
8555
9ee6e8bb
PB
8556 case 0xa: /* rev */
8557 ARCH(6);
8558 rn = (insn >> 3) & 0x7;
8559 rd = insn & 0x7;
b0109805 8560 tmp = load_reg(s, rn);
9ee6e8bb 8561 switch ((insn >> 6) & 3) {
66896cb8 8562 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
8563 case 1: gen_rev16(tmp); break;
8564 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8565 default: goto illegal_op;
8566 }
b0109805 8567 store_reg(s, rd, tmp);
9ee6e8bb
PB
8568 break;
8569
8570 case 6: /* cps */
8571 ARCH(6);
8572 if (IS_USER(s))
8573 break;
8574 if (IS_M(env)) {
8984bd2e 8575 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8576 /* PRIMASK */
8984bd2e
PB
8577 if (insn & 1) {
8578 addr = tcg_const_i32(16);
8579 gen_helper_v7m_msr(cpu_env, addr, tmp);
8580 }
9ee6e8bb 8581 /* FAULTMASK */
8984bd2e
PB
8582 if (insn & 2) {
8583 addr = tcg_const_i32(17);
8584 gen_helper_v7m_msr(cpu_env, addr, tmp);
8585 }
9ee6e8bb
PB
8586 gen_lookup_tb(s);
8587 } else {
8588 if (insn & (1 << 4))
8589 shift = CPSR_A | CPSR_I | CPSR_F;
8590 else
8591 shift = 0;
8592
8593 val = ((insn & 7) << 6) & shift;
8594 gen_op_movl_T0_im(val);
8595 gen_set_psr_T0(s, shift, 0);
8596 }
8597 break;
8598
99c475ab
FB
8599 default:
8600 goto undef;
8601 }
8602 break;
8603
8604 case 12:
8605 /* load/store multiple */
8606 rn = (insn >> 8) & 0x7;
b0109805 8607 addr = load_reg(s, rn);
99c475ab
FB
8608 for (i = 0; i < 8; i++) {
8609 if (insn & (1 << i)) {
99c475ab
FB
8610 if (insn & (1 << 11)) {
8611 /* load */
b0109805
PB
8612 tmp = gen_ld32(addr, IS_USER(s));
8613 store_reg(s, i, tmp);
99c475ab
FB
8614 } else {
8615 /* store */
b0109805
PB
8616 tmp = load_reg(s, i);
8617 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8618 }
5899f386 8619 /* advance to the next address */
b0109805 8620 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8621 }
8622 }
5899f386 8623 /* Base register writeback. */
b0109805
PB
8624 if ((insn & (1 << rn)) == 0) {
8625 store_reg(s, rn, addr);
8626 } else {
8627 dead_tmp(addr);
8628 }
99c475ab
FB
8629 break;
8630
8631 case 13:
8632 /* conditional branch or swi */
8633 cond = (insn >> 8) & 0xf;
8634 if (cond == 0xe)
8635 goto undef;
8636
8637 if (cond == 0xf) {
8638 /* swi */
9ee6e8bb 8639 gen_set_condexec(s);
422ebf69 8640 gen_set_pc_im(s->pc);
9ee6e8bb 8641 s->is_jmp = DISAS_SWI;
99c475ab
FB
8642 break;
8643 }
8644 /* generate a conditional jump to next instruction */
e50e6a20 8645 s->condlabel = gen_new_label();
d9ba4830 8646 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8647 s->condjmp = 1;
99c475ab
FB
8648 gen_movl_T1_reg(s, 15);
8649
8650 /* jump to the offset */
5899f386 8651 val = (uint32_t)s->pc + 2;
99c475ab 8652 offset = ((int32_t)insn << 24) >> 24;
5899f386 8653 val += offset << 1;
8aaca4c0 8654 gen_jmp(s, val);
99c475ab
FB
8655 break;
8656
8657 case 14:
358bf29e 8658 if (insn & (1 << 11)) {
9ee6e8bb
PB
8659 if (disas_thumb2_insn(env, s, insn))
8660 goto undef32;
358bf29e
PB
8661 break;
8662 }
9ee6e8bb 8663 /* unconditional branch */
99c475ab
FB
8664 val = (uint32_t)s->pc;
8665 offset = ((int32_t)insn << 21) >> 21;
8666 val += (offset << 1) + 2;
8aaca4c0 8667 gen_jmp(s, val);
99c475ab
FB
8668 break;
8669
8670 case 15:
9ee6e8bb 8671 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8672 goto undef32;
9ee6e8bb 8673 break;
99c475ab
FB
8674 }
8675 return;
9ee6e8bb
PB
8676undef32:
8677 gen_set_condexec(s);
5e3f878a 8678 gen_set_pc_im(s->pc - 4);
d9ba4830 8679 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8680 s->is_jmp = DISAS_JUMP;
8681 return;
8682illegal_op:
99c475ab 8683undef:
9ee6e8bb 8684 gen_set_condexec(s);
5e3f878a 8685 gen_set_pc_im(s->pc - 2);
d9ba4830 8686 gen_exception(EXCP_UDEF);
99c475ab
FB
8687 s->is_jmp = DISAS_JUMP;
8688}
8689
2c0262af
FB
8690/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8691 basic block 'tb'. If search_pc is TRUE, also generate PC
8692 information for each intermediate instruction. */
2cfc5f17
TS
8693static inline void gen_intermediate_code_internal(CPUState *env,
8694 TranslationBlock *tb,
8695 int search_pc)
2c0262af
FB
8696{
8697 DisasContext dc1, *dc = &dc1;
a1d1bb31 8698 CPUBreakpoint *bp;
2c0262af
FB
8699 uint16_t *gen_opc_end;
8700 int j, lj;
0fa85d43 8701 target_ulong pc_start;
b5ff1b31 8702 uint32_t next_page_start;
2e70f6ef
PB
8703 int num_insns;
8704 int max_insns;
3b46e624 8705
2c0262af 8706 /* generate intermediate code */
b26eefb6
PB
8707 num_temps = 0;
8708 memset(temps, 0, sizeof(temps));
8709
0fa85d43 8710 pc_start = tb->pc;
3b46e624 8711
2c0262af
FB
8712 dc->tb = tb;
8713
2c0262af 8714 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8715
8716 dc->is_jmp = DISAS_NEXT;
8717 dc->pc = pc_start;
8aaca4c0 8718 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8719 dc->condjmp = 0;
5899f386 8720 dc->thumb = env->thumb;
9ee6e8bb
PB
8721 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8722 dc->condexec_cond = env->condexec_bits >> 4;
b5ff1b31 8723#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8724 if (IS_M(env)) {
8725 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8726 } else {
8727 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8728 }
b5ff1b31 8729#endif
a7812ae4
PB
8730 cpu_F0s = tcg_temp_new_i32();
8731 cpu_F1s = tcg_temp_new_i32();
8732 cpu_F0d = tcg_temp_new_i64();
8733 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8734 cpu_V0 = cpu_F0d;
8735 cpu_V1 = cpu_F1d;
e677137d 8736 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8737 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8738 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8739 lj = -1;
2e70f6ef
PB
8740 num_insns = 0;
8741 max_insns = tb->cflags & CF_COUNT_MASK;
8742 if (max_insns == 0)
8743 max_insns = CF_COUNT_MASK;
8744
8745 gen_icount_start();
9ee6e8bb
PB
8746 /* Reset the conditional execution bits immediately. This avoids
8747 complications trying to do it at the end of the block. */
8748 if (env->condexec_bits)
8f01245e
PB
8749 {
8750 TCGv tmp = new_tmp();
8751 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8752 store_cpu_field(tmp, condexec_bits);
8f01245e 8753 }
2c0262af 8754 do {
fbb4a2e3
PB
8755#ifdef CONFIG_USER_ONLY
8756 /* Intercept jump to the magic kernel page. */
8757 if (dc->pc >= 0xffff0000) {
8758 /* We always get here via a jump, so know we are not in a
8759 conditional execution block. */
8760 gen_exception(EXCP_KERNEL_TRAP);
8761 dc->is_jmp = DISAS_UPDATE;
8762 break;
8763 }
8764#else
9ee6e8bb
PB
8765 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8766 /* We always get here via a jump, so know we are not in a
8767 conditional execution block. */
d9ba4830 8768 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8769 dc->is_jmp = DISAS_UPDATE;
8770 break;
9ee6e8bb
PB
8771 }
8772#endif
8773
c0ce998e
AL
8774 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
8775 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8776 if (bp->pc == dc->pc) {
9ee6e8bb 8777 gen_set_condexec(dc);
5e3f878a 8778 gen_set_pc_im(dc->pc);
d9ba4830 8779 gen_exception(EXCP_DEBUG);
1fddef4b 8780 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8781 /* Advance PC so that clearing the breakpoint will
8782 invalidate this TB. */
8783 dc->pc += 2;
8784 goto done_generating;
1fddef4b
FB
8785 break;
8786 }
8787 }
8788 }
2c0262af
FB
8789 if (search_pc) {
8790 j = gen_opc_ptr - gen_opc_buf;
8791 if (lj < j) {
8792 lj++;
8793 while (lj < j)
8794 gen_opc_instr_start[lj++] = 0;
8795 }
0fa85d43 8796 gen_opc_pc[lj] = dc->pc;
2c0262af 8797 gen_opc_instr_start[lj] = 1;
2e70f6ef 8798 gen_opc_icount[lj] = num_insns;
2c0262af 8799 }
e50e6a20 8800
2e70f6ef
PB
8801 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8802 gen_io_start();
8803
9ee6e8bb
PB
8804 if (env->thumb) {
8805 disas_thumb_insn(env, dc);
8806 if (dc->condexec_mask) {
8807 dc->condexec_cond = (dc->condexec_cond & 0xe)
8808 | ((dc->condexec_mask >> 4) & 1);
8809 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8810 if (dc->condexec_mask == 0) {
8811 dc->condexec_cond = 0;
8812 }
8813 }
8814 } else {
8815 disas_arm_insn(env, dc);
8816 }
b26eefb6
PB
8817 if (num_temps) {
8818 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8819 num_temps = 0;
8820 }
e50e6a20
FB
8821
8822 if (dc->condjmp && !dc->is_jmp) {
8823 gen_set_label(dc->condlabel);
8824 dc->condjmp = 0;
8825 }
aaf2d97d 8826 /* Translation stops when a conditional branch is encountered.
e50e6a20 8827 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8828 * Also stop translation when a page boundary is reached. This
bf20dc07 8829 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8830 num_insns ++;
1fddef4b
FB
8831 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8832 !env->singlestep_enabled &&
1b530a6d 8833 !singlestep &&
2e70f6ef
PB
8834 dc->pc < next_page_start &&
8835 num_insns < max_insns);
8836
8837 if (tb->cflags & CF_LAST_IO) {
8838 if (dc->condjmp) {
8839 /* FIXME: This can theoretically happen with self-modifying
8840 code. */
8841 cpu_abort(env, "IO on conditional branch instruction");
8842 }
8843 gen_io_end();
8844 }
9ee6e8bb 8845
b5ff1b31 8846 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8847 instruction was a conditional branch or trap, and the PC has
8848 already been written. */
551bd27f 8849 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8850 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8851 if (dc->condjmp) {
9ee6e8bb
PB
8852 gen_set_condexec(dc);
8853 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8854 gen_exception(EXCP_SWI);
9ee6e8bb 8855 } else {
d9ba4830 8856 gen_exception(EXCP_DEBUG);
9ee6e8bb 8857 }
e50e6a20
FB
8858 gen_set_label(dc->condlabel);
8859 }
8860 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8861 gen_set_pc_im(dc->pc);
e50e6a20 8862 dc->condjmp = 0;
8aaca4c0 8863 }
9ee6e8bb
PB
8864 gen_set_condexec(dc);
8865 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8866 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8867 } else {
8868 /* FIXME: Single stepping a WFI insn will not halt
8869 the CPU. */
d9ba4830 8870 gen_exception(EXCP_DEBUG);
9ee6e8bb 8871 }
8aaca4c0 8872 } else {
9ee6e8bb
PB
8873 /* While branches must always occur at the end of an IT block,
8874 there are a few other things that can cause us to terminate
 8875             the TB in the middle of an IT block:
8876 - Exception generating instructions (bkpt, swi, undefined).
8877 - Page boundaries.
8878 - Hardware watchpoints.
8879 Hardware breakpoints have already been handled and skip this code.
8880 */
8881 gen_set_condexec(dc);
8aaca4c0 8882 switch(dc->is_jmp) {
8aaca4c0 8883 case DISAS_NEXT:
6e256c93 8884 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8885 break;
8886 default:
8887 case DISAS_JUMP:
8888 case DISAS_UPDATE:
8889 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8890 tcg_gen_exit_tb(0);
8aaca4c0
FB
8891 break;
8892 case DISAS_TB_JUMP:
8893 /* nothing more to generate */
8894 break;
9ee6e8bb 8895 case DISAS_WFI:
d9ba4830 8896 gen_helper_wfi();
9ee6e8bb
PB
8897 break;
8898 case DISAS_SWI:
d9ba4830 8899 gen_exception(EXCP_SWI);
9ee6e8bb 8900 break;
8aaca4c0 8901 }
e50e6a20
FB
8902 if (dc->condjmp) {
8903 gen_set_label(dc->condlabel);
9ee6e8bb 8904 gen_set_condexec(dc);
6e256c93 8905 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8906 dc->condjmp = 0;
8907 }
2c0262af 8908 }
2e70f6ef 8909
9ee6e8bb 8910done_generating:
2e70f6ef 8911 gen_icount_end(tb, num_insns);
2c0262af
FB
8912 *gen_opc_ptr = INDEX_op_end;
8913
8914#ifdef DEBUG_DISAS
8fec2b8c 8915 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
8916 qemu_log("----------------\n");
8917 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8918 log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
8919 qemu_log("\n");
2c0262af
FB
8920 }
8921#endif
b5ff1b31
FB
8922 if (search_pc) {
8923 j = gen_opc_ptr - gen_opc_buf;
8924 lj++;
8925 while (lj <= j)
8926 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8927 } else {
2c0262af 8928 tb->size = dc->pc - pc_start;
2e70f6ef 8929 tb->icount = num_insns;
b5ff1b31 8930 }
2c0262af
FB
8931}
8932
2cfc5f17 8933void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 8934{
2cfc5f17 8935 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
8936}
8937
2cfc5f17 8938void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 8939{
2cfc5f17 8940 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
8941}
8942
b5ff1b31
FB
/* Human-readable names for the low 4 bits of the PSR mode field.
   Reserved/unpredictable encodings print as "???".  The pointers are
   const-qualified as well as the strings, so the whole table is
   immutable and can live in read-only data.  */
static const char * const cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 8947
5fafdf24 8948void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8949 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8950 int flags)
2c0262af
FB
8951{
8952 int i;
06e80fc9 8953#if 0
bc380d17 8954 union {
b7bcbe95
FB
8955 uint32_t i;
8956 float s;
8957 } s0, s1;
8958 CPU_DoubleU d;
a94a6abf
PB
8959 /* ??? This assumes float64 and double have the same layout.
8960 Oh well, it's only debug dumps. */
8961 union {
8962 float64 f64;
8963 double d;
8964 } d0;
06e80fc9 8965#endif
b5ff1b31 8966 uint32_t psr;
2c0262af
FB
8967
8968 for(i=0;i<16;i++) {
7fe48483 8969 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8970 if ((i % 4) == 3)
7fe48483 8971 cpu_fprintf(f, "\n");
2c0262af 8972 else
7fe48483 8973 cpu_fprintf(f, " ");
2c0262af 8974 }
b5ff1b31 8975 psr = cpsr_read(env);
687fa640
TS
8976 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8977 psr,
b5ff1b31
FB
8978 psr & (1 << 31) ? 'N' : '-',
8979 psr & (1 << 30) ? 'Z' : '-',
8980 psr & (1 << 29) ? 'C' : '-',
8981 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8982 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8983 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 8984
5e3f878a 8985#if 0
b7bcbe95 8986 for (i = 0; i < 16; i++) {
8e96005d
FB
8987 d.d = env->vfp.regs[i];
8988 s0.i = d.l.lower;
8989 s1.i = d.l.upper;
a94a6abf
PB
8990 d0.f64 = d.d;
8991 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8992 i * 2, (int)s0.i, s0.s,
a94a6abf 8993 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8994 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8995 d0.d);
b7bcbe95 8996 }
40f137e1 8997 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 8998#endif
2c0262af 8999}
a6b025d3 9000
d2856f1a
AJ
9001void gen_pc_load(CPUState *env, TranslationBlock *tb,
9002 unsigned long searched_pc, int pc_pos, void *puc)
9003{
9004 env->regs[15] = gen_opc_pc[pc_pos];
9005}