/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

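/* Used at the top of instruction decoding: bails out through the enclosing
   decoder's illegal_op label when the core lacks the named feature.  */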
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
    int is_mem;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv cpu_V0, cpu_V1, cpu_M0;

/* FIXME: These should be removed.  */
static TCGv cpu_T[2];
static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;

#define ICOUNT_TEMP cpu_T[0]
#include "gen-icount.h"

/* initialize TCG globals.  */
void arm_translate_init(void)
{
    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");

    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
}

/* The code generator doesn't like lots of temporaries, so maintain our own
   cache for reuse within a function.  */
#define MAX_TEMPS 8
static int num_temps;
static TCGv temps[MAX_TEMPS];

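/* temps[0..num_temps-1] are the temporaries currently in use; slots at or
   above num_temps may still cache a TCGv from an earlier allocation, which
   new_tmp() reuses instead of creating a fresh one.  */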
/* Allocate a temporary variable.  */
static TCGv new_tmp(void)
{
    TCGv tmp;
    if (num_temps == MAX_TEMPS)
        abort();

    if (GET_TCGV(temps[num_temps]))
        return temps[num_temps++];

    tmp = tcg_temp_new(TCG_TYPE_I32);
    temps[num_temps++] = tmp;
    return tmp;
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
        return;

    /* Shuffle this temp to the last slot.  */
    while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* Normally the PC reads as the current insn plus 8 (ARM) or
           4 (Thumb); s->pc already points past this insn, so we need
           only add one more insn.  */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}


/* Basic operations.  */
#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)

#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])

#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);

#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])

#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

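/* 16x16->32 signed multiply of both halfword pairs: returns the product of
   the low halves in a and the product of the high halves in b.  */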
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Round the top 32 bits of a 64-bit value: a holds the low word, b the
   high word.  Bit 31 of the low word is added into the high word, and
   the rounded result is left in a.  */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}

/* Unsigned 32x32->64 multiply.  */
static void gen_op_mull_T0_T1(void)
{
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);

    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
}

/* Signed 32x32->64 multiply.  */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
    TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}
#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}

/* dest = t0 - t1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])

/* dest = t0 & ~t1.  */
/* FIXME: Implement bic natively.  */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_not_i32(tmp, t1);
    tcg_gen_and_i32(dest, t0, tmp);
    dead_tmp(tmp);
}
/* T0 &= ~T1.  Clobbers T1.  */
static inline void gen_op_bicl_T0_T1(void)
{
    gen_op_notl_T1();
    gen_op_andl_T0_T1();
}

/* FIXME: Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

/* FIXME: Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}

static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
        break;
    }
}

static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

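/* ARMv6 SIMD parallel add/subtract.  op1 selects the variant family
   (modulo, saturating, halving), op2 the byte/halfword operation; the
   plain signed and unsigned variants are also handed a pointer to
   env->GE so their helpers can set the GE flags.  */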
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new(TCG_TYPE_PTR);
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new(TCG_TYPE_PTR);
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new(TCG_TYPE_PTR);
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new(TCG_TYPE_PTR);
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

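/* Generate a branch to "label" that is taken when ARM condition code cc
   holds, testing the lazily evaluated NF/ZF/CF/VF flag fields.  */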
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

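/* Nonzero for the data-processing opcodes whose S variants set N and Z
   from the logical result via gen_logic_CC; the arithmetic ops get their
   flags from the *_cc helpers instead.  */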
const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    if (s->thumb != (addr & 1)) {
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
    }
    tcg_gen_movi_i32(tmp, addr & ~1);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
    dead_tmp(tmp);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    tcg_gen_andi_i32(tmp, var, 1);
    store_cpu_field(tmp, thumb);
    tcg_gen_andi_i32(var, var, ~1);
    store_cpu_field(var, regs[15]);
}

/* TODO: This should be removed.  Use gen_bx instead.  */
static inline void gen_bx_T0(DisasContext *s)
{
    TCGv tmp = new_tmp();
    tcg_gen_mov_i32(tmp, cpu_T[0]);
    gen_bx(s, tmp);
}

#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
    s->is_mem = 1; \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

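/* FIXME: cpu_T[] is declared with only two elements above, so the
   cpu_T[2] reference below indexes past the end of the array.  */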
static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[2], reg);
}

static inline void gen_set_pc_im(uint32_t val)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, val);
    store_cpu_field(tmp, regs[15]);
}

static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}

static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}

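/* Apply the addressing-mode 2 offset encoded in insn (12-bit immediate or
   shifted register) to the address in var; bit 23 selects add vs subtract.  */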
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

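/* The same for addressing mode 3 (halfword/doubleword transfers): an
   8-bit immediate split across insn[11:8] and insn[3:0], or a plain
   register, with "extra" folded into the offset.  */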
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

#define VFP_OP1(name) \
static inline void gen_vfp_##name(int dp, int arg) \
{ \
    if (dp) \
        gen_op_vfp_##name##d(arg); \
    else \
        gen_op_vfp_##name##s(arg); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}

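/* Offset of VFP register "reg" within CPUARMState.  Single-precision
   registers live in the halves of the double registers, so odd-numbered
   singles map to the upper word of vfp.regs[reg >> 1].  */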
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
            + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
            + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

/* FIXME: Remove these.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
    tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
    tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT (1 << 20)

static inline void iwmmxt_load_reg(TCGv var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}

static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
{
    TCGv tmp = tcg_const_i32(shift);
    gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
}

static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}

static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}


static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}

static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
    tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
    iwmmxt_store_reg(cpu_V0, rn);
}

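/* Compute the address for an iwMMXt load/store, leaving it in T1 and
   writing back the base register for the pre/post-indexed forms.
   Returns nonzero if the addressing mode is invalid.  */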
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(-offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

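/* Load an iwMMXt shift amount into T0, either from one of the wCGR
   control registers (bit 8 set) or from the low half of a wRn, then
   mask it.  Returns nonzero for an invalid control register.  */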
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
{
    int rd = (insn >> 0) & 0xf;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
            return 1;
        else
            gen_op_iwmmxt_movl_T0_wCx(rd);
    } else {
        gen_iwmmxt_movl_T0_T1_wRn(rd);
    }

    gen_op_movl_T1_im(mask);
    gen_op_andl_T0_T1();
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv tmp;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                gen_iwmmxt_movl_T0_T1_wRn(wrd);
                gen_movl_reg_T0(s, rdlo);
                gen_movl_reg_T1(s, rdhi);
            } else { /* TMCRR */
                gen_movl_T0_reg(s, rdlo);
                gen_movl_T1_reg(s, rdhi);
                gen_iwmmxt_movl_wRn_T0_T1(wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        if (gen_iwmmxt_address(s, insn))
            return 1;
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = gen_ld32(cpu_T[1], IS_USER(s));
                tcg_gen_mov_i32(cpu_T[0], tmp);
                dead_tmp(tmp);
                gen_op_iwmmxt_movl_wCx_T0(wrd);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(cpu_T[1], IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(cpu_T[1], IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                gen_op_iwmmxt_movl_T0_wCx(wrd);
                tmp = new_tmp();
                tcg_gen_mov_i32(tmp, cpu_T[0]);
                gen_st32(tmp, cpu_T[1], IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, cpu_T[1], IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, cpu_T[1], IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, cpu_T[1], IS_USER(s));
                    }
                }
            }
        }
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            gen_op_iwmmxt_movl_T0_wCx(wrd);
            gen_movl_T1_reg(s, rd);
            gen_op_bicl_T0_T1();
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            gen_movl_T0_reg(s, rd);
            gen_op_iwmmxt_movl_wCx_T0(wrd);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movl_T0_wCx(wrd);
        gen_movl_reg_T0(s, rd);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
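        /* FIXME: ANDN wants a bitwise NOT of M0 here; tcg_gen_neg_i64
           below computes the two's-complement negation instead.  */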
        tcg_gen_neg_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        gen_op_movl_T1_im(7);
        gen_op_andl_T0_T1();
        gen_op_iwmmxt_align_M0_T0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_op_movl_T1_im(0xff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
            break;
        case 1:
            gen_op_movl_T1_im(0xffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
            break;
        case 2:
            gen_op_movl_T1_im(0xffffffff);
            gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & 8)
                gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
            }
            break;
        case 1:
            if (insn & 8)
                gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
            else {
                gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
            }
            break;
        case 2:
            gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
            break;
        case 1:
            gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
            break;
        case 2:
            gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
            break;
        case 3:
            return 1;
        }
        gen_op_shll_T1_im(28);
        gen_set_nzcv(cpu_T[1]);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        gen_movl_T0_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                gen_op_shll_T1_im(4);
                gen_op_andl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                gen_op_shll_T1_im(8);
                gen_op_andl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_andl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000)
            return 1;
        gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i++) {
                gen_op_shll_T1_im(4);
                gen_op_orl_T0_T1();
            }
            break;
        case 1:
            for (i = 0; i < 3; i++) {
                gen_op_shll_T1_im(8);
                gen_op_orl_T0_T1();
            }
            break;
        case 2:
            gen_op_shll_T1_im(16);
            gen_op_orl_T0_T1();
            break;
        case 3:
            return 1;
        }
        gen_set_nzcv(cpu_T[0]);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_movl_reg_T0(s, rd);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (gen_iwmmxt_shift(insn, 0xff))
            return 1;
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            return 1;
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf))
                return 1;
2199 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2200 break;
2201 case 2:
2202 if (gen_iwmmxt_shift(insn, 0x1f))
2203 return 1;
2204 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2205 break;
2206 case 3:
2207 if (gen_iwmmxt_shift(insn, 0x3f))
2208 return 1;
2209 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2210 break;
2211 }
2212 gen_op_iwmmxt_movq_wRn_M0(wrd);
2213 gen_op_iwmmxt_set_mup();
2214 gen_op_iwmmxt_set_cup();
2215 break;
2216 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2217 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2218 wrd = (insn >> 12) & 0xf;
2219 rd0 = (insn >> 16) & 0xf;
2220 rd1 = (insn >> 0) & 0xf;
2221 gen_op_iwmmxt_movq_M0_wRn(rd0);
2222 switch ((insn >> 22) & 3) {
2223 case 0:
2224 if (insn & (1 << 21))
2225 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2226 else
2227 gen_op_iwmmxt_minub_M0_wRn(rd1);
2228 break;
2229 case 1:
2230 if (insn & (1 << 21))
2231 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2232 else
2233 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2234 break;
2235 case 2:
2236 if (insn & (1 << 21))
2237 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2238 else
2239 gen_op_iwmmxt_minul_M0_wRn(rd1);
2240 break;
2241 case 3:
2242 return 1;
2243 }
2244 gen_op_iwmmxt_movq_wRn_M0(wrd);
2245 gen_op_iwmmxt_set_mup();
2246 break;
2247 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2248 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2249 wrd = (insn >> 12) & 0xf;
2250 rd0 = (insn >> 16) & 0xf;
2251 rd1 = (insn >> 0) & 0xf;
2252 gen_op_iwmmxt_movq_M0_wRn(rd0);
2253 switch ((insn >> 22) & 3) {
2254 case 0:
2255 if (insn & (1 << 21))
2256 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2257 else
2258 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2259 break;
2260 case 1:
2261 if (insn & (1 << 21))
2262 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2263 else
2264 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2265 break;
2266 case 2:
2267 if (insn & (1 << 21))
2268 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2269 else
2270 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2271 break;
2272 case 3:
2273 return 1;
2274 }
2275 gen_op_iwmmxt_movq_wRn_M0(wrd);
2276 gen_op_iwmmxt_set_mup();
2277 break;
2278 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2279 case 0x402: case 0x502: case 0x602: case 0x702:
2280 wrd = (insn >> 12) & 0xf;
2281 rd0 = (insn >> 16) & 0xf;
2282 rd1 = (insn >> 0) & 0xf;
2283 gen_op_iwmmxt_movq_M0_wRn(rd0);
2284 gen_op_movl_T0_im((insn >> 20) & 3);
2285 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2286 gen_op_iwmmxt_movq_wRn_M0(wrd);
2287 gen_op_iwmmxt_set_mup();
2288 break;
2289 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2290 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2291 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2292 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2293 wrd = (insn >> 12) & 0xf;
2294 rd0 = (insn >> 16) & 0xf;
2295 rd1 = (insn >> 0) & 0xf;
2296 gen_op_iwmmxt_movq_M0_wRn(rd0);
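/* For WSUB (and WADD below), bits 21:20 select the saturation variant
   (0 none, 1 unsigned, 3 signed) and bits 23:22 the element size; any
   other combination is undefined.  */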
2297 switch ((insn >> 20) & 0xf) {
2298 case 0x0:
2299 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2300 break;
2301 case 0x1:
2302 gen_op_iwmmxt_subub_M0_wRn(rd1);
2303 break;
2304 case 0x3:
2305 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2306 break;
2307 case 0x4:
2308 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2309 break;
2310 case 0x5:
2311 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2312 break;
2313 case 0x7:
2314 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2315 break;
2316 case 0x8:
2317 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2318 break;
2319 case 0x9:
2320 gen_op_iwmmxt_subul_M0_wRn(rd1);
2321 break;
2322 case 0xb:
2323 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2324 break;
2325 default:
2326 return 1;
2327 }
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 gen_op_iwmmxt_set_cup();
2331 break;
2332 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2333 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2334 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2335 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2336 wrd = (insn >> 12) & 0xf;
2337 rd0 = (insn >> 16) & 0xf;
2338 gen_op_iwmmxt_movq_M0_wRn(rd0);
2339 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
2340 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
2341 gen_op_iwmmxt_movq_wRn_M0(wrd);
2342 gen_op_iwmmxt_set_mup();
2343 gen_op_iwmmxt_set_cup();
2344 break;
2345 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2346 case 0x418: case 0x518: case 0x618: case 0x718:
2347 case 0x818: case 0x918: case 0xa18: case 0xb18:
2348 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2349 wrd = (insn >> 12) & 0xf;
2350 rd0 = (insn >> 16) & 0xf;
2351 rd1 = (insn >> 0) & 0xf;
2352 gen_op_iwmmxt_movq_M0_wRn(rd0);
2353 switch ((insn >> 20) & 0xf) {
2354 case 0x0:
2355 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2356 break;
2357 case 0x1:
2358 gen_op_iwmmxt_addub_M0_wRn(rd1);
2359 break;
2360 case 0x3:
2361 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2362 break;
2363 case 0x4:
2364 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2365 break;
2366 case 0x5:
2367 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2368 break;
2369 case 0x7:
2370 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2371 break;
2372 case 0x8:
2373 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2374 break;
2375 case 0x9:
2376 gen_op_iwmmxt_addul_M0_wRn(rd1);
2377 break;
2378 case 0xb:
2379 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2380 break;
2381 default:
2382 return 1;
2383 }
2384 gen_op_iwmmxt_movq_wRn_M0(wrd);
2385 gen_op_iwmmxt_set_mup();
2386 gen_op_iwmmxt_set_cup();
2387 break;
2388 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2389 case 0x408: case 0x508: case 0x608: case 0x708:
2390 case 0x808: case 0x908: case 0xa08: case 0xb08:
2391 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2392 wrd = (insn >> 12) & 0xf;
2393 rd0 = (insn >> 16) & 0xf;
2394 rd1 = (insn >> 0) & 0xf;
2395 gen_op_iwmmxt_movq_M0_wRn(rd0);
2396 if (!(insn & (1 << 20)))
2397 return 1;
2398 switch ((insn >> 22) & 3) {
2399 case 0:
2400 return 1;
2401 case 1:
2402 if (insn & (1 << 21))
2403 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2404 else
2405 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2406 break;
2407 case 2:
2408 if (insn & (1 << 21))
2409 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2410 else
2411 gen_op_iwmmxt_packul_M0_wRn(rd1);
2412 break;
2413 case 3:
2414 if (insn & (1 << 21))
2415 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2416 else
2417 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2418 break;
2419 }
2420 gen_op_iwmmxt_movq_wRn_M0(wrd);
2421 gen_op_iwmmxt_set_mup();
2422 gen_op_iwmmxt_set_cup();
2423 break;
2424 case 0x201: case 0x203: case 0x205: case 0x207:
2425 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2426 case 0x211: case 0x213: case 0x215: case 0x217:
2427 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2428 wrd = (insn >> 5) & 0xf;
2429 rd0 = (insn >> 12) & 0xf;
2430 rd1 = (insn >> 0) & 0xf;
2431 if (rd0 == 0xf || rd1 == 0xf)
2432 return 1;
2433 gen_op_iwmmxt_movq_M0_wRn(wrd);
2434 switch ((insn >> 16) & 0xf) {
2435 case 0x0: /* TMIA */
2436 gen_movl_T0_reg(s, rd0);
2437 gen_movl_T1_reg(s, rd1);
2438 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2439 break;
2440 case 0x8: /* TMIAPH */
2441 gen_movl_T0_reg(s, rd0);
2442 gen_movl_T1_reg(s, rd1);
2443 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2444 break;
2445 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2446 gen_movl_T1_reg(s, rd0);
2447 if (insn & (1 << 16))
2448 gen_op_shrl_T1_im(16);
2449 gen_op_movl_T0_T1();
2450 gen_movl_T1_reg(s, rd1);
2451 if (insn & (1 << 17))
2452 gen_op_shrl_T1_im(16);
2453 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2454 break;
2455 default:
2456 return 1;
2457 }
2458 gen_op_iwmmxt_movq_wRn_M0(wrd);
2459 gen_op_iwmmxt_set_mup();
2460 break;
2461 default:
2462 return 1;
2463 }
2464
2465 return 0;
2466 }
2467
2468 /* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
2469    (i.e. an undefined instruction).  */
2470 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2471 {
2472 int acc, rd0, rd1, rdhi, rdlo;
2473
2474 if ((insn & 0x0ff00f10) == 0x0e200010) {
2475 /* Multiply with Internal Accumulate Format */
2476 rd0 = (insn >> 12) & 0xf;
2477 rd1 = insn & 0xf;
2478 acc = (insn >> 5) & 7;
2479
2480 if (acc != 0)
2481 return 1;
2482
2483 switch ((insn >> 16) & 0xf) {
2484 case 0x0: /* MIA */
2485 gen_movl_T0_reg(s, rd0);
2486 gen_movl_T1_reg(s, rd1);
2487 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2488 break;
2489 case 0x8: /* MIAPH */
2490 gen_movl_T0_reg(s, rd0);
2491 gen_movl_T1_reg(s, rd1);
2492 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2493 break;
2494 case 0xc: /* MIABB */
2495 case 0xd: /* MIABT */
2496 case 0xe: /* MIATB */
2497 case 0xf: /* MIATT */
2498 gen_movl_T1_reg(s, rd0);
2499 if (insn & (1 << 16))
2500 gen_op_shrl_T1_im(16);
2501 gen_op_movl_T0_T1();
2502 gen_movl_T1_reg(s, rd1);
2503 if (insn & (1 << 17))
2504 gen_op_shrl_T1_im(16);
2505 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2506 break;
2507 default:
2508 return 1;
2509 }
2510
2511 gen_op_iwmmxt_movq_wRn_M0(acc);
2512 return 0;
2513 }
2514
2515 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2516 /* Internal Accumulator Access Format */
2517 rdhi = (insn >> 16) & 0xf;
2518 rdlo = (insn >> 12) & 0xf;
2519 acc = insn & 7;
2520
2521 if (acc != 0)
2522 return 1;
2523
2524 if (insn & ARM_CP_RW_BIT) { /* MRA */
2525 gen_iwmmxt_movl_T0_T1_wRn(acc);
2526 gen_movl_reg_T0(s, rdlo);
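/* The accumulator is 40 bits wide; rdlo receives bits 31..0 and rdhi
   is masked to bits 39..32, i.e. (1 << (40 - 32)) - 1 == 0xff.  */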
2527 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2528 gen_op_andl_T0_T1();
2529 gen_movl_reg_T0(s, rdhi);
2530 } else { /* MAR */
2531 gen_movl_T0_reg(s, rdlo);
2532 gen_movl_T1_reg(s, rdhi);
2533 gen_iwmmxt_movl_wRn_T0_T1(acc);
2534 }
2535 return 0;
2536 }
2537
2538 return 1;
2539 }
2540
2541 /* Disassemble system coprocessor instruction.  Return nonzero if the
2542    instruction is not defined.  */
2543 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2544 {
2545 TCGv tmp;
2546 uint32_t rd = (insn >> 12) & 0xf;
2547 uint32_t cp = (insn >> 8) & 0xf;
2548 if (IS_USER(s)) {
2549 return 1;
2550 }
2551
2552 if (insn & ARM_CP_RW_BIT) {
2553 if (!env->cp[cp].cp_read)
2554 return 1;
2555 gen_set_pc_im(s->pc);
2556 tmp = new_tmp();
2557 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2558 store_reg(s, rd, tmp);
2559 } else {
2560 if (!env->cp[cp].cp_write)
2561 return 1;
2562 gen_set_pc_im(s->pc);
2563 tmp = load_reg(s, rd);
2564 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
2565 dead_tmp(tmp);
2566 }
2567 return 0;
2568 }
2569
2570 static int cp15_user_ok(uint32_t insn)
2571 {
2572 int cpn = (insn >> 16) & 0xf;
2573 int cpm = insn & 0xf;
2574 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
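/* op now packs opc2 (insn[7:5]) into bits 2..0 and opc1 (insn[23:21])
   into bits 5..3.  */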
2575
2576 if (cpn == 13 && cpm == 0) {
2577 /* TLS register. */
2578 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2579 return 1;
2580 }
2581 if (cpn == 7) {
2582 /* ISB, DSB, DMB. */
2583 if ((cpm == 5 && op == 4)
2584 || (cpm == 10 && (op == 4 || op == 5)))
2585 return 1;
2586 }
2587 return 0;
2588 }
2589
2590 /* Disassemble system coprocessor (cp15) instruction.  Return nonzero if the
2591    instruction is not defined.  */
2592 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2593 {
2594 uint32_t rd;
2595 TCGv tmp;
2596
2597 /* M profile cores use memory mapped registers instead of cp15. */
2598 if (arm_feature(env, ARM_FEATURE_M))
2599 return 1;
2600
2601 if ((insn & (1 << 25)) == 0) {
2602 if (insn & (1 << 20)) {
2603 /* mrrc */
2604 return 1;
2605 }
2606 /* mcrr. Used for block cache operations, so implement as no-op. */
2607 return 0;
2608 }
2609 if ((insn & (1 << 4)) == 0) {
2610 /* cdp */
2611 return 1;
2612 }
2613 if (IS_USER(s) && !cp15_user_ok(insn)) {
2614 return 1;
2615 }
2616 if ((insn & 0x0fff0fff) == 0x0e070f90
2617 || (insn & 0x0fff0fff) == 0x0e070f58) {
2618 /* Wait for interrupt. */
2619 gen_set_pc_im(s->pc);
2620 s->is_jmp = DISAS_WFI;
2621 return 0;
2622 }
2623 rd = (insn >> 12) & 0xf;
2624 if (insn & ARM_CP_RW_BIT) {
2625 tmp = new_tmp();
2626 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
2627 /* If the destination register is r15 then the condition codes are set.  */
2628 if (rd != 15)
2629 store_reg(s, rd, tmp);
2630 else
2631 dead_tmp(tmp);
2632 } else {
2633 tmp = load_reg(s, rd);
2634 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2635 dead_tmp(tmp);
2636 /* Normally we would always end the TB here, but Linux
2637 * arch/arm/mach-pxa/sleep.S expects two instructions following
2638 * an MMU enable to execute from cache. Imitate this behaviour. */
2639 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2640 (insn & 0x0fff0fff) != 0x0e010f10)
2641 gen_lookup_tb(s);
2642 }
2643 return 0;
2644 }
2645
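/* VFP register number encodings: a single-precision register keeps its
   low bit in a separate field, while VFP3 adds a fourth high bit that
   extends the double-precision file to 32 registers.  */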
2646 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2647 #define VFP_SREG(insn, bigbit, smallbit) \
2648 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2649 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2650 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2651 reg = (((insn) >> (bigbit)) & 0x0f) \
2652 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2653 } else { \
2654 if (insn & (1 << (smallbit))) \
2655 return 1; \
2656 reg = ((insn) >> (bigbit)) & 0x0f; \
2657 }} while (0)
2658
2659 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2660 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2661 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2662 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2663 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2664 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2665
2666 /* Move between integer and VFP cores. */
2667 static TCGv gen_vfp_mrs(void)
2668 {
2669 TCGv tmp = new_tmp();
2670 tcg_gen_mov_i32(tmp, cpu_F0s);
2671 return tmp;
2672 }
2673
2674 static void gen_vfp_msr(TCGv tmp)
2675 {
2676 tcg_gen_mov_i32(cpu_F0s, tmp);
2677 dead_tmp(tmp);
2678 }
2679
2680 static inline int
2681 vfp_enabled(CPUState * env)
2682 {
2683 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2684 }
2685
2686 static void gen_neon_dup_u8(TCGv var, int shift)
2687 {
2688 TCGv tmp = new_tmp();
2689 if (shift)
2690 tcg_gen_shri_i32(var, var, shift);
2691 tcg_gen_ext8u_i32(var, var);
2692 tcg_gen_shli_i32(tmp, var, 8);
2693 tcg_gen_or_i32(var, var, tmp);
2694 tcg_gen_shli_i32(tmp, var, 16);
2695 tcg_gen_or_i32(var, var, tmp);
2696 dead_tmp(tmp);
2697 }
2698
2699 static void gen_neon_dup_low16(TCGv var)
2700 {
2701 TCGv tmp = new_tmp();
2702 tcg_gen_ext16u_i32(var, var);
2703 tcg_gen_shli_i32(tmp, var, 16);
2704 tcg_gen_or_i32(var, var, tmp);
2705 dead_tmp(tmp);
2706 }
2707
2708 static void gen_neon_dup_high16(TCGv var)
2709 {
2710 TCGv tmp = new_tmp();
2711 tcg_gen_andi_i32(var, var, 0xffff0000);
2712 tcg_gen_shri_i32(tmp, var, 16);
2713 tcg_gen_or_i32(var, var, tmp);
2714 dead_tmp(tmp);
2715 }
2716
2717 /* Disassemble a VFP instruction.  Returns nonzero if an error occurred
2718    (i.e. an undefined instruction).  */
2719 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2720 {
2721 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2722 int dp, veclen;
2723 TCGv tmp;
2724 TCGv tmp2;
2725
2726 if (!arm_feature(env, ARM_FEATURE_VFP))
2727 return 1;
2728
2729 if (!vfp_enabled(env)) {
2730 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2731 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2732 return 1;
2733 rn = (insn >> 16) & 0xf;
2734 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2735 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2736 return 1;
2737 }
2738 dp = ((insn & 0xf00) == 0xb00);
2739 switch ((insn >> 24) & 0xf) {
2740 case 0xe:
2741 if (insn & (1 << 4)) {
2742 /* single register transfer */
2743 rd = (insn >> 12) & 0xf;
2744 if (dp) {
2745 int size;
2746 int pass;
2747
2748 VFP_DREG_N(rn, insn);
2749 if (insn & 0xf)
2750 return 1;
2751 if (insn & 0x00c00060
2752 && !arm_feature(env, ARM_FEATURE_NEON))
2753 return 1;
2754
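/* Decode the scalar element: bit 21 selects the pass (which 32-bit
   half of the D register), bit 22 a byte lane, bit 5 a halfword lane,
   otherwise a whole word; offset is the lane's bit position.  */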
2755 pass = (insn >> 21) & 1;
2756 if (insn & (1 << 22)) {
2757 size = 0;
2758 offset = ((insn >> 5) & 3) * 8;
2759 } else if (insn & (1 << 5)) {
2760 size = 1;
2761 offset = (insn & (1 << 6)) ? 16 : 0;
2762 } else {
2763 size = 2;
2764 offset = 0;
2765 }
2766 if (insn & ARM_CP_RW_BIT) {
2767 /* vfp->arm */
2768 tmp = neon_load_reg(rn, pass);
2769 switch (size) {
2770 case 0:
2771 if (offset)
2772 tcg_gen_shri_i32(tmp, tmp, offset);
2773 if (insn & (1 << 23))
2774 gen_uxtb(tmp);
2775 else
2776 gen_sxtb(tmp);
2777 break;
2778 case 1:
2779 if (insn & (1 << 23)) {
2780 if (offset) {
2781 tcg_gen_shri_i32(tmp, tmp, 16);
2782 } else {
2783 gen_uxth(tmp);
2784 }
2785 } else {
2786 if (offset) {
2787 tcg_gen_sari_i32(tmp, tmp, 16);
2788 } else {
2789 gen_sxth(tmp);
2790 }
2791 }
2792 break;
2793 case 2:
2794 break;
2795 }
2796 store_reg(s, rd, tmp);
2797 } else {
2798 /* arm->vfp */
2799 tmp = load_reg(s, rd);
2800 if (insn & (1 << 23)) {
2801 /* VDUP */
2802 if (size == 0) {
2803 gen_neon_dup_u8(tmp, 0);
2804 } else if (size == 1) {
2805 gen_neon_dup_low16(tmp);
2806 }
2807 tmp2 = new_tmp();
2808 tcg_gen_mov_i32(tmp2, tmp);
2809 neon_store_reg(rn, 0, tmp2);
2810 neon_store_reg(rn, 1, tmp);
2811 } else {
2812 /* VMOV */
2813 switch (size) {
2814 case 0:
2815 tmp2 = neon_load_reg(rn, pass);
2816 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2817 dead_tmp(tmp2);
2818 break;
2819 case 1:
2820 tmp2 = neon_load_reg(rn, pass);
2821 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2822 dead_tmp(tmp2);
2823 break;
2824 case 2:
2825 break;
2826 }
2827 neon_store_reg(rn, pass, tmp);
2828 }
2829 }
2830 } else { /* !dp */
2831 if ((insn & 0x6f) != 0x00)
2832 return 1;
2833 rn = VFP_SREG_N(insn);
2834 if (insn & ARM_CP_RW_BIT) {
2835 /* vfp->arm */
2836 if (insn & (1 << 21)) {
2837 /* system register */
2838 rn >>= 1;
2839
2840 switch (rn) {
2841 case ARM_VFP_FPSID:
2842 /* VFP2 allows access to FPSID from userspace.
2843 VFP3 restricts all id registers to privileged
2844 accesses. */
2845 if (IS_USER(s)
2846 && arm_feature(env, ARM_FEATURE_VFP3))
2847 return 1;
2848 tmp = load_cpu_field(vfp.xregs[rn]);
2849 break;
2850 case ARM_VFP_FPEXC:
2851 if (IS_USER(s))
2852 return 1;
2853 tmp = load_cpu_field(vfp.xregs[rn]);
2854 break;
2855 case ARM_VFP_FPINST:
2856 case ARM_VFP_FPINST2:
2857 /* Not present in VFP3. */
2858 if (IS_USER(s)
2859 || arm_feature(env, ARM_FEATURE_VFP3))
2860 return 1;
2861 tmp = load_cpu_field(vfp.xregs[rn]);
2862 break;
2863 case ARM_VFP_FPSCR:
2864 if (rd == 15) {
2865 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2866 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2867 } else {
2868 tmp = new_tmp();
2869 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2870 }
2871 break;
2872 case ARM_VFP_MVFR0:
2873 case ARM_VFP_MVFR1:
2874 if (IS_USER(s)
2875 || !arm_feature(env, ARM_FEATURE_VFP3))
2876 return 1;
2877 tmp = load_cpu_field(vfp.xregs[rn]);
2878 break;
2879 default:
2880 return 1;
2881 }
2882 } else {
2883 gen_mov_F0_vreg(0, rn);
2884 tmp = gen_vfp_mrs();
2885 }
2886 if (rd == 15) {
2887 /* Set the 4 flag bits in the CPSR. */
2888 gen_set_nzcv(tmp);
2889 dead_tmp(tmp);
2890 } else {
2891 store_reg(s, rd, tmp);
2892 }
2893 } else {
2894 /* arm->vfp */
2895 tmp = load_reg(s, rd);
2896 if (insn & (1 << 21)) {
2897 rn >>= 1;
2898 /* system register */
2899 switch (rn) {
2900 case ARM_VFP_FPSID:
2901 case ARM_VFP_MVFR0:
2902 case ARM_VFP_MVFR1:
2903 /* Writes are ignored. */
2904 break;
2905 case ARM_VFP_FPSCR:
2906 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2907 dead_tmp(tmp);
2908 gen_lookup_tb(s);
2909 break;
2910 case ARM_VFP_FPEXC:
2911 if (IS_USER(s))
2912 return 1;
2913 store_cpu_field(tmp, vfp.xregs[rn]);
2914 gen_lookup_tb(s);
2915 break;
2916 case ARM_VFP_FPINST:
2917 case ARM_VFP_FPINST2:
2918 store_cpu_field(tmp, vfp.xregs[rn]);
2919 break;
2920 default:
2921 return 1;
2922 }
2923 } else {
2924 gen_vfp_msr(tmp);
2925 gen_mov_vreg_F0(0, rn);
2926 }
2927 }
2928 }
2929 } else {
2930 /* data processing */
2931 /* The opcode is in bits 23, 21, 20 and 6. */
2932 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2933 if (dp) {
2934 if (op == 15) {
2935 /* rn is opcode */
2936 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2937 } else {
2938 /* rn is register number */
2939 VFP_DREG_N(rn, insn);
2940 }
2941
2942 if (op == 15 && (rn == 15 || rn > 17)) {
2943 /* Integer or single precision destination. */
2944 rd = VFP_SREG_D(insn);
2945 } else {
2946 VFP_DREG_D(rd, insn);
2947 }
2948
2949 if (op == 15 && (rn == 16 || rn == 17)) {
2950 /* Integer source. */
2951 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2952 } else {
2953 VFP_DREG_M(rm, insn);
2954 }
2955 } else {
2956 rn = VFP_SREG_N(insn);
2957 if (op == 15 && rn == 15) {
2958 /* Double precision destination. */
2959 VFP_DREG_D(rd, insn);
2960 } else {
2961 rd = VFP_SREG_D(insn);
2962 }
2963 rm = VFP_SREG_M(insn);
2964 }
2965
2966 veclen = env->vfp.vec_len;
2967 if (op == 15 && rn > 3)
2968 veclen = 0;
2969
2970 /* Shut up compiler warnings. */
2971 delta_m = 0;
2972 delta_d = 0;
2973 bank_mask = 0;
2974
2975 if (veclen > 0) {
2976 if (dp)
2977 bank_mask = 0xc;
2978 else
2979 bank_mask = 0x18;
2980
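/* Short-vector operations wrap within register banks: 4 doubles or
   8 singles per bank.  An operand whose register lies in the first
   bank is treated as a scalar.  */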
2981 /* Figure out what type of vector operation this is. */
2982 if ((rd & bank_mask) == 0) {
2983 /* scalar */
2984 veclen = 0;
2985 } else {
2986 if (dp)
2987 delta_d = (env->vfp.vec_stride >> 1) + 1;
2988 else
2989 delta_d = env->vfp.vec_stride + 1;
2990
2991 if ((rm & bank_mask) == 0) {
2992 /* mixed scalar/vector */
2993 delta_m = 0;
2994 } else {
2995 /* vector */
2996 delta_m = delta_d;
2997 }
2998 }
2999 }
3000
3001 /* Load the initial operands. */
3002 if (op == 15) {
3003 switch (rn) {
3004 case 16:
3005 case 17:
3006 /* Integer source */
3007 gen_mov_F0_vreg(0, rm);
3008 break;
3009 case 8:
3010 case 9:
3011 /* Compare */
3012 gen_mov_F0_vreg(dp, rd);
3013 gen_mov_F1_vreg(dp, rm);
3014 break;
3015 case 10:
3016 case 11:
3017 /* Compare with zero */
3018 gen_mov_F0_vreg(dp, rd);
3019 gen_vfp_F1_ld0(dp);
3020 break;
3021 case 20:
3022 case 21:
3023 case 22:
3024 case 23:
3025 /* Source and destination the same. */
3026 gen_mov_F0_vreg(dp, rd);
3027 break;
3028 default:
3029 /* One source operand. */
3030 gen_mov_F0_vreg(dp, rm);
3031 break;
3032 }
3033 } else {
3034 /* Two source operands. */
3035 gen_mov_F0_vreg(dp, rn);
3036 gen_mov_F1_vreg(dp, rm);
3037 }
3038
3039 for (;;) {
3040 /* Perform the calculation. */
3041 switch (op) {
3042 case 0: /* mac: fd + (fn * fm) */
3043 gen_vfp_mul(dp);
3044 gen_mov_F1_vreg(dp, rd);
3045 gen_vfp_add(dp);
3046 break;
3047 case 1: /* nmac: fd - (fn * fm) */
3048 gen_vfp_mul(dp);
3049 gen_vfp_neg(dp);
3050 gen_mov_F1_vreg(dp, rd);
3051 gen_vfp_add(dp);
3052 break;
3053 case 2: /* msc: -fd + (fn * fm) */
3054 gen_vfp_mul(dp);
3055 gen_mov_F1_vreg(dp, rd);
3056 gen_vfp_sub(dp);
3057 break;
3058 case 3: /* nmsc: -fd - (fn * fm) */
3059 gen_vfp_mul(dp);
3060 gen_mov_F1_vreg(dp, rd);
3061 gen_vfp_add(dp);
3062 gen_vfp_neg(dp);
3063 break;
3064 case 4: /* mul: fn * fm */
3065 gen_vfp_mul(dp);
3066 break;
3067 case 5: /* nmul: -(fn * fm) */
3068 gen_vfp_mul(dp);
3069 gen_vfp_neg(dp);
3070 break;
3071 case 6: /* add: fn + fm */
3072 gen_vfp_add(dp);
3073 break;
3074 case 7: /* sub: fn - fm */
3075 gen_vfp_sub(dp);
3076 break;
3077 case 8: /* div: fn / fm */
3078 gen_vfp_div(dp);
3079 break;
3080 case 14: /* fconst */
3081 if (!arm_feature(env, ARM_FEATURE_VFP3))
3082 return 1;
3083
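/* Expand the 8-bit VFP3 immediate: sign from insn[19], the exponent
   MSB inverted and replicated, plus a 4-bit fraction.  For example an
   immediate field of 0x70 expands to 0x3f800000, i.e. 1.0f.  */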
3084 n = (insn << 12) & 0x80000000;
3085 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3086 if (dp) {
3087 if (i & 0x40)
3088 i |= 0x3f80;
3089 else
3090 i |= 0x4000;
3091 n |= i << 16;
3092 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3093 } else {
3094 if (i & 0x40)
3095 i |= 0x780;
3096 else
3097 i |= 0x800;
3098 n |= i << 19;
3099 tcg_gen_movi_i32(cpu_F0s, n);
3100 }
3101 break;
3102 case 15: /* extension space */
3103 switch (rn) {
3104 case 0: /* cpy */
3105 /* no-op */
3106 break;
3107 case 1: /* abs */
3108 gen_vfp_abs(dp);
3109 break;
3110 case 2: /* neg */
3111 gen_vfp_neg(dp);
3112 break;
3113 case 3: /* sqrt */
3114 gen_vfp_sqrt(dp);
3115 break;
3116 case 8: /* cmp */
3117 gen_vfp_cmp(dp);
3118 break;
3119 case 9: /* cmpe */
3120 gen_vfp_cmpe(dp);
3121 break;
3122 case 10: /* cmpz */
3123 gen_vfp_cmp(dp);
3124 break;
3125 case 11: /* cmpez */
3126 gen_vfp_F1_ld0(dp);
3127 gen_vfp_cmpe(dp);
3128 break;
3129 case 15: /* single<->double conversion */
3130 if (dp)
3131 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3132 else
3133 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3134 break;
3135 case 16: /* fuito */
3136 gen_vfp_uito(dp);
3137 break;
3138 case 17: /* fsito */
3139 gen_vfp_sito(dp);
3140 break;
3141 case 20: /* fshto */
3142 if (!arm_feature(env, ARM_FEATURE_VFP3))
3143 return 1;
3144 gen_vfp_shto(dp, rm);
3145 break;
3146 case 21: /* fslto */
3147 if (!arm_feature(env, ARM_FEATURE_VFP3))
3148 return 1;
3149 gen_vfp_slto(dp, rm);
3150 break;
3151 case 22: /* fuhto */
3152 if (!arm_feature(env, ARM_FEATURE_VFP3))
3153 return 1;
3154 gen_vfp_uhto(dp, rm);
3155 break;
3156 case 23: /* fulto */
3157 if (!arm_feature(env, ARM_FEATURE_VFP3))
3158 return 1;
3159 gen_vfp_ulto(dp, rm);
3160 break;
3161 case 24: /* ftoui */
3162 gen_vfp_toui(dp);
3163 break;
3164 case 25: /* ftouiz */
3165 gen_vfp_touiz(dp);
3166 break;
3167 case 26: /* ftosi */
3168 gen_vfp_tosi(dp);
3169 break;
3170 case 27: /* ftosiz */
3171 gen_vfp_tosiz(dp);
3172 break;
3173 case 28: /* ftosh */
3174 if (!arm_feature(env, ARM_FEATURE_VFP3))
3175 return 1;
3176 gen_vfp_tosh(dp, rm);
3177 break;
3178 case 29: /* ftosl */
3179 if (!arm_feature(env, ARM_FEATURE_VFP3))
3180 return 1;
3181 gen_vfp_tosl(dp, rm);
3182 break;
3183 case 30: /* ftouh */
3184 if (!arm_feature(env, ARM_FEATURE_VFP3))
3185 return 1;
3186 gen_vfp_touh(dp, rm);
3187 break;
3188 case 31: /* ftoul */
3189 if (!arm_feature(env, ARM_FEATURE_VFP3))
3190 return 1;
3191 gen_vfp_toul(dp, rm);
3192 break;
3193 default: /* undefined */
3194 printf ("rn:%d\n", rn);
3195 return 1;
3196 }
3197 break;
3198 default: /* undefined */
3199 printf ("op:%d\n", op);
3200 return 1;
3201 }
3202
3203 /* Write back the result. */
3204 if (op == 15 && (rn >= 8 && rn <= 11))
3205 ; /* Comparison, do nothing. */
3206 else if (op == 15 && rn > 17)
3207 /* Integer result. */
3208 gen_mov_vreg_F0(0, rd);
3209 else if (op == 15 && rn == 15)
3210 /* conversion */
3211 gen_mov_vreg_F0(!dp, rd);
3212 else
3213 gen_mov_vreg_F0(dp, rd);
3214
3215 /* break out of the loop if we have finished */
3216 if (veclen == 0)
3217 break;
3218
3219 if (op == 15 && delta_m == 0) {
3220 /* single source one-many */
3221 while (veclen--) {
3222 rd = ((rd + delta_d) & (bank_mask - 1))
3223 | (rd & bank_mask);
3224 gen_mov_vreg_F0(dp, rd);
3225 }
3226 break;
3227 }
3228 /* Set up the next operands. */
3229 veclen--;
3230 rd = ((rd + delta_d) & (bank_mask - 1))
3231 | (rd & bank_mask);
3232
3233 if (op == 15) {
3234 /* One source operand. */
3235 rm = ((rm + delta_m) & (bank_mask - 1))
3236 | (rm & bank_mask);
3237 gen_mov_F0_vreg(dp, rm);
3238 } else {
3239 /* Two source operands. */
3240 rn = ((rn + delta_d) & (bank_mask - 1))
3241 | (rn & bank_mask);
3242 gen_mov_F0_vreg(dp, rn);
3243 if (delta_m) {
3244 rm = ((rm + delta_m) & (bank_mask - 1))
3245 | (rm & bank_mask);
3246 gen_mov_F1_vreg(dp, rm);
3247 }
3248 }
3249 }
3250 }
3251 break;
3252 case 0xc:
3253 case 0xd:
3254 if (dp && (insn & 0x03e00000) == 0x00400000) {
3255 /* two-register transfer */
3256 rn = (insn >> 16) & 0xf;
3257 rd = (insn >> 12) & 0xf;
3258 if (dp) {
3259 VFP_DREG_M(rm, insn);
3260 } else {
3261 rm = VFP_SREG_M(insn);
3262 }
3263
3264 if (insn & ARM_CP_RW_BIT) {
3265 /* vfp->arm */
3266 if (dp) {
3267 gen_mov_F0_vreg(0, rm * 2);
3268 tmp = gen_vfp_mrs();
3269 store_reg(s, rd, tmp);
3270 gen_mov_F0_vreg(0, rm * 2 + 1);
3271 tmp = gen_vfp_mrs();
3272 store_reg(s, rn, tmp);
3273 } else {
3274 gen_mov_F0_vreg(0, rm);
3275 tmp = gen_vfp_mrs();
3276 store_reg(s, rn, tmp);
3277 gen_mov_F0_vreg(0, rm + 1);
3278 tmp = gen_vfp_mrs();
3279 store_reg(s, rd, tmp);
3280 }
3281 } else {
3282 /* arm->vfp */
3283 if (dp) {
3284 tmp = load_reg(s, rd);
3285 gen_vfp_msr(tmp);
3286 gen_mov_vreg_F0(0, rm * 2);
3287 tmp = load_reg(s, rn);
3288 gen_vfp_msr(tmp);
3289 gen_mov_vreg_F0(0, rm * 2 + 1);
3290 } else {
3291 tmp = load_reg(s, rn);
3292 gen_vfp_msr(tmp);
3293 gen_mov_vreg_F0(0, rm);
3294 tmp = load_reg(s, rd);
3295 gen_vfp_msr(tmp);
3296 gen_mov_vreg_F0(0, rm + 1);
3297 }
3298 }
3299 } else {
3300 /* Load/store */
3301 rn = (insn >> 16) & 0xf;
3302 if (dp)
3303 VFP_DREG_D(rd, insn);
3304 else
3305 rd = VFP_SREG_D(insn);
3306 if (s->thumb && rn == 15) {
3307 gen_op_movl_T1_im(s->pc & ~2);
3308 } else {
3309 gen_movl_T1_reg(s, rn);
3310 }
3311 if ((insn & 0x01200000) == 0x01000000) {
3312 /* Single load/store */
3313 offset = (insn & 0xff) << 2;
3314 if ((insn & (1 << 23)) == 0)
3315 offset = -offset;
3316 gen_op_addl_T1_im(offset);
3317 if (insn & (1 << 20)) {
3318 gen_vfp_ld(s, dp);
3319 gen_mov_vreg_F0(dp, rd);
3320 } else {
3321 gen_mov_F0_vreg(dp, rd);
3322 gen_vfp_st(s, dp);
3323 }
3324 } else {
3325 /* load/store multiple */
3326 if (dp)
3327 n = (insn >> 1) & 0x7f;
3328 else
3329 n = insn & 0xff;
3330
3331 if (insn & (1 << 24)) /* pre-decrement */
3332 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3333
3334 if (dp)
3335 offset = 8;
3336 else
3337 offset = 4;
3338 for (i = 0; i < n; i++) {
3339 if (insn & ARM_CP_RW_BIT) {
3340 /* load */
3341 gen_vfp_ld(s, dp);
3342 gen_mov_vreg_F0(dp, rd + i);
3343 } else {
3344 /* store */
3345 gen_mov_F0_vreg(dp, rd + i);
3346 gen_vfp_st(s, dp);
3347 }
3348 gen_op_addl_T1_im(offset);
3349 }
3350 if (insn & (1 << 21)) {
3351 /* writeback */
3352 if (insn & (1 << 24))
3353 offset = -offset * n;
3354 else if (dp && (insn & 1))
3355 offset = 4;
3356 else
3357 offset = 0;
3358
3359 if (offset != 0)
3360 gen_op_addl_T1_im(offset);
3361 gen_movl_reg_T1(s, rn);
3362 }
3363 }
3364 }
3365 break;
3366 default:
3367 /* Should never happen. */
3368 return 1;
3369 }
3370 return 0;
3371 }
3372
3373 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3374 {
3375 TranslationBlock *tb;
3376
3377 tb = s->tb;
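/* Direct block chaining is only safe while the destination stays on
   the same guest page as this TB; otherwise update the PC and exit to
   the main loop so it is looked up afresh.  */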
3378 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3379 tcg_gen_goto_tb(n);
3380 gen_set_pc_im(dest);
3381 tcg_gen_exit_tb((long)tb + n);
3382 } else {
3383 gen_set_pc_im(dest);
3384 tcg_gen_exit_tb(0);
3385 }
3386 }
3387
3388 static inline void gen_jmp (DisasContext *s, uint32_t dest)
3389 {
3390 if (unlikely(s->singlestep_enabled)) {
3391 /* An indirect jump so that we still trigger the debug exception. */
3392 if (s->thumb)
3393 dest |= 1;
3394 gen_bx_im(s, dest);
3395 } else {
3396 gen_goto_tb(s, 0, dest);
3397 s->is_jmp = DISAS_TB_JUMP;
3398 }
3399 }
3400
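/* Signed 16x16->32 multiply for SMULxy/SMLAxy: x and y select the top
   (1) or bottom (0) halfword of each operand.  */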
3401 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3402 {
3403 if (x)
3404 tcg_gen_sari_i32(t0, t0, 16);
3405 else
3406 gen_sxth(t0);
3407 if (y)
3408 tcg_gen_sari_i32(t1, t1, 16);
3409 else
3410 gen_sxth(t1);
3411 tcg_gen_mul_i32(t0, t0, t1);
3412 }
3413
3414 /* Return the mask of PSR bits set by an MSR instruction. */
3415 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3416 uint32_t mask;
3417
3418 mask = 0;
3419 if (flags & (1 << 0))
3420 mask |= 0xff;
3421 if (flags & (1 << 1))
3422 mask |= 0xff00;
3423 if (flags & (1 << 2))
3424 mask |= 0xff0000;
3425 if (flags & (1 << 3))
3426 mask |= 0xff000000;
3427
3428 /* Mask out undefined bits. */
3429 mask &= ~CPSR_RESERVED;
3430 if (!arm_feature(env, ARM_FEATURE_V6))
3431 mask &= ~(CPSR_E | CPSR_GE);
3432 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3433 mask &= ~CPSR_IT;
3434 /* Mask out execution state bits. */
3435 if (!spsr)
3436 mask &= ~CPSR_EXEC;
3437 /* Mask out privileged bits. */
3438 if (IS_USER(s))
3439 mask &= CPSR_USER;
3440 return mask;
3441 }
3442
3443 /* Returns nonzero if access to the PSR is not permitted. */
3444 static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3445 {
3446 TCGv tmp;
3447 if (spsr) {
3448 /* ??? This is also undefined in system mode. */
3449 if (IS_USER(s))
3450 return 1;
3451
3452 tmp = load_cpu_field(spsr);
3453 tcg_gen_andi_i32(tmp, tmp, ~mask);
3454 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3455 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3456 store_cpu_field(tmp, spsr);
3457 } else {
3458 gen_set_cpsr(cpu_T[0], mask);
3459 }
3460 gen_lookup_tb(s);
3461 return 0;
3462 }
3463
3464 /* Generate an old-style exception return. */
3465 static void gen_exception_return(DisasContext *s)
3466 {
3467 TCGv tmp;
3468 gen_movl_reg_T0(s, 15);
3469 tmp = load_cpu_field(spsr);
3470 gen_set_cpsr(tmp, 0xffffffff);
3471 dead_tmp(tmp);
3472 s->is_jmp = DISAS_UPDATE;
3473 }
3474
3475 /* Generate a v6 exception return. Marks both values as dead. */
3476 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3477 {
3478 gen_set_cpsr(cpsr, 0xffffffff);
3479 dead_tmp(cpsr);
3480 store_reg(s, 15, pc);
3481 s->is_jmp = DISAS_UPDATE;
3482 }
3483
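/* Flush the translator's Thumb-2 IT state back into the CPU state so
   that exceptions and interrupts observe correct condexec bits.  */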
3484 static inline void
3485 gen_set_condexec (DisasContext *s)
3486 {
3487 if (s->condexec_mask) {
3488 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3489 TCGv tmp = new_tmp();
3490 tcg_gen_movi_i32(tmp, val);
3491 store_cpu_field(tmp, condexec_bits);
3492 }
3493 }
3494
3495 static void gen_nop_hint(DisasContext *s, int val)
3496 {
3497 switch (val) {
3498 case 3: /* wfi */
3499 gen_set_pc_im(s->pc);
3500 s->is_jmp = DISAS_WFI;
3501 break;
3502 case 2: /* wfe */
3503 case 4: /* sev */
3504 /* TODO: Implement SEV and WFE. May help SMP performance. */
3505 default: /* nop */
3506 break;
3507 }
3508 }
3509
3510 /* These macros help make the code more readable when migrating from the
3511 old dyngen helpers. They should probably be removed when
3512 T0/T1 are removed. */
3513 #define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3514 #define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
3515
3516 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3517
3518 static inline int gen_neon_add(int size)
3519 {
3520 switch (size) {
3521 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3522 case 1: gen_helper_neon_add_u16(CPU_T001); break;
3523 case 2: gen_op_addl_T0_T1(); break;
3524 default: return 1;
3525 }
3526 return 0;
3527 }
3528
3529 static inline void gen_neon_rsb(int size)
3530 {
3531 switch (size) {
3532 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3533 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3534 case 2: gen_op_rsbl_T0_T1(); break;
3535 default: return;
3536 }
3537 }
3538
3539 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3540 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3541 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3542 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3543 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3544
3545 /* FIXME: This is wrong. They set the wrong overflow bit. */
3546 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3547 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3548 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3549 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3550
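/* Dispatch to the s8/u8/s16/u16/s32/u32 helper variant selected by
   (size << 1) | u; the _ENV form passes cpu_env for helpers that need
   CPU state, such as the saturating ops.  */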
3551 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3552 switch ((size << 1) | u) { \
3553 case 0: \
3554 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3555 break; \
3556 case 1: \
3557 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3558 break; \
3559 case 2: \
3560 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3561 break; \
3562 case 3: \
3563 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3564 break; \
3565 case 4: \
3566 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3567 break; \
3568 case 5: \
3569 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3570 break; \
3571 default: return 1; \
3572 }} while (0)
3573
3574 #define GEN_NEON_INTEGER_OP(name) do { \
3575 switch ((size << 1) | u) { \
3576 case 0: \
3577 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3578 break; \
3579 case 1: \
3580 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3581 break; \
3582 case 2: \
3583 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3584 break; \
3585 case 3: \
3586 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3587 break; \
3588 case 4: \
3589 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3590 break; \
3591 case 5: \
3592 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3593 break; \
3594 default: return 1; \
3595 }} while (0)
3596
3597 static inline void
3598 gen_neon_movl_scratch_T0(int scratch)
3599 {
3600 uint32_t offset;
3601
3602 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3603 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
3604 }
3605
3606 static inline void
3607 gen_neon_movl_scratch_T1(int scratch)
3608 {
3609 uint32_t offset;
3610
3611 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3612 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
3613 }
3614
3615 static inline void
3616 gen_neon_movl_T0_scratch(int scratch)
3617 {
3618 uint32_t offset;
3619
3620 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3621 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
3622 }
3623
3624 static inline void
3625 gen_neon_movl_T1_scratch(int scratch)
3626 {
3627 uint32_t offset;
3628
3629 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3630 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
3631 }
3632
3633 static inline void gen_neon_get_scalar(int size, int reg)
3634 {
3635 if (size == 1) {
3636 NEON_GET_REG(T0, reg >> 1, reg & 1);
3637 } else {
3638 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3639 if (reg & 1)
3640 gen_neon_dup_low16(cpu_T[0]);
3641 else
3642 gen_neon_dup_high16(cpu_T[0]);
3643 }
3644 }
3645
3646 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3647 {
3648 int n;
3649
3650 for (n = 0; n < q + 1; n += 2) {
3651 NEON_GET_REG(T0, reg, n);
3652 NEON_GET_REG(T1, reg, n + 1);
3653 switch (size) {
3654 case 0: gen_helper_neon_unzip_u8(); break;
3655 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
3656 case 2: /* no-op */; break;
3657 default: abort();
3658 }
3659 gen_neon_movl_scratch_T0(tmp + n);
3660 gen_neon_movl_scratch_T1(tmp + n + 1);
3661 }
3662 }
3663
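/* Shapes for the VLDn/VSTn "all elements" forms, indexed by the op
   field: number of registers in the list, interleave factor, and the
   spacing between consecutive register numbers.  */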
3664 static struct {
3665 int nregs;
3666 int interleave;
3667 int spacing;
3668 } neon_ls_element_type[11] = {
3669 {4, 4, 1},
3670 {4, 4, 2},
3671 {4, 1, 1},
3672 {4, 2, 1},
3673 {3, 3, 1},
3674 {3, 3, 2},
3675 {3, 1, 1},
3676 {1, 1, 1},
3677 {2, 2, 1},
3678 {2, 2, 2},
3679 {2, 1, 1}
3680 };
3681
3682 /* Translate a NEON load/store element instruction. Return nonzero if the
3683 instruction is invalid. */
3684 static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3685 {
3686 int rd, rn, rm;
3687 int op;
3688 int nregs;
3689 int interleave;
3690 int stride;
3691 int size;
3692 int reg;
3693 int pass;
3694 int load;
3695 int shift;
3696 int n;
3697 TCGv tmp;
3698 TCGv tmp2;
3699
3700 if (!vfp_enabled(env))
3701 return 1;
3702 VFP_DREG_D(rd, insn);
3703 rn = (insn >> 16) & 0xf;
3704 rm = insn & 0xf;
3705 load = (insn & (1 << 21)) != 0;
3706 if ((insn & (1 << 23)) == 0) {
3707 /* Load/store all elements. */
3708 op = (insn >> 8) & 0xf;
3709 size = (insn >> 6) & 3;
3710 if (op > 10 || size == 3)
3711 return 1;
3712 nregs = neon_ls_element_type[op].nregs;
3713 interleave = neon_ls_element_type[op].interleave;
3714 gen_movl_T1_reg(s, rn);
3715 stride = (1 << size) * interleave;
3716 for (reg = 0; reg < nregs; reg++) {
3717 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3718 gen_movl_T1_reg(s, rn);
3719 gen_op_addl_T1_im((1 << size) * reg);
3720 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3721 gen_movl_T1_reg(s, rn);
3722 gen_op_addl_T1_im(1 << size);
3723 }
3724 for (pass = 0; pass < 2; pass++) {
3725 if (size == 2) {
3726 if (load) {
3727 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3728 neon_store_reg(rd, pass, tmp);
3729 } else {
3730 tmp = neon_load_reg(rd, pass);
3731 gen_st32(tmp, cpu_T[1], IS_USER(s));
3732 }
3733 gen_op_addl_T1_im(stride);
3734 } else if (size == 1) {
3735 if (load) {
3736 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3737 gen_op_addl_T1_im(stride);
3738 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
3739 gen_op_addl_T1_im(stride);
3740 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3741 dead_tmp(tmp2);
3742 neon_store_reg(rd, pass, tmp);
3743 } else {
3744 tmp = neon_load_reg(rd, pass);
3745 tmp2 = new_tmp();
3746 tcg_gen_shri_i32(tmp2, tmp, 16);
3747 gen_st16(tmp, cpu_T[1], IS_USER(s));
3748 gen_op_addl_T1_im(stride);
3749 gen_st16(tmp2, cpu_T[1], IS_USER(s));
3750 gen_op_addl_T1_im(stride);
3751 }
3752 } else /* size == 0 */ {
3753 if (load) {
3754 TCGV_UNUSED(tmp2);
3755 for (n = 0; n < 4; n++) {
3756 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3757 gen_op_addl_T1_im(stride);
3758 if (n == 0) {
3759 tmp2 = tmp;
3760 } else {
3761 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3762 dead_tmp(tmp);
3763 }
3764 }
3765 neon_store_reg(rd, pass, tmp2);
3766 } else {
3767 tmp2 = neon_load_reg(rd, pass);
3768 for (n = 0; n < 4; n++) {
3769 tmp = new_tmp();
3770 if (n == 0) {
3771 tcg_gen_mov_i32(tmp, tmp2);
3772 } else {
3773 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3774 }
3775 gen_st8(tmp, cpu_T[1], IS_USER(s));
3776 gen_op_addl_T1_im(stride);
3777 }
3778 dead_tmp(tmp2);
3779 }
3780 }
3781 }
3782 rd += neon_ls_element_type[op].spacing;
3783 }
3784 stride = nregs * 8;
3785 } else {
3786 size = (insn >> 10) & 3;
3787 if (size == 3) {
3788 /* Load single element to all lanes. */
3789 if (!load)
3790 return 1;
3791 size = (insn >> 6) & 3;
3792 nregs = ((insn >> 8) & 3) + 1;
3793 stride = (insn & (1 << 5)) ? 2 : 1;
3794 gen_movl_T1_reg(s, rn);
3795 for (reg = 0; reg < nregs; reg++) {
3796 switch (size) {
3797 case 0:
3798 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3799 gen_neon_dup_u8(tmp, 0);
3800 break;
3801 case 1:
3802 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3803 gen_neon_dup_low16(tmp);
3804 break;
3805 case 2:
3806 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3807 break;
3808 case 3:
3809 return 1;
3810 default: /* Avoid compiler warnings. */
3811 abort();
3812 }
3813 gen_op_addl_T1_im(1 << size);
3814 tmp2 = new_tmp();
3815 tcg_gen_mov_i32(tmp2, tmp);
3816 neon_store_reg(rd, 0, tmp2);
3817 neon_store_reg(rd, 1, tmp);
3818 rd += stride;
3819 }
3820 stride = (1 << size) * nregs;
3821 } else {
3822 /* Single element. */
3823 pass = (insn >> 7) & 1;
3824 switch (size) {
3825 case 0:
3826 shift = ((insn >> 5) & 3) * 8;
3827 stride = 1;
3828 break;
3829 case 1:
3830 shift = ((insn >> 6) & 1) * 16;
3831 stride = (insn & (1 << 5)) ? 2 : 1;
3832 break;
3833 case 2:
3834 shift = 0;
3835 stride = (insn & (1 << 6)) ? 2 : 1;
3836 break;
3837 default:
3838 abort();
3839 }
3840 nregs = ((insn >> 8) & 3) + 1;
3841 gen_movl_T1_reg(s, rn);
3842 for (reg = 0; reg < nregs; reg++) {
3843 if (load) {
3844 switch (size) {
3845 case 0:
3846 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3847 break;
3848 case 1:
3849 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3850 break;
3851 case 2:
3852 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3853 break;
3854 default: /* Avoid compiler warnings. */
3855 abort();
3856 }
3857 if (size != 2) {
3858 tmp2 = neon_load_reg(rd, pass);
3859 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3860 dead_tmp(tmp2);
3861 }
3862 neon_store_reg(rd, pass, tmp);
3863 } else { /* Store */
3864 tmp = neon_load_reg(rd, pass);
3865 if (shift)
3866 tcg_gen_shri_i32(tmp, tmp, shift);
3867 switch (size) {
3868 case 0:
3869 gen_st8(tmp, cpu_T[1], IS_USER(s));
3870 break;
3871 case 1:
3872 gen_st16(tmp, cpu_T[1], IS_USER(s));
3873 break;
3874 case 2:
3875 gen_st32(tmp, cpu_T[1], IS_USER(s));
3876 break;
3877 }
3878 }
3879 rd += stride;
3880 gen_op_addl_T1_im(1 << size);
3881 }
3882 stride = nregs * (1 << size);
3883 }
3884 }
3885 if (rm != 15) {
3886 TCGv base;
3887
3888 base = load_reg(s, rn);
3889 if (rm == 13) {
3890 tcg_gen_addi_i32(base, base, stride);
3891 } else {
3892 TCGv index;
3893 index = load_reg(s, rm);
3894 tcg_gen_add_i32(base, base, index);
3895 dead_tmp(index);
3896 }
3897 store_reg(s, rn, base);
3898 }
3899 return 0;
3900 }
3901
3902 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3903 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3904 {
3905 tcg_gen_and_i32(t, t, c);
3906 tcg_gen_bic_i32(f, f, c);
3907 tcg_gen_or_i32(dest, t, f);
3908 }
3909
3910 static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
3911 {
3912 switch (size) {
3913 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3914 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3915 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3916 default: abort();
3917 }
3918 }
3919
3920 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
3921 {
3922 switch (size) {
3923 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3924 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3925 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3926 default: abort();
3927 }
3928 }
3929
3930 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
3931 {
3932 switch (size) {
3933 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3934 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3935 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3936 default: abort();
3937 }
3938 }
3939
3940 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3941 int q, int u)
3942 {
3943 if (q) {
3944 if (u) {
3945 switch (size) {
3946 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3947 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3948 default: abort();
3949 }
3950 } else {
3951 switch (size) {
3952 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3953 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3954 default: abort();
3955 }
3956 }
3957 } else {
3958 if (u) {
3959 switch (size) {
3960 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
3961 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
3962 default: abort();
3963 }
3964 } else {
3965 switch (size) {
3966 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3967 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3968 default: abort();
3969 }
3970 }
3971 }
3972 }
3973
3974 static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
3975 {
3976 if (u) {
3977 switch (size) {
3978 case 0: gen_helper_neon_widen_u8(dest, src); break;
3979 case 1: gen_helper_neon_widen_u16(dest, src); break;
3980 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3981 default: abort();
3982 }
3983 } else {
3984 switch (size) {
3985 case 0: gen_helper_neon_widen_s8(dest, src); break;
3986 case 1: gen_helper_neon_widen_s16(dest, src); break;
3987 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3988 default: abort();
3989 }
3990 }
3991 dead_tmp(src);
3992 }
3993
3994 static inline void gen_neon_addl(int size)
3995 {
3996 switch (size) {
3997 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3998 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3999 case 2: tcg_gen_add_i64(CPU_V001); break;
4000 default: abort();
4001 }
4002 }
4003
4004 static inline void gen_neon_subl(int size)
4005 {
4006 switch (size) {
4007 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4008 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4009 case 2: tcg_gen_sub_i64(CPU_V001); break;
4010 default: abort();
4011 }
4012 }
4013
4014 static inline void gen_neon_negl(TCGv var, int size)
4015 {
4016 switch (size) {
4017 case 0: gen_helper_neon_negl_u16(var, var); break;
4018 case 1: gen_helper_neon_negl_u32(var, var); break;
4019 case 2: gen_helper_neon_negl_u64(var, var); break;
4020 default: abort();
4021 }
4022 }
4023
4024 static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
4025 {
4026 switch (size) {
4027 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4028 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4029 default: abort();
4030 }
4031 }
4032
4033 static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
4034 {
4035 TCGv tmp;
4036
4037 switch ((size << 1) | u) {
4038 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4039 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4040 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4041 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4042 case 4:
4043 tmp = gen_muls_i64_i32(a, b);
4044 tcg_gen_mov_i64(dest, tmp);
4045 break;
4046 case 5:
4047 tmp = gen_mulu_i64_i32(a, b);
4048 tcg_gen_mov_i64(dest, tmp);
4049 break;
4050 default: abort();
4051 }
4052 if (size < 2) {
4053 dead_tmp(b);
4054 dead_tmp(a);
4055 }
4056 }
4057
4058 /* Translate a NEON data processing instruction. Return nonzero if the
4059 instruction is invalid.
4060 We process data in a mixture of 32-bit and 64-bit chunks.
4061 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4062
4063 static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4064 {
4065 int op;
4066 int q;
4067 int rd, rn, rm;
4068 int size;
4069 int shift;
4070 int pass;
4071 int count;
4072 int pairwise;
4073 int u;
4074 int n;
4075 uint32_t imm;
4076 TCGv tmp;
4077 TCGv tmp2;
4078 TCGv tmp3;
4079
4080 if (!vfp_enabled(env))
4081 return 1;
4082 q = (insn & (1 << 6)) != 0;
4083 u = (insn >> 24) & 1;
4084 VFP_DREG_D(rd, insn);
4085 VFP_DREG_N(rn, insn);
4086 VFP_DREG_M(rm, insn);
4087 size = (insn >> 20) & 3;
4088 if ((insn & (1 << 23)) == 0) {
4089 /* Three register same length. */
4090 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4091 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4092 || op == 10 || op == 11 || op == 16)) {
4093 /* 64-bit element instructions. */
4094 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4095 neon_load_reg64(cpu_V0, rn + pass);
4096 neon_load_reg64(cpu_V1, rm + pass);
4097 switch (op) {
4098 case 1: /* VQADD */
4099 if (u) {
4100 gen_helper_neon_add_saturate_u64(CPU_V001);
4101 } else {
4102 gen_helper_neon_add_saturate_s64(CPU_V001);
4103 }
4104 break;
4105 case 5: /* VQSUB */
4106 if (u) {
4107 gen_helper_neon_sub_saturate_u64(CPU_V001);
4108 } else {
4109 gen_helper_neon_sub_saturate_s64(CPU_V001);
4110 }
4111 break;
4112 case 8: /* VSHL */
4113 if (u) {
4114 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4115 } else {
4116 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4117 }
4118 break;
4119 case 9: /* VQSHL */
4120 if (u) {
4121 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4122 cpu_V1, cpu_V0);
4123 } else {
4124 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4125 cpu_V1, cpu_V0);
4126 }
4127 break;
4128 case 10: /* VRSHL */
4129 if (u) {
4130 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4131 } else {
4132 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4133 }
4134 break;
4135 case 11: /* VQRSHL */
4136 if (u) {
4137 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4138 cpu_V1, cpu_V0);
4139 } else {
4140 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4141 cpu_V1, cpu_V0);
4142 }
4143 break;
4144 case 16: /* VADD, VSUB */
4145 if (u) {
4146 tcg_gen_sub_i64(CPU_V001);
4147 } else {
4148 tcg_gen_add_i64(CPU_V001);
4149 }
4150 break;
4151 default:
4152 abort();
4153 }
4154 neon_store_reg64(cpu_V0, rd + pass);
4155 }
4156 return 0;
4157 }
4158 switch (op) {
4159 case 8: /* VSHL */
4160 case 9: /* VQSHL */
4161 case 10: /* VRSHL */
4162 case 11: /* VQRSHL */
4163 {
4164 int rtmp;
4165 /* Shift instruction operands are reversed. */
4166 rtmp = rn;
4167 rn = rm;
4168 rm = rtmp;
4169 pairwise = 0;
4170 }
4171 break;
4172 case 20: /* VPMAX */
4173 case 21: /* VPMIN */
4174 case 23: /* VPADD */
4175 pairwise = 1;
4176 break;
4177 case 26: /* VPADD (float) */
4178 pairwise = (u && size < 2);
4179 break;
4180 case 30: /* VPMIN/VPMAX (float) */
4181 pairwise = u;
4182 break;
4183 default:
4184 pairwise = 0;
4185 break;
4186 }
4187 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4188
4189 if (pairwise) {
4190 /* Pairwise. */
4191 if (q)
4192 n = (pass & 1) * 2;
4193 else
4194 n = 0;
4195 if (pass < q + 1) {
4196 NEON_GET_REG(T0, rn, n);
4197 NEON_GET_REG(T1, rn, n + 1);
4198 } else {
4199 NEON_GET_REG(T0, rm, n);
4200 NEON_GET_REG(T1, rm, n + 1);
4201 }
4202 } else {
4203 /* Elementwise. */
4204 NEON_GET_REG(T0, rn, pass);
4205 NEON_GET_REG(T1, rm, pass);
4206 }
4207 switch (op) {
4208 case 0: /* VHADD */
4209 GEN_NEON_INTEGER_OP(hadd);
4210 break;
4211 case 1: /* VQADD */
4212 GEN_NEON_INTEGER_OP_ENV(qadd);
4213 break;
4214 case 2: /* VRHADD */
4215 GEN_NEON_INTEGER_OP(rhadd);
4216 break;
4217 case 3: /* Logic ops. */
4218 switch ((u << 2) | size) {
4219 case 0: /* VAND */
4220 gen_op_andl_T0_T1();
4221 break;
4222 case 1: /* VBIC */
4223 gen_op_bicl_T0_T1();
4224 break;
4225 case 2: /* VORR */
4226 gen_op_orl_T0_T1();
4227 break;
4228 case 3: /* VORN */
4229 gen_op_notl_T1();
4230 gen_op_orl_T0_T1();
4231 break;
4232 case 4: /* VEOR */
4233 gen_op_xorl_T0_T1();
4234 break;
4235 case 5: /* VBSL */
4236 tmp = neon_load_reg(rd, pass);
4237 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4238 dead_tmp(tmp);
4239 break;
4240 case 6: /* VBIT */
4241 tmp = neon_load_reg(rd, pass);
4242 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4243 dead_tmp(tmp);
4244 break;
4245 case 7: /* VBIF */
4246 tmp = neon_load_reg(rd, pass);
4247 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4248 dead_tmp(tmp);
4249 break;
4250 }
4251 break;
4252 case 4: /* VHSUB */
4253 GEN_NEON_INTEGER_OP(hsub);
4254 break;
4255 case 5: /* VQSUB */
4256 GEN_NEON_INTEGER_OP_ENV(qsub);
4257 break;
4258 case 6: /* VCGT */
4259 GEN_NEON_INTEGER_OP(cgt);
4260 break;
4261 case 7: /* VCGE */
4262 GEN_NEON_INTEGER_OP(cge);
4263 break;
4264 case 8: /* VSHL */
4265 GEN_NEON_INTEGER_OP(shl);
4266 break;
4267 case 9: /* VQSHL */
4268 GEN_NEON_INTEGER_OP_ENV(qshl);
4269 break;
4270 case 10: /* VRSHL */
4271 GEN_NEON_INTEGER_OP(rshl);
4272 break;
4273 case 11: /* VQRSHL */
4274 GEN_NEON_INTEGER_OP_ENV(qrshl);
4275 break;
4276 case 12: /* VMAX */
4277 GEN_NEON_INTEGER_OP(max);
4278 break;
4279 case 13: /* VMIN */
4280 GEN_NEON_INTEGER_OP(min);
4281 break;
4282 case 14: /* VABD */
4283 GEN_NEON_INTEGER_OP(abd);
4284 break;
4285 case 15: /* VABA */
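                     /* Absolute difference of the operands, accumulated
                        into the destination. */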
4286 GEN_NEON_INTEGER_OP(abd);
4287 NEON_GET_REG(T1, rd, pass);
4288 gen_neon_add(size);
4289 break;
4290 case 16:
4291 if (!u) { /* VADD */
4292 if (gen_neon_add(size))
4293 return 1;
4294 } else { /* VSUB */
4295 switch (size) {
4296 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4297 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
4298 case 2: gen_op_subl_T0_T1(); break;
4299 default: return 1;
4300 }
4301 }
4302 break;
4303 case 17:
4304 if (!u) { /* VTST */
4305 switch (size) {
4306 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4307 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4308 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
4309 default: return 1;
4310 }
4311 } else { /* VCEQ */
4312 switch (size) {
4313 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4314 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4315 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
4316 default: return 1;
4317 }
4318 }
4319 break;
4320 case 18: /* Multiply. */
4321 switch (size) {
4322 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4323 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4324 case 2: gen_op_mul_T0_T1(); break;
4325 default: return 1;
4326 }
4327 NEON_GET_REG(T1, rd, pass);
4328 if (u) { /* VMLS */
4329 gen_neon_rsb(size);
4330 } else { /* VMLA */
4331 gen_neon_add(size);
4332 }
4333 break;
4334 case 19: /* VMUL */
4335 if (u) { /* polynomial */
4336 gen_helper_neon_mul_p8(CPU_T001);
4337 } else { /* Integer */
4338 switch (size) {
4339 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4340 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
4341 case 2: gen_op_mul_T0_T1(); break;
4342 default: return 1;
4343 }
4344 }
4345 break;
4346 case 20: /* VPMAX */
4347 GEN_NEON_INTEGER_OP(pmax);
4348 break;
4349 case 21: /* VPMIN */
4350 GEN_NEON_INTEGER_OP(pmin);
4351 break;
4352                 case 22: /* Multiply high. */
4353 if (!u) { /* VQDMULH */
4354 switch (size) {
4355 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4356 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
4357 default: return 1;
4358 }
4359                 } else { /* VQRDMULH */
4360 switch (size) {
4361 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4362 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
4363 default: return 1;
4364 }
4365 }
4366 break;
4367 case 23: /* VPADD */
4368 if (u)
4369 return 1;
4370 switch (size) {
4371 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4372 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
4373 case 2: gen_op_addl_T0_T1(); break;
4374 default: return 1;
4375 }
4376 break;
4377                 case 26: /* Floating point arithmetic. */
4378 switch ((u << 2) | size) {
4379 case 0: /* VADD */
4380 gen_helper_neon_add_f32(CPU_T001);
4381 break;
4382 case 2: /* VSUB */
4383 gen_helper_neon_sub_f32(CPU_T001);
4384 break;
4385 case 4: /* VPADD */
4386 gen_helper_neon_add_f32(CPU_T001);
4387 break;
4388 case 6: /* VABD */
4389 gen_helper_neon_abd_f32(CPU_T001);
4390 break;
4391 default:
4392 return 1;
4393 }
4394 break;
4395 case 27: /* Float multiply. */
4396 gen_helper_neon_mul_f32(CPU_T001);
4397 if (!u) {
4398 NEON_GET_REG(T1, rd, pass);
4399 if (size == 0) {
4400 gen_helper_neon_add_f32(CPU_T001);
4401 } else {
4402 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
4403 }
4404 }
4405 break;
4406 case 28: /* Float compare. */
4407 if (!u) {
4408 gen_helper_neon_ceq_f32(CPU_T001);
4409 } else {
4410 if (size == 0)
4411 gen_helper_neon_cge_f32(CPU_T001);
4412 else
4413 gen_helper_neon_cgt_f32(CPU_T001);
4414 }
4415 break;
4416 case 29: /* Float compare absolute. */
4417 if (!u)
4418 return 1;
4419 if (size == 0)
4420 gen_helper_neon_acge_f32(CPU_T001);
4421 else
4422 gen_helper_neon_acgt_f32(CPU_T001);
4423 break;
4424 case 30: /* Float min/max. */
4425 if (size == 0)
4426 gen_helper_neon_max_f32(CPU_T001);
4427 else
4428 gen_helper_neon_min_f32(CPU_T001);
4429 break;
4430 case 31:
4431 if (size == 0)
4432 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4433 else
4434 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
4435 break;
4436 default:
4437 abort();
4438 }
4439 /* Save the result. For elementwise operations we can put it
4440 straight into the destination register. For pairwise operations
4441 we have to be careful to avoid clobbering the source operands. */
4442 if (pairwise && rd == rm) {
4443 gen_neon_movl_scratch_T0(pass);
4444 } else {
4445 NEON_SET_REG(T0, rd, pass);
4446 }
4447
4448 } /* for pass */
4449 if (pairwise && rd == rm) {
4450 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4451 gen_neon_movl_T0_scratch(pass);
4452 NEON_SET_REG(T0, rd, pass);
4453 }
4454 }
4455 /* End of 3 register same size operations. */
4456 } else if (insn & (1 << 4)) {
4457 if ((insn & 0x00380080) != 0) {
4458 /* Two registers and shift. */
4459 op = (insn >> 8) & 0xf;
4460 if (insn & (1 << 7)) {
4461 /* 64-bit shift. */
4462 size = 3;
4463 } else {
4464 size = 2;
4465 while ((insn & (1 << (size + 19))) == 0)
4466 size--;
4467 }
4468 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4469             /* To avoid excessive duplication of ops we implement shift
4470 by immediate using the variable shift operations. */
4471 if (op < 8) {
4472 /* Shift by immediate:
4473 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4474 /* Right shifts are encoded as N - shift, where N is the
4475 element size in bits. */
4476 if (op <= 4)
4477 shift = shift - (1 << (size + 3));
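                     /* e.g. for size == 0 (8-bit elements) an encoded value of
                        5 becomes 5 - 8 = -3; the variable-shift helpers treat
                        a negative count as a right shift. */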
4478 if (size == 3) {
4479 count = q + 1;
4480 } else {
4481 count = q ? 4: 2;
4482 }
4483 switch (size) {
4484 case 0:
4485 imm = (uint8_t) shift;
4486 imm |= imm << 8;
4487 imm |= imm << 16;
4488 break;
4489 case 1:
4490 imm = (uint16_t) shift;
4491 imm |= imm << 16;
4492 break;
4493 case 2:
4494 case 3:
4495 imm = shift;
4496 break;
4497 default:
4498 abort();
4499 }
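                     /* imm now holds the (possibly negative) shift count,
                        replicated across the 8- or 16-bit lanes of a 32-bit
                        word so the variable-shift helpers apply it per
                        element. */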
4500
4501 for (pass = 0; pass < count; pass++) {
4502 if (size == 3) {
4503 neon_load_reg64(cpu_V0, rm + pass);
4504 tcg_gen_movi_i64(cpu_V1, imm);
4505 switch (op) {
4506 case 0: /* VSHR */
4507 case 1: /* VSRA */
4508 if (u)
4509 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4510 else
4511 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4512 break;
4513 case 2: /* VRSHR */
4514 case 3: /* VRSRA */
4515 if (u)
4516 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4517 else
4518 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4519 break;
4520 case 4: /* VSRI */
4521 if (!u)
4522 return 1;
4523 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4524 break;
4525 case 5: /* VSHL, VSLI */
4526 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4527 break;
4528 case 6: /* VQSHL */
4529 if (u)
4530 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4531 else
4532 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4533 break;
4534 case 7: /* VQSHLU */
4535 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4536 break;
4537 }
4538 if (op == 1 || op == 3) {
4539 /* Accumulate. */
4540                             neon_load_reg64(cpu_V1, rd + pass);
4541 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4542 } else if (op == 4 || (op == 5 && u)) {
4543 /* Insert */
4544 cpu_abort(env, "VS[LR]I.64 not implemented");
4545 }
4546 neon_store_reg64(cpu_V0, rd + pass);
4547 } else { /* size < 3 */
4548 /* Operands in T0 and T1. */
4549 gen_op_movl_T1_im(imm);
4550 NEON_GET_REG(T0, rm, pass);
4551 switch (op) {
4552 case 0: /* VSHR */
4553 case 1: /* VSRA */
4554 GEN_NEON_INTEGER_OP(shl);
4555 break;
4556 case 2: /* VRSHR */
4557 case 3: /* VRSRA */
4558 GEN_NEON_INTEGER_OP(rshl);
4559 break;
4560 case 4: /* VSRI */
4561 if (!u)
4562 return 1;
4563 GEN_NEON_INTEGER_OP(shl);
4564 break;
4565 case 5: /* VSHL, VSLI */
4566 switch (size) {
4567 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4568 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4569 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4570 default: return 1;
4571 }
4572 break;
4573 case 6: /* VQSHL */
4574 GEN_NEON_INTEGER_OP_ENV(qshl);
4575 break;
4576 case 7: /* VQSHLU */
4577 switch (size) {
4578 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4579 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4580 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4581 default: return 1;
4582 }
4583 break;
4584 }
4585
4586 if (op == 1 || op == 3) {
4587 /* Accumulate. */
4588 NEON_GET_REG(T1, rd, pass);
4589 gen_neon_add(size);
4590 } else if (op == 4 || (op == 5 && u)) {
4591 /* Insert */
4592 switch (size) {
4593 case 0:
4594 if (op == 4)
4595 imm = 0xff >> -shift;
4596 else
4597 imm = (uint8_t)(0xff << shift);
4598 imm |= imm << 8;
4599 imm |= imm << 16;
4600 break;
4601 case 1:
4602 if (op == 4)
4603 imm = 0xffff >> -shift;
4604 else
4605 imm = (uint16_t)(0xffff << shift);
4606 imm |= imm << 16;
4607 break;
4608 case 2:
4609 if (op == 4)
4610 imm = 0xffffffffu >> -shift;
4611 else
4612 imm = 0xffffffffu << shift;
4613 break;
4614 default:
4615 abort();
4616 }
4617 tmp = neon_load_reg(rd, pass);
4618 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4619 tcg_gen_andi_i32(tmp, tmp, ~imm);
4620 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4621 }
4622 NEON_SET_REG(T0, rd, pass);
4623 }
4624 } /* for pass */
4625 } else if (op < 10) {
4626 /* Shift by immediate and narrow:
4627 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4628 shift = shift - (1 << (size + 3));
4629 size++;
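                 /* size now names the wide (source) element type; the
                    narrowed result elements are half that width. */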
4630 switch (size) {
4631 case 1:
4632 imm = (uint16_t)shift;
4633 imm |= imm << 16;
4634 tmp2 = tcg_const_i32(imm);
4635 break;
4636 case 2:
4637 imm = (uint32_t)shift;
4638                     tmp2 = tcg_const_i32(imm); break;
4639 case 3:
4640 tmp2 = tcg_const_i64(shift);
4641 break;
4642 default:
4643 abort();
4644 }
4645
4646 for (pass = 0; pass < 2; pass++) {
4647 if (size == 3) {
4648 neon_load_reg64(cpu_V0, rm + pass);
4649 if (q) {
4650 if (u)
4651 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
4652 else
4653 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
4654 } else {
4655 if (u)
4656 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
4657 else
4658 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
4659 }
4660 } else {
4661 tmp = neon_load_reg(rm + pass, 0);
4662 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4663 tmp3 = neon_load_reg(rm + pass, 1);
4664 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4665 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4666 dead_tmp(tmp);
4667 dead_tmp(tmp3);
4668 }
4669 tmp = new_tmp();
4670 if (op == 8 && !u) {
4671 gen_neon_narrow(size - 1, tmp, cpu_V0);
4672 } else {
4673 if (op == 8)
4674 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4675 else
4676 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4677 }
4678 if (pass == 0) {
4679 tmp2 = tmp;
4680 } else {
4681 neon_store_reg(rd, 0, tmp2);
4682 neon_store_reg(rd, 1, tmp);
4683 }
4684 } /* for pass */
4685 } else if (op == 10) {
4686 /* VSHLL */
4687 if (q || size == 3)
4688 return 1;
4689 tmp = neon_load_reg(rm, 0);
4690 tmp2 = neon_load_reg(rm, 1);
4691 for (pass = 0; pass < 2; pass++) {
4692 if (pass == 1)
4693 tmp = tmp2;
4694
4695 gen_neon_widen(cpu_V0, tmp, size, u);
4696
4697 if (shift != 0) {
4698 /* The shift is less than the width of the source
4699 type, so we can just shift the whole register. */
4700 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
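                         /* The extension bits of each widened element have
                            been shifted into the low bits of the element
                            above; clear those bits out again. */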
4701 if (size < 2 || !u) {
4702 uint64_t imm64;
4703 if (size == 0) {
4704 imm = (0xffu >> (8 - shift));
4705 imm |= imm << 16;
4706 } else {
4707 imm = 0xffff >> (16 - shift);
4708 }
4709 imm64 = imm | (((uint64_t)imm) << 32);
4710                         tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4711 }
4712 }
4713 neon_store_reg64(cpu_V0, rd + pass);
4714 }
4715 } else if (op == 15 || op == 16) {
4716 /* VCVT fixed-point. */
4717 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4718 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4719 if (op & 1) {
4720 if (u)
4721 gen_vfp_ulto(0, shift);
4722 else
4723 gen_vfp_slto(0, shift);
4724 } else {
4725 if (u)
4726 gen_vfp_toul(0, shift);
4727 else
4728 gen_vfp_tosl(0, shift);
4729 }
4730 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4731 }
4732 } else {
4733 return 1;
4734 }
4735 } else { /* (insn & 0x00380080) == 0 */
4736 int invert;
4737
4738 op = (insn >> 8) & 0xf;
4739 /* One register and immediate. */
4740 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
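             /* The 8-bit immediate is assembled from insn bits 24, 18..16
                and 3..0; op selects how it is expanded to a 32-bit pattern
                below. */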
4741 invert = (insn & (1 << 5)) != 0;
4742 switch (op) {
4743 case 0: case 1:
4744 /* no-op */
4745 break;
4746 case 2: case 3:
4747 imm <<= 8;
4748 break;
4749 case 4: case 5:
4750 imm <<= 16;
4751 break;
4752 case 6: case 7:
4753 imm <<= 24;
4754 break;
4755 case 8: case 9:
4756 imm |= imm << 16;
4757 break;
4758 case 10: case 11:
4759 imm = (imm << 8) | (imm << 24);
4760 break;
4761 case 12:
4762                 imm = (imm << 8) | 0xff;
4763 break;
4764 case 13:
4765 imm = (imm << 16) | 0xffff;
4766 break;
4767 case 14:
4768 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4769 if (invert)
4770 imm = ~imm;
4771 break;
4772 case 15:
4773 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4774 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4775 break;
4776 }
4777 if (invert)
4778 imm = ~imm;
4779
4780 if (op != 14 || !invert)
4781 gen_op_movl_T1_im(imm);
4782
4783 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4784 if (op & 1 && op < 12) {
4785 tmp = neon_load_reg(rd, pass);
4786 if (invert) {
4787 /* The immediate value has already been inverted, so
4788 BIC becomes AND. */
4789 tcg_gen_andi_i32(tmp, tmp, imm);
4790 } else {
4791 tcg_gen_ori_i32(tmp, tmp, imm);
4792 }
4793 } else {
4794 /* VMOV, VMVN. */
4795 tmp = new_tmp();
4796 if (op == 14 && invert) {
4797 uint32_t val;
4798 val = 0;
4799 for (n = 0; n < 4; n++) {
4800 if (imm & (1 << (n + (pass & 1) * 4)))
4801 val |= 0xff << (n * 8);
4802 }
4803 tcg_gen_movi_i32(tmp, val);
4804 } else {
4805 tcg_gen_movi_i32(tmp, imm);
4806 }
4807 }
4808 neon_store_reg(rd, pass, tmp);
4809 }
4810 }
4811 } else { /* (insn & 0x00800010 == 0x00800000) */
4812 if (size != 3) {
4813 op = (insn >> 8) & 0xf;
4814 if ((insn & (1 << 6)) == 0) {
4815 /* Three registers of different lengths. */
4816 int src1_wide;
4817 int src2_wide;
4818 int prewiden;
4819 /* prewiden, src1_wide, src2_wide */
4820 static const int neon_3reg_wide[16][3] = {
4821 {1, 0, 0}, /* VADDL */
4822 {1, 1, 0}, /* VADDW */
4823 {1, 0, 0}, /* VSUBL */
4824 {1, 1, 0}, /* VSUBW */
4825 {0, 1, 1}, /* VADDHN */
4826 {0, 0, 0}, /* VABAL */
4827 {0, 1, 1}, /* VSUBHN */
4828 {0, 0, 0}, /* VABDL */
4829 {0, 0, 0}, /* VMLAL */
4830 {0, 0, 0}, /* VQDMLAL */
4831 {0, 0, 0}, /* VMLSL */
4832 {0, 0, 0}, /* VQDMLSL */
4833 {0, 0, 0}, /* Integer VMULL */
4834 {0, 0, 0}, /* VQDMULL */
4835 {0, 0, 0} /* Polynomial VMULL */
4836 };
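                     /* prewiden: widen the 32-bit inputs to 64 bits before
                        the op; srcN_wide: that source operand is already a
                        64-bit (wide) quantity. */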
4837
4838 prewiden = neon_3reg_wide[op][0];
4839 src1_wide = neon_3reg_wide[op][1];
4840 src2_wide = neon_3reg_wide[op][2];
4841
4842 if (size == 0 && (op == 9 || op == 11 || op == 13))
4843 return 1;
4844
4845 /* Avoid overlapping operands. Wide source operands are
4846 always aligned so will never overlap with wide
4847 destinations in problematic ways. */
4848 if (rd == rm && !src2_wide) {
4849 NEON_GET_REG(T0, rm, 1);
4850 gen_neon_movl_scratch_T0(2);
4851 } else if (rd == rn && !src1_wide) {
4852 NEON_GET_REG(T0, rn, 1);
4853 gen_neon_movl_scratch_T0(2);
4854 }
4855 TCGV_UNUSED(tmp3);
4856 for (pass = 0; pass < 2; pass++) {
4857 if (src1_wide) {
4858 neon_load_reg64(cpu_V0, rn + pass);
4859 TCGV_UNUSED(tmp);
4860 } else {
4861 if (pass == 1 && rd == rn) {
4862 gen_neon_movl_T0_scratch(2);
4863 tmp = new_tmp();
4864 tcg_gen_mov_i32(tmp, cpu_T[0]);
4865 } else {
4866 tmp = neon_load_reg(rn, pass);
4867 }
4868 if (prewiden) {
4869 gen_neon_widen(cpu_V0, tmp, size, u);
4870 }
4871 }
4872 if (src2_wide) {
4873 neon_load_reg64(cpu_V1, rm + pass);
4874 TCGV_UNUSED(tmp2);
4875 } else {
4876 if (pass == 1 && rd == rm) {
4877 gen_neon_movl_T0_scratch(2);
4878 tmp2 = new_tmp();
4879 tcg_gen_mov_i32(tmp2, cpu_T[0]);
4880 } else {
4881 tmp2 = neon_load_reg(rm, pass);
4882 }
4883 if (prewiden) {
4884 gen_neon_widen(cpu_V1, tmp2, size, u);
4885 }
4886 }
4887 switch (op) {
4888 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4889 gen_neon_addl(size);
4890 break;
4891                     case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
4892 gen_neon_subl(size);
4893 break;
4894 case 5: case 7: /* VABAL, VABDL */
4895 switch ((size << 1) | u) {
4896 case 0:
4897 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4898 break;
4899 case 1:
4900 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4901 break;
4902 case 2:
4903 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4904 break;
4905 case 3:
4906 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4907 break;
4908 case 4:
4909 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4910 break;
4911 case 5:
4912 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4913 break;
4914 default: abort();
4915 }
4916 dead_tmp(tmp2);
4917 dead_tmp(tmp);
4918 break;
4919 case 8: case 9: case 10: case 11: case 12: case 13:
4920 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4921 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
4922 break;
4923 case 14: /* Polynomial VMULL */
4924 cpu_abort(env, "Polynomial VMULL not implemented");
4925
4926 default: /* 15 is RESERVED. */
4927 return 1;
4928 }
4929 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4930 /* Accumulate. */
4931 if (op == 10 || op == 11) {
4932 gen_neon_negl(cpu_V0, size);
4933 }
4934
4935 if (op != 13) {
4936 neon_load_reg64(cpu_V1, rd + pass);
4937 }
4938
4939 switch (op) {
4940 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4941 gen_neon_addl(size);
4942 break;
4943 case 9: case 11: /* VQDMLAL, VQDMLSL */
4944 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4945 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4946 break;
4948 case 13: /* VQDMULL */
4949 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4950 break;
4951 default:
4952 abort();
4953 }
4954 neon_store_reg64(cpu_V0, rd + pass);
4955 } else if (op == 4 || op == 6) {
4956 /* Narrowing operation. */
4957 tmp = new_tmp();
4958 if (u) {
4959 switch (size) {
4960 case 0:
4961 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4962 break;
4963 case 1:
4964 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4965 break;
4966 case 2:
4967 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4968 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4969 break;
4970 default: abort();
4971 }
4972 } else {
4973 switch (size) {
4974 case 0:
4975 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4976 break;
4977 case 1:
4978 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4979 break;
4980 case 2:
4981 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4982 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4983 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4984 break;
4985 default: abort();
4986 }
4987 }
4988 if (pass == 0) {
4989 tmp3 = tmp;
4990 } else {
4991 neon_store_reg(rd, 0, tmp3);
4992 neon_store_reg(rd, 1, tmp);
4993 }
4994 } else {
4995 /* Write back the result. */
4996 neon_store_reg64(cpu_V0, rd + pass);
4997 }
4998 }
4999 } else {
5000 /* Two registers and a scalar. */
5001 switch (op) {
5002 case 0: /* Integer VMLA scalar */
5003 case 1: /* Float VMLA scalar */
5004 case 4: /* Integer VMLS scalar */
5005 case 5: /* Floating point VMLS scalar */
5006 case 8: /* Integer VMUL scalar */
5007 case 9: /* Floating point VMUL scalar */
5008 case 12: /* VQDMULH scalar */
5009 case 13: /* VQRDMULH scalar */
5010 gen_neon_get_scalar(size, rm);
5011 gen_neon_movl_scratch_T0(0);
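                     /* Keep the scalar in a scratch slot so each pass can
                        reload it. */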
5012 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5013 if (pass != 0)
5014 gen_neon_movl_T0_scratch(0);
5015 NEON_GET_REG(T1, rn, pass);
5016 if (op == 12) {
5017 if (size == 1) {
5018 gen_helper_neon_qdmulh_s16(CPU_T0E01);
5019 } else {
5020 gen_helper_neon_qdmulh_s32(CPU_T0E01);
5021 }
5022 } else if (op == 13) {
5023 if (size == 1) {
5024 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
5025 } else {
5026 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
5027 }
5028 } else if (op & 1) {
5029 gen_helper_neon_mul_f32(CPU_T001);
5030 } else {
5031 switch (size) {
5032 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5033 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
5034 case 2: gen_op_mul_T0_T1(); break;
5035 default: return 1;
5036 }
5037 }
5038 if (op < 8) {
5039 /* Accumulate. */
5040 NEON_GET_REG(T1, rd, pass);
5041 switch (op) {
5042 case 0:
5043 gen_neon_add(size);
5044 break;
5045 case 1:
5046 gen_helper_neon_add_f32(CPU_T001);
5047 break;
5048 case 4:
5049 gen_neon_rsb(size);
5050 break;
5051 case 5:
5052 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
5053 break;
5054 default:
5055 abort();
5056 }
5057 }
5058 NEON_SET_REG(T0, rd, pass);
5059 }
5060 break;
5061             case 2: /* VMLAL scalar */
5062 case 3: /* VQDMLAL scalar */
5063 case 6: /* VMLSL scalar */
5064 case 7: /* VQDMLSL scalar */
5065 case 10: /* VMULL scalar */
5066 case 11: /* VQDMULL scalar */
5067 if (size == 0 && (op == 3 || op == 7 || op == 11))
5068 return 1;
5069
5070 gen_neon_get_scalar(size, rm);
5071 NEON_GET_REG(T1, rn, 1);
5072
5073 for (pass = 0; pass < 2; pass++) {
5074 if (pass == 0) {
5075 tmp = neon_load_reg(rn, 0);
5076 } else {
5077 tmp = new_tmp();
5078 tcg_gen_mov_i32(tmp, cpu_T[1]);
5079 }
5080 tmp2 = new_tmp();
5081 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5082 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5083 if (op == 6 || op == 7) {
5084 gen_neon_negl(cpu_V0, size);
5085 }
5086 if (op != 11) {
5087 neon_load_reg64(cpu_V1, rd + pass);
5088 }
5089 switch (op) {
5090 case 2: case 6:
5091 gen_neon_addl(size);
5092 break;
5093 case 3: case 7:
5094 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5095 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5096 break;
5097 case 10:
5098 /* no-op */
5099 break;
5100 case 11:
5101 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5102 break;
5103 default:
5104 abort();
5105 }
5106 neon_store_reg64(cpu_V0, rd + pass);
5107 }
5108 break;
5109 default: /* 14 and 15 are RESERVED */
5110 return 1;
5111 }
5112 }
5113 } else { /* size == 3 */
5114 if (!u) {
5115 /* Extract. */
5116 imm = (insn >> 8) & 0xf;
5117 count = q + 1;
5118
5119 if (imm > 7 && !q)
5120 return 1;
5121
5122 if (imm == 0) {
5123 neon_load_reg64(cpu_V0, rn);
5124 if (q) {
5125 neon_load_reg64(cpu_V1, rn + 1);
5126 }
5127 } else if (imm == 8) {
5128 neon_load_reg64(cpu_V0, rn + 1);
5129 if (q) {
5130 neon_load_reg64(cpu_V1, rm);
5131 }
5132 } else if (q) {
5133 tmp = tcg_temp_new(TCG_TYPE_I64);
5134 if (imm < 8) {
5135 neon_load_reg64(cpu_V0, rn);
5136 neon_load_reg64(tmp, rn + 1);
5137 } else {
5138 neon_load_reg64(cpu_V0, rn + 1);
5139 neon_load_reg64(tmp, rm);
5140 }
5141 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5142 tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
5143 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5144 if (imm < 8) {
5145 neon_load_reg64(cpu_V1, rm);
5146 } else {
5147 neon_load_reg64(cpu_V1, rm + 1);
5148 imm -= 8;
5149 }
5150 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5151 tcg_gen_shri_i64(tmp, tmp, imm * 8);
5152 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
5153 } else {
5154 neon_load_reg64(cpu_V0, rn);
5155                 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5156 neon_load_reg64(cpu_V1, rm);
5157                 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5158 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5159 }
5160 neon_store_reg64(cpu_V0, rd);
5161 if (q) {
5162 neon_store_reg64(cpu_V1, rd + 1);
5163 }
5164 } else if ((insn & (1 << 11)) == 0) {
5165 /* Two register misc. */
5166 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
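             /* op is a 6-bit value: insn bits 17:16 form its top two bits
                and bits 10:7 the remainder. */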
5167 size = (insn >> 18) & 3;
5168 switch (op) {
5169 case 0: /* VREV64 */
5170 if (size == 3)
5171 return 1;
5172 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5173 NEON_GET_REG(T0, rm, pass * 2);
5174 NEON_GET_REG(T1, rm, pass * 2 + 1);
5175 switch (size) {
5176 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
5177 case 1: gen_swap_half(cpu_T[0]); break;
5178 case 2: /* no-op */ break;
5179 default: abort();
5180 }
5181 NEON_SET_REG(T0, rd, pass * 2 + 1);
5182 if (size == 2) {
5183 NEON_SET_REG(T1, rd, pass * 2);
5184 } else {
5185 gen_op_movl_T0_T1();
5186 switch (size) {
5187 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
5188 case 1: gen_swap_half(cpu_T[0]); break;
5189 default: abort();
5190 }
5191 NEON_SET_REG(T0, rd, pass * 2);
5192 }
5193 }
5194 break;
5195 case 4: case 5: /* VPADDL */
5196 case 12: case 13: /* VPADAL */
5197 if (size == 3)
5198 return 1;
5199 for (pass = 0; pass < q + 1; pass++) {
5200 tmp = neon_load_reg(rm, pass * 2);
5201 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5202 tmp = neon_load_reg(rm, pass * 2 + 1);
5203 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5204 switch (size) {
5205 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5206 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5207 case 2: tcg_gen_add_i64(CPU_V001); break;
5208 default: abort();
5209 }
5210 if (op >= 12) {
5211 /* Accumulate. */
5212 neon_load_reg64(cpu_V1, rd + pass);
5213 gen_neon_addl(size);
5214 }
5215 neon_store_reg64(cpu_V0, rd + pass);
5216 }
5217 break;
5218 case 33: /* VTRN */
5219 if (size == 2) {
5220 for (n = 0; n < (q ? 4 : 2); n += 2) {
5221 NEON_GET_REG(T0, rm, n);
5222 NEON_GET_REG(T1, rd, n + 1);
5223 NEON_SET_REG(T1, rm, n);
5224 NEON_SET_REG(T0, rd, n + 1);
5225 }
5226 } else {
5227 goto elementwise;
5228 }
5229 break;
5230 case 34: /* VUZP */
5231 /* Reg Before After
5232 Rd A3 A2 A1 A0 B2 B0 A2 A0
5233 Rm B3 B2 B1 B0 B3 B1 A3 A1
5234 */
5235 if (size == 3)
5236 return 1;
5237 gen_neon_unzip(rd, q, 0, size);
5238 gen_neon_unzip(rm, q, 4, size);
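                 /* Both registers have been unzipped into the scratch slots;
                    shuffle the halves back so rd receives the even elements
                    and rm the odd ones. */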
5239 if (q) {
5240 static int unzip_order_q[8] =
5241 {0, 2, 4, 6, 1, 3, 5, 7};
5242 for (n = 0; n < 8; n++) {
5243 int reg = (n < 4) ? rd : rm;
5244 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5245 NEON_SET_REG(T0, reg, n % 4);
5246 }
5247 } else {
5248 static int unzip_order[4] =
5249 {0, 4, 1, 5};
5250 for (n = 0; n < 4; n++) {
5251 int reg = (n < 2) ? rd : rm;
5252 gen_neon_movl_T0_scratch(unzip_order[n]);
5253 NEON_SET_REG(T0, reg, n % 2);
5254 }
5255 }
5256 break;
5257 case 35: /* VZIP */
5258 /* Reg Before After
5259 Rd A3 A2 A1 A0 B1 A1 B0 A0
5260 Rm B3 B2 B1 B0 B3 A3 B2 A2
5261 */
5262 if (size == 3)
5263 return 1;
5264 count = (q ? 4 : 2);
5265 for (n = 0; n < count; n++) {
5266 NEON_GET_REG(T0, rd, n);
5267                     NEON_GET_REG(T1, rm, n);
5268 switch (size) {
5269 case 0: gen_helper_neon_zip_u8(); break;
5270 case 1: gen_helper_neon_zip_u16(); break;
5271 case 2: /* no-op */; break;
5272 default: abort();
5273 }
5274 gen_neon_movl_scratch_T0(n * 2);
5275 gen_neon_movl_scratch_T1(n * 2 + 1);
5276 }
5277 for (n = 0; n < count * 2; n++) {
5278 int reg = (n < count) ? rd : rm;
5279 gen_neon_movl_T0_scratch(n);
5280 NEON_SET_REG(T0, reg, n % count);
5281 }
5282 break;
5283 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5284 if (size == 3)
5285 return 1;
5286 TCGV_UNUSED(tmp2);
5287 for (pass = 0; pass < 2; pass++) {
5288 neon_load_reg64(cpu_V0, rm + pass);
5289 tmp = new_tmp();
5290 if (op == 36 && q == 0) {
5291 gen_neon_narrow(size, tmp, cpu_V0);
5292 } else if (q) {
5293 gen_neon_narrow_satu(size, tmp, cpu_V0);
5294 } else {
5295 gen_neon_narrow_sats(size, tmp, cpu_V0);
5296 }
5297 if (pass == 0) {
5298 tmp2 = tmp;
5299 } else {
5300 neon_store_reg(rd, 0, tmp2);
5301 neon_store_reg(rd, 1, tmp);
5302 }
5303 }
5304 break;
5305 case 38: /* VSHLL */
5306 if (q || size == 3)
5307 return 1;
5308 tmp = neon_load_reg(rm, 0);
5309 tmp2 = neon_load_reg(rm, 1);
5310 for (pass = 0; pass < 2; pass++) {
5311 if (pass == 1)
5312 tmp = tmp2;
5313 gen_neon_widen(cpu_V0, tmp, size, 1);
5314 neon_store_reg64(cpu_V0, rd + pass);
5315 }
5316 break;
5317 default:
5318 elementwise:
5319 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5320 if (op == 30 || op == 31 || op >= 58) {
5321 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5322 neon_reg_offset(rm, pass));
5323 } else {
5324 NEON_GET_REG(T0, rm, pass);
5325 }
5326 switch (op) {
5327 case 1: /* VREV32 */
5328 switch (size) {
5329 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
5330 case 1: gen_swap_half(cpu_T[0]); break;
5331 default: return 1;
5332 }
5333 break;
5334 case 2: /* VREV16 */
5335 if (size != 0)
5336 return 1;
5337 gen_rev16(cpu_T[0]);
5338 break;
5339 case 8: /* CLS */
5340 switch (size) {
5341 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5342 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5343 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
5344 default: return 1;
5345 }
5346 break;
5347 case 9: /* CLZ */
5348 switch (size) {
5349 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5350 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
5351 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
5352 default: return 1;
5353 }
5354 break;
5355 case 10: /* CNT */
5356 if (size != 0)
5357 return 1;
5358 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
5359 break;
5360 case 11: /* VNOT */
5361 if (size != 0)
5362 return 1;
5363 gen_op_notl_T0();
5364 break;
5365 case 14: /* VQABS */
5366 switch (size) {
5367 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5368 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5369 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5370 default: return 1;
5371 }
5372 break;
5373 case 15: /* VQNEG */
5374 switch (size) {
5375 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5376 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5377 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
5378 default: return 1;
5379 }
5380 break;
5381 case 16: case 19: /* VCGT #0, VCLE #0 */
5382 gen_op_movl_T1_im(0);
5383 switch(size) {
5384 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5385 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5386 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
5387 default: return 1;
5388 }
5389 if (op == 19)
5390 gen_op_notl_T0();
5391 break;
5392 case 17: case 20: /* VCGE #0, VCLT #0 */
5393 gen_op_movl_T1_im(0);
5394 switch(size) {
5395 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5396 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5397 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
5398 default: return 1;
5399 }
5400 if (op == 20)
5401 gen_op_notl_T0();
5402 break;
5403 case 18: /* VCEQ #0 */
5404 gen_op_movl_T1_im(0);
5405 switch(size) {
5406 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5407 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5408 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
5409 default: return 1;
5410 }
5411 break;
5412 case 22: /* VABS */
5413 switch(size) {
5414 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5415 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5416 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
5417 default: return 1;
5418 }
5419 break;
5420 case 23: /* VNEG */
5421 gen_op_movl_T1_im(0);
5422 if (size == 3)
5423 return 1;
5424 gen_neon_rsb(size);
5425 break;
5426 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5427 gen_op_movl_T1_im(0);
5428 gen_helper_neon_cgt_f32(CPU_T001);
5429 if (op == 27)
5430 gen_op_notl_T0();
5431 break;
5432 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5433 gen_op_movl_T1_im(0);
5434 gen_helper_neon_cge_f32(CPU_T001);
5435 if (op == 28)
5436 gen_op_notl_T0();
5437 break;
5438 case 26: /* Float VCEQ #0 */
5439 gen_op_movl_T1_im(0);
5440 gen_helper_neon_ceq_f32(CPU_T001);
5441 break;
5442 case 30: /* Float VABS */
5443 gen_vfp_abs(0);
5444 break;
5445 case 31: /* Float VNEG */
5446 gen_vfp_neg(0);
5447 break;
5448 case 32: /* VSWP */
5449 NEON_GET_REG(T1, rd, pass);
5450 NEON_SET_REG(T1, rm, pass);
5451 break;
5452 case 33: /* VTRN */
5453 NEON_GET_REG(T1, rd, pass);
5454 switch (size) {
5455 case 0: gen_helper_neon_trn_u8(); break;
5456 case 1: gen_helper_neon_trn_u16(); break;
5457 case 2: abort();
5458 default: return 1;
5459 }
5460 NEON_SET_REG(T1, rm, pass);
5461 break;
5462 case 56: /* Integer VRECPE */
5463 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
5464 break;
5465 case 57: /* Integer VRSQRTE */
5466 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
5467 break;
5468 case 58: /* Float VRECPE */
5469 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5470 break;
5471 case 59: /* Float VRSQRTE */
5472 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5473 break;
5474 case 60: /* VCVT.F32.S32 */
5475 gen_vfp_tosiz(0);
5476 break;
5477 case 61: /* VCVT.F32.U32 */
5478 gen_vfp_touiz(0);
5479 break;
5480 case 62: /* VCVT.S32.F32 */
5481 gen_vfp_sito(0);
5482 break;
5483 case 63: /* VCVT.U32.F32 */
5484 gen_vfp_uito(0);
5485 break;
5486 default:
5487 /* Reserved: 21, 29, 39-56 */
5488 return 1;
5489 }
5490 if (op == 30 || op == 31 || op >= 58) {
5491 tcg_gen_st_f32(cpu_F0s, cpu_env,
5492 neon_reg_offset(rd, pass));
5493 } else {
5494 NEON_SET_REG(T0, rd, pass);
5495 }
5496 }
5497 break;
5498 }
5499 } else if ((insn & (1 << 10)) == 0) {
5500 /* VTBL, VTBX. */
5501 n = ((insn >> 5) & 0x18) + 8;
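             /* n = table length in bytes: one to four source registers of
                eight bytes each. */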
5502 if (insn & (1 << 6)) {
5503 tmp = neon_load_reg(rd, 0);
5504 } else {
5505 tmp = new_tmp();
5506 tcg_gen_movi_i32(tmp, 0);
5507 }
5508 tmp2 = neon_load_reg(rm, 0);
5509 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5510 tcg_const_i32(n));
5511 dead_tmp(tmp);
5512 if (insn & (1 << 6)) {
5513 tmp = neon_load_reg(rd, 1);
5514 } else {
5515 tmp = new_tmp();
5516 tcg_gen_movi_i32(tmp, 0);
5517 }
5518 tmp3 = neon_load_reg(rm, 1);
5519 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5520 tcg_const_i32(n));
5521 neon_store_reg(rd, 0, tmp2);
5522 neon_store_reg(rd, 1, tmp3);
5523 dead_tmp(tmp);
5524 } else if ((insn & 0x380) == 0) {
5525 /* VDUP */
5526 if (insn & (1 << 19)) {
5527                 NEON_GET_REG(T0, rm, 1);
5528 } else {
5529                 NEON_GET_REG(T0, rm, 0);
5530 }
5531 if (insn & (1 << 16)) {
5532 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
5533 } else if (insn & (1 << 17)) {
5534 if ((insn >> 18) & 1)
5535 gen_neon_dup_high16(cpu_T[0]);
5536 else
5537 gen_neon_dup_low16(cpu_T[0]);
5538 }
5539 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5540 NEON_SET_REG(T0, rd, pass);
5541 }
5542 } else {
5543 return 1;
5544 }
5545 }
5546 }
5547 return 0;
5548 }
5549
5550 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5551 {
5552 int cpnum;
5553
5554 cpnum = (insn >> 8) & 0xf;
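     /* XScale gates coprocessor accesses with the CPAR register: a clear
        bit means accesses to that coprocessor are undefined. */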
5555 if (arm_feature(env, ARM_FEATURE_XSCALE)
5556 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5557 return 1;
5558
5559 switch (cpnum) {
5560 case 0:
5561 case 1:
5562 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5563 return disas_iwmmxt_insn(env, s, insn);
5564 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5565 return disas_dsp_insn(env, s, insn);
5566 }
5567 return 1;
5568 case 10:
5569 case 11:
5570 return disas_vfp_insn (env, s, insn);
5571 case 15:
5572 return disas_cp15_insn (env, s, insn);
5573 default:
5574 /* Unknown coprocessor. See if the board has hooked it. */
5575 return disas_cp_insn (env, s, insn);
5576 }
5577 }
5578
5579
5580 /* Store a 64-bit value to a register pair. Clobbers val. */
5581 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
5582 {
5583 TCGv tmp;
5584 tmp = new_tmp();
5585 tcg_gen_trunc_i64_i32(tmp, val);
5586 store_reg(s, rlow, tmp);
5587 tmp = new_tmp();
5588 tcg_gen_shri_i64(val, val, 32);
5589 tcg_gen_trunc_i64_i32(tmp, val);
5590 store_reg(s, rhigh, tmp);
5591 }
5592
5593 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5594 static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
5595 {
5596 TCGv tmp;
5597 TCGv tmp2;
5598
5599 /* Load value and extend to 64 bits. */
5600 tmp = tcg_temp_new(TCG_TYPE_I64);
5601 tmp2 = load_reg(s, rlow);
5602 tcg_gen_extu_i32_i64(tmp, tmp2);
5603 dead_tmp(tmp2);
5604 tcg_gen_add_i64(val, val, tmp);
5605 }
5606
5607 /* load and add a 64-bit value from a register pair. */
5608 static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
5609 {
5610 TCGv tmp;
5611 TCGv tmpl;
5612 TCGv tmph;
5613
5614 /* Load 64-bit value rd:rn. */
5615 tmpl = load_reg(s, rlow);
5616 tmph = load_reg(s, rhigh);
5617 tmp = tcg_temp_new(TCG_TYPE_I64);
5618 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5619 dead_tmp(tmpl);
5620 dead_tmp(tmph);
5621 tcg_gen_add_i64(val, val, tmp);
5622 }
5623
5624 /* Set N and Z flags from a 64-bit value. */
5625 static void gen_logicq_cc(TCGv val)
5626 {
5627 TCGv tmp = new_tmp();
5628 gen_helper_logicq_cc(tmp, val);
5629 gen_logic_CC(tmp);
5630 dead_tmp(tmp);
5631 }
5632
5633 static void disas_arm_insn(CPUState * env, DisasContext *s)
5634 {
5635 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
5636 TCGv tmp;
5637 TCGv tmp2;
5638 TCGv tmp3;
5639 TCGv addr;
5640
5641 insn = ldl_code(s->pc);
5642 s->pc += 4;
5643
5644 /* M variants do not implement ARM mode. */
5645 if (IS_M(env))
5646 goto illegal_op;
5647 cond = insn >> 28;
5648 if (cond == 0xf){
5649 /* Unconditional instructions. */
5650 if (((insn >> 25) & 7) == 1) {
5651 /* NEON Data processing. */
5652 if (!arm_feature(env, ARM_FEATURE_NEON))
5653 goto illegal_op;
5654
5655 if (disas_neon_data_insn(env, s, insn))
5656 goto illegal_op;
5657 return;
5658 }
5659 if ((insn & 0x0f100000) == 0x04000000) {
5660 /* NEON load/store. */
5661 if (!arm_feature(env, ARM_FEATURE_NEON))
5662 goto illegal_op;
5663
5664 if (disas_neon_ls_insn(env, s, insn))
5665 goto illegal_op;
5666 return;
5667 }
5668 if ((insn & 0x0d70f000) == 0x0550f000)
5669 return; /* PLD */
5670 else if ((insn & 0x0ffffdff) == 0x01010000) {
5671 ARCH(6);
5672 /* setend */
5673 if (insn & (1 << 9)) {
5674 /* BE8 mode not implemented. */
5675 goto illegal_op;
5676 }
5677 return;
5678 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5679 switch ((insn >> 4) & 0xf) {
5680 case 1: /* clrex */
5681 ARCH(6K);
5682 gen_helper_clrex(cpu_env);
5683 return;
5684 case 4: /* dsb */
5685 case 5: /* dmb */
5686 case 6: /* isb */
5687 ARCH(7);
5688 /* We don't emulate caches so these are a no-op. */
5689 return;
5690 default:
5691 goto illegal_op;
5692 }
5693 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5694 /* srs */
5695 uint32_t offset;
5696 if (IS_USER(s))
5697 goto illegal_op;
5698 ARCH(6);
5699 op1 = (insn & 0x1f);
5700 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5701 addr = load_reg(s, 13);
5702 } else {
5703 addr = new_tmp();
5704 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
5705 }
5706 i = (insn >> 23) & 3;
5707 switch (i) {
5708 case 0: offset = -4; break; /* DA */
5709 case 1: offset = -8; break; /* DB */
5710 case 2: offset = 0; break; /* IA */
5711 case 3: offset = 4; break; /* IB */
5712 default: abort();
5713 }
5714 if (offset)
5715 tcg_gen_addi_i32(addr, addr, offset);
5716 tmp = load_reg(s, 14);
5717 gen_st32(tmp, addr, 0);
5718 tmp = new_tmp();
5719 gen_helper_cpsr_read(tmp);
5720 tcg_gen_addi_i32(addr, addr, 4);
5721 gen_st32(tmp, addr, 0);
5722 if (insn & (1 << 21)) {
5723 /* Base writeback. */
5724 switch (i) {
5725 case 0: offset = -8; break;
5726 case 1: offset = -4; break;
5727 case 2: offset = 4; break;
5728 case 3: offset = 0; break;
5729 default: abort();
5730 }
5731 if (offset)
5732                 tcg_gen_addi_i32(addr, addr, offset);
5733             if (op1 == (env->uncached_cpsr & CPSR_M)) {
5734                 store_reg(s, 13, addr);
5735             } else {
5736                 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), addr); dead_tmp(addr);
5737             }
5738 } else {
5739 dead_tmp(addr);
5740 }
5741 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5742 /* rfe */
5743 uint32_t offset;
5744 if (IS_USER(s))
5745 goto illegal_op;
5746 ARCH(6);
5747 rn = (insn >> 16) & 0xf;
5748 addr = load_reg(s, rn);
5749 i = (insn >> 23) & 3;
5750 switch (i) {
5751 case 0: offset = -4; break; /* DA */
5752 case 1: offset = -8; break; /* DB */
5753 case 2: offset = 0; break; /* IA */
5754 case 3: offset = 4; break; /* IB */
5755 default: abort();
5756 }
5757 if (offset)
5758 tcg_gen_addi_i32(addr, addr, offset);
5759 /* Load PC into tmp and CPSR into tmp2. */
5760 tmp = gen_ld32(addr, 0);
5761 tcg_gen_addi_i32(addr, addr, 4);
5762 tmp2 = gen_ld32(addr, 0);
5763 if (insn & (1 << 21)) {
5764 /* Base writeback. */
5765 switch (i) {
5766 case 0: offset = -8; break;
5767 case 1: offset = -4; break;
5768 case 2: offset = 4; break;
5769 case 3: offset = 0; break;
5770 default: abort();
5771 }
5772 if (offset)
5773 tcg_gen_addi_i32(addr, addr, offset);
5774 store_reg(s, rn, addr);
5775 } else {
5776 dead_tmp(addr);
5777 }
5778 gen_rfe(s, tmp, tmp2);
5779 } else if ((insn & 0x0e000000) == 0x0a000000) {
5780 /* branch link and change to thumb (blx <offset>) */
5781 int32_t offset;
5782
5783 val = (uint32_t)s->pc;
5784 tmp = new_tmp();
5785 tcg_gen_movi_i32(tmp, val);
5786 store_reg(s, 14, tmp);
5787 /* Sign-extend the 24-bit offset */
5788 offset = (((int32_t)insn) << 8) >> 8;
5789 /* offset * 4 + bit24 * 2 + (thumb bit) */
5790 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5791 /* pipeline offset */
5792 val += 4;
5793 gen_bx_im(s, val);
5794 return;
5795 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5796 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5797 /* iWMMXt register transfer. */
5798 if (env->cp15.c15_cpar & (1 << 1))
5799 if (!disas_iwmmxt_insn(env, s, insn))
5800 return;
5801 }
5802 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5803 /* Coprocessor double register transfer. */
5804 } else if ((insn & 0x0f000010) == 0x0e000010) {
5805 /* Additional coprocessor register transfer. */
5806 } else if ((insn & 0x0ff10020) == 0x01000000) {
5807 uint32_t mask;
5808 uint32_t val;
5809 /* cps (privileged) */
5810 if (IS_USER(s))
5811 return;
5812 mask = val = 0;
5813 if (insn & (1 << 19)) {
5814 if (insn & (1 << 8))
5815 mask |= CPSR_A;
5816 if (insn & (1 << 7))
5817 mask |= CPSR_I;
5818 if (insn & (1 << 6))
5819 mask |= CPSR_F;
5820 if (insn & (1 << 18))
5821 val |= mask;
5822 }
5823 if (insn & (1 << 17)) {
5824 mask |= CPSR_M;
5825 val |= (insn & 0x1f);
5826 }
5827 if (mask) {
5828 gen_op_movl_T0_im(val);
5829 gen_set_psr_T0(s, mask, 0);
5830 }
5831 return;
5832 }
5833 goto illegal_op;
5834 }
5835 if (cond != 0xe) {
5836 /* if not always execute, we generate a conditional jump to
5837 next instruction */
5838 s->condlabel = gen_new_label();
5839 gen_test_cc(cond ^ 1, s->condlabel);
5840 s->condjmp = 1;
5841 }
5842 if ((insn & 0x0f900000) == 0x03000000) {
5843 if ((insn & (1 << 21)) == 0) {
5844 ARCH(6T2);
5845 rd = (insn >> 12) & 0xf;
5846 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5847 if ((insn & (1 << 22)) == 0) {
5848 /* MOVW */
5849 tmp = new_tmp();
5850 tcg_gen_movi_i32(tmp, val);
5851 } else {
5852 /* MOVT */
5853 tmp = load_reg(s, rd);
5854 tcg_gen_ext16u_i32(tmp, tmp);
5855 tcg_gen_ori_i32(tmp, tmp, val << 16);
5856 }
5857 store_reg(s, rd, tmp);
5858 } else {
5859 if (((insn >> 12) & 0xf) != 0xf)
5860 goto illegal_op;
5861 if (((insn >> 16) & 0xf) == 0) {
5862 gen_nop_hint(s, insn & 0xff);
5863 } else {
5864 /* CPSR = immediate */
5865 val = insn & 0xff;
5866 shift = ((insn >> 8) & 0xf) * 2;
5867 if (shift)
5868 val = (val >> shift) | (val << (32 - shift));
5869 gen_op_movl_T0_im(val);
5870 i = ((insn & (1 << 22)) != 0);
5871 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5872 goto illegal_op;
5873 }
5874 }
5875 } else if ((insn & 0x0f900000) == 0x01000000
5876 && (insn & 0x00000090) != 0x00000090) {
5877 /* miscellaneous instructions */
5878 op1 = (insn >> 21) & 3;
5879 sh = (insn >> 4) & 0xf;
5880 rm = insn & 0xf;
5881 switch (sh) {
5882 case 0x0: /* move program status register */
5883 if (op1 & 1) {
5884 /* PSR = reg */
5885 gen_movl_T0_reg(s, rm);
5886 i = ((op1 & 2) != 0);
5887 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5888 goto illegal_op;
5889 } else {
5890 /* reg = PSR */
5891 rd = (insn >> 12) & 0xf;
5892 if (op1 & 2) {
5893 if (IS_USER(s))
5894 goto illegal_op;
5895 tmp = load_cpu_field(spsr);
5896 } else {
5897 tmp = new_tmp();
5898 gen_helper_cpsr_read(tmp);
5899 }
5900 store_reg(s, rd, tmp);
5901 }
5902 break;
5903 case 0x1:
5904 if (op1 == 1) {
5905 /* branch/exchange thumb (bx). */
5906 tmp = load_reg(s, rm);
5907 gen_bx(s, tmp);
5908 } else if (op1 == 3) {
5909 /* clz */
5910 rd = (insn >> 12) & 0xf;
5911 tmp = load_reg(s, rm);
5912 gen_helper_clz(tmp, tmp);
5913 store_reg(s, rd, tmp);
5914 } else {
5915 goto illegal_op;
5916 }
5917 break;
5918 case 0x2:
5919 if (op1 == 1) {
5920 ARCH(5J); /* bxj */
5921 /* Trivial implementation equivalent to bx. */
5922 tmp = load_reg(s, rm);
5923 gen_bx(s, tmp);
5924 } else {
5925 goto illegal_op;
5926 }
5927 break;
5928 case 0x3:
5929 if (op1 != 1)
5930 goto illegal_op;
5931
5932 /* branch link/exchange thumb (blx) */
5933 tmp = load_reg(s, rm);
5934 tmp2 = new_tmp();
5935 tcg_gen_movi_i32(tmp2, s->pc);
5936 store_reg(s, 14, tmp2);
5937 gen_bx(s, tmp);
5938 break;
5939 case 0x5: /* saturating add/subtract */
5940 rd = (insn >> 12) & 0xf;
5941 rn = (insn >> 16) & 0xf;
5942 tmp = load_reg(s, rm);
5943 tmp2 = load_reg(s, rn);
5944 if (op1 & 2)
5945 gen_helper_double_saturate(tmp2, tmp2);
5946 if (op1 & 1)
5947 gen_helper_sub_saturate(tmp, tmp, tmp2);
5948 else
5949 gen_helper_add_saturate(tmp, tmp, tmp2);
5950 dead_tmp(tmp2);
5951 store_reg(s, rd, tmp);
5952 break;
5953 case 7: /* bkpt */
5954 gen_set_condexec(s);
5955 gen_set_pc_im(s->pc - 4);
5956 gen_exception(EXCP_BKPT);
5957 s->is_jmp = DISAS_JUMP;
5958 break;
5959 case 0x8: /* signed multiply */
5960 case 0xa:
5961 case 0xc:
5962 case 0xe:
5963 rs = (insn >> 8) & 0xf;
5964 rn = (insn >> 12) & 0xf;
5965 rd = (insn >> 16) & 0xf;
5966 if (op1 == 1) {
5967 /* (32 * 16) >> 16 */
5968 tmp = load_reg(s, rm);
5969 tmp2 = load_reg(s, rs);
5970 if (sh & 4)
5971 tcg_gen_sari_i32(tmp2, tmp2, 16);
5972 else
5973 gen_sxth(tmp2);
5974 tmp2 = gen_muls_i64_i32(tmp, tmp2);
5975 tcg_gen_shri_i64(tmp2, tmp2, 16);
5976 tmp = new_tmp();
5977 tcg_gen_trunc_i64_i32(tmp, tmp2);
5978 if ((sh & 2) == 0) {
5979 tmp2 = load_reg(s, rn);
5980 gen_helper_add_setq(tmp, tmp, tmp2);
5981 dead_tmp(tmp2);
5982 }
5983 store_reg(s, rd, tmp);
5984 } else {
5985 /* 16 * 16 */
5986 tmp = load_reg(s, rm);
5987 tmp2 = load_reg(s, rs);
5988 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
5989 dead_tmp(tmp2);
5990 if (op1 == 2) {
5991 tmp2 = tcg_temp_new(TCG_TYPE_I64);
5992 tcg_gen_ext_i32_i64(tmp2, tmp);
5993 dead_tmp(tmp);
5994 gen_addq(s, tmp2, rn, rd);
5995 gen_storeq_reg(s, rn, rd, tmp2);
5996 } else {
5997 if (op1 == 0) {
5998 tmp2 = load_reg(s, rn);
5999 gen_helper_add_setq(tmp, tmp, tmp2);
6000 dead_tmp(tmp2);
6001 }
6002 store_reg(s, rd, tmp);
6003 }
6004 }
6005 break;
6006 default:
6007 goto illegal_op;
6008 }
6009 } else if (((insn & 0x0e000000) == 0 &&
6010 (insn & 0x00000090) != 0x90) ||
6011 ((insn & 0x0e000000) == (1 << 25))) {
6012 int set_cc, logic_cc, shiftop;
6013
6014 op1 = (insn >> 21) & 0xf;
6015 set_cc = (insn >> 20) & 1;
6016 logic_cc = table_logic_cc[op1] & set_cc;
6017
6018 /* data processing instruction */
6019 if (insn & (1 << 25)) {
6020 /* immediate operand */
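             /* An 8-bit value rotated right by twice the 4-bit rotate
                field. */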
6021 val = insn & 0xff;
6022 shift = ((insn >> 8) & 0xf) * 2;
6023 if (shift)
6024 val = (val >> shift) | (val << (32 - shift));
6025 gen_op_movl_T1_im(val);
6026 if (logic_cc && shift)
6027 gen_set_CF_bit31(cpu_T[1]);
6028 } else {
6029 /* register */
6030 rm = (insn) & 0xf;
6031 gen_movl_T1_reg(s, rm);
6032 shiftop = (insn >> 5) & 3;
6033 if (!(insn & (1 << 4))) {
6034 shift = (insn >> 7) & 0x1f;
6035 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
6036 } else {
6037 rs = (insn >> 8) & 0xf;
6038 tmp = load_reg(s, rs);
6039 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
6040 }
6041 }
6042 if (op1 != 0x0f && op1 != 0x0d) {
6043 rn = (insn >> 16) & 0xf;
6044 gen_movl_T0_reg(s, rn);
6045 }
6046 rd = (insn >> 12) & 0xf;
6047 switch(op1) {
6048 case 0x00:
6049 gen_op_andl_T0_T1();
6050 gen_movl_reg_T0(s, rd);
6051 if (logic_cc)
6052 gen_op_logic_T0_cc();
6053 break;
6054 case 0x01:
6055 gen_op_xorl_T0_T1();
6056 gen_movl_reg_T0(s, rd);
6057 if (logic_cc)
6058 gen_op_logic_T0_cc();
6059 break;
6060 case 0x02:
6061 if (set_cc && rd == 15) {
6062 /* SUBS r15, ... is used for exception return. */
6063 if (IS_USER(s))
6064 goto illegal_op;
6065 gen_op_subl_T0_T1_cc();
6066 gen_exception_return(s);
6067 } else {
6068 if (set_cc)
6069 gen_op_subl_T0_T1_cc();
6070 else
6071 gen_op_subl_T0_T1();
6072 gen_movl_reg_T0(s, rd);
6073 }
6074 break;
6075 case 0x03:
6076 if (set_cc)
6077 gen_op_rsbl_T0_T1_cc();
6078 else
6079 gen_op_rsbl_T0_T1();
6080 gen_movl_reg_T0(s, rd);
6081 break;
6082 case 0x04:
6083 if (set_cc)
6084 gen_op_addl_T0_T1_cc();
6085 else
6086 gen_op_addl_T0_T1();
6087 gen_movl_reg_T0(s, rd);
6088 break;
6089 case 0x05:
6090 if (set_cc)
6091 gen_op_adcl_T0_T1_cc();
6092 else
6093 gen_adc_T0_T1();
6094 gen_movl_reg_T0(s, rd);
6095 break;
6096 case 0x06:
6097 if (set_cc)
6098 gen_op_sbcl_T0_T1_cc();
6099 else
6100 gen_sbc_T0_T1();
6101 gen_movl_reg_T0(s, rd);
6102 break;
6103 case 0x07:
6104 if (set_cc)
6105 gen_op_rscl_T0_T1_cc();
6106 else
6107 gen_rsc_T0_T1();
6108 gen_movl_reg_T0(s, rd);
6109 break;
6110 case 0x08:
6111 if (set_cc) {
6112 gen_op_andl_T0_T1();
6113 gen_op_logic_T0_cc();
6114 }
6115 break;
6116 case 0x09:
6117 if (set_cc) {
6118 gen_op_xorl_T0_T1();
6119 gen_op_logic_T0_cc();
6120 }
6121 break;
6122 case 0x0a:
6123 if (set_cc) {
6124 gen_op_subl_T0_T1_cc();
6125 }
6126 break;
6127 case 0x0b:
6128 if (set_cc) {
6129 gen_op_addl_T0_T1_cc();
6130 }
6131 break;
6132 case 0x0c:
6133 gen_op_orl_T0_T1();
6134 gen_movl_reg_T0(s, rd);
6135 if (logic_cc)
6136 gen_op_logic_T0_cc();
6137 break;
6138 case 0x0d:
6139 if (logic_cc && rd == 15) {
6140 /* MOVS r15, ... is used for exception return. */
6141 if (IS_USER(s))
6142 goto illegal_op;
6143 gen_op_movl_T0_T1();
6144 gen_exception_return(s);
6145 } else {
6146 gen_movl_reg_T1(s, rd);
6147 if (logic_cc)
6148 gen_op_logic_T1_cc();
6149 }
6150 break;
6151 case 0x0e:
6152 gen_op_bicl_T0_T1();
6153 gen_movl_reg_T0(s, rd);
6154 if (logic_cc)
6155 gen_op_logic_T0_cc();
6156 break;
6157 default:
6158 case 0x0f:
6159 gen_op_notl_T1();
6160 gen_movl_reg_T1(s, rd);
6161 if (logic_cc)
6162 gen_op_logic_T1_cc();
6163 break;
6164 }
6165 } else {
6166 /* other instructions */
6167 op1 = (insn >> 24) & 0xf;
6168 switch(op1) {
6169 case 0x0:
6170 case 0x1:
6171 /* multiplies, extra load/stores */
6172 sh = (insn >> 5) & 3;
6173 if (sh == 0) {
6174 if (op1 == 0x0) {
6175 rd = (insn >> 16) & 0xf;
6176 rn = (insn >> 12) & 0xf;
6177 rs = (insn >> 8) & 0xf;
6178 rm = (insn) & 0xf;
6179 op1 = (insn >> 20) & 0xf;
6180 switch (op1) {
6181 case 0: case 1: case 2: case 3: case 6:
6182 /* 32 bit mul */
6183 tmp = load_reg(s, rs);
6184 tmp2 = load_reg(s, rm);
6185 tcg_gen_mul_i32(tmp, tmp, tmp2);
6186 dead_tmp(tmp2);
6187 if (insn & (1 << 22)) {
6188 /* Subtract (mls) */
6189 ARCH(6T2);
6190 tmp2 = load_reg(s, rn);
6191 tcg_gen_sub_i32(tmp, tmp2, tmp);
6192 dead_tmp(tmp2);
6193 } else if (insn & (1 << 21)) {
6194 /* Add */
6195 tmp2 = load_reg(s, rn);
6196 tcg_gen_add_i32(tmp, tmp, tmp2);
6197 dead_tmp(tmp2);
6198 }
6199 if (insn & (1 << 20))
6200 gen_logic_CC(tmp);
6201 store_reg(s, rd, tmp);
6202 break;
6203 default:
6204 /* 64 bit mul */
6205 tmp = load_reg(s, rs);
6206 tmp2 = load_reg(s, rm);
6207 if (insn & (1 << 22))
6208 tmp = gen_muls_i64_i32(tmp, tmp2);
6209 else
6210 tmp = gen_mulu_i64_i32(tmp, tmp2);
6211 if (insn & (1 << 21)) /* mult accumulate */
6212 gen_addq(s, tmp, rn, rd);
6213 if (!(insn & (1 << 23))) { /* double accumulate */
6214 ARCH(6);
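                                 /* UMAAL: accumulate both halves of the
                                    register pair as separate 32-bit values. */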
6215 gen_addq_lo(s, tmp, rn);
6216 gen_addq_lo(s, tmp, rd);
6217 }
6218 if (insn & (1 << 20))
6219 gen_logicq_cc(tmp);
6220 gen_storeq_reg(s, rn, rd, tmp);
6221 break;
6222 }
6223 } else {
6224 rn = (insn >> 16) & 0xf;
6225 rd = (insn >> 12) & 0xf;
6226 if (insn & (1 << 23)) {
6227 /* load/store exclusive */
6228 gen_movl_T1_reg(s, rn);
6229 addr = cpu_T[1];
6230 if (insn & (1 << 20)) {
6231 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6232 tmp = gen_ld32(addr, IS_USER(s));
6233 store_reg(s, rd, tmp);
6234 } else {
6235 int label = gen_new_label();
6236 rm = insn & 0xf;
6237 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6238 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6239 0, label);
6240 tmp = load_reg(s,rm);
6241 gen_st32(tmp, cpu_T[1], IS_USER(s));
6242 gen_set_label(label);
6243 gen_movl_reg_T0(s, rd);
6244 }
6245 } else {
6246 /* SWP instruction */
6247 rm = (insn) & 0xf;
6248
6249 /* ??? This is not really atomic. However we know
6250 we never have multiple CPUs running in parallel,
6251 so it is good enough. */
6252 addr = load_reg(s, rn);
6253 tmp = load_reg(s, rm);
6254 if (insn & (1 << 22)) {
6255 tmp2 = gen_ld8u(addr, IS_USER(s));
6256 gen_st8(tmp, addr, IS_USER(s));
6257 } else {
6258 tmp2 = gen_ld32(addr, IS_USER(s));
6259 gen_st32(tmp, addr, IS_USER(s));
6260 }
6261 dead_tmp(addr);
6262 store_reg(s, rd, tmp2);
6263 }
6264 }
6265 } else {
6266 int address_offset;
6267 int load;
6268 /* Misc load/store */
6269 rn = (insn >> 16) & 0xf;
6270 rd = (insn >> 12) & 0xf;
6271 addr = load_reg(s, rn);
6272 if (insn & (1 << 24))
6273 gen_add_datah_offset(s, insn, 0, addr);
6274 address_offset = 0;
6275 if (insn & (1 << 20)) {
6276 /* load */
6277 switch(sh) {
6278 case 1:
6279 tmp = gen_ld16u(addr, IS_USER(s));
6280 break;
6281 case 2:
6282 tmp = gen_ld8s(addr, IS_USER(s));
6283 break;
6284 default:
6285 case 3:
6286 tmp = gen_ld16s(addr, IS_USER(s));
6287 break;
6288 }
6289 load = 1;
6290 } else if (sh & 2) {
6291 /* doubleword */
6292 if (sh & 1) {
6293 /* store */
6294 tmp = load_reg(s, rd);
6295 gen_st32(tmp, addr, IS_USER(s));
6296 tcg_gen_addi_i32(addr, addr, 4);
6297 tmp = load_reg(s, rd + 1);
6298 gen_st32(tmp, addr, IS_USER(s));
6299 load = 0;
6300 } else {
6301 /* load */
6302 tmp = gen_ld32(addr, IS_USER(s));
6303 store_reg(s, rd, tmp);
6304 tcg_gen_addi_i32(addr, addr, 4);
6305 tmp = gen_ld32(addr, IS_USER(s));
6306 rd++;
6307 load = 1;
6308 }
6309 address_offset = -4;
6310 } else {
6311 /* store */
6312 tmp = load_reg(s, rd);
6313 gen_st16(tmp, addr, IS_USER(s));
6314 load = 0;
6315 }
6316 /* Perform base writeback before the loaded value to
6317 ensure correct behavior with overlapping index registers.
6318                    ldrd with base writeback is undefined if the
6319 destination and index registers overlap. */
6320 if (!(insn & (1 << 24))) {
6321 gen_add_datah_offset(s, insn, address_offset, addr);
6322 store_reg(s, rn, addr);
6323 } else if (insn & (1 << 21)) {
6324 if (address_offset)
6325 tcg_gen_addi_i32(addr, addr, address_offset);
6326 store_reg(s, rn, addr);
6327 } else {
6328 dead_tmp(addr);
6329 }
6330 if (load) {
6331 /* Complete the load. */
6332 store_reg(s, rd, tmp);
6333 }
6334 }
6335 break;
6336 case 0x4:
6337 case 0x5:
6338 goto do_ldst;
6339 case 0x6:
6340 case 0x7:
6341 if (insn & (1 << 4)) {
6342 ARCH(6);
6343 /* Armv6 Media instructions. */
6344 rm = insn & 0xf;
6345 rn = (insn >> 16) & 0xf;
6346 rd = (insn >> 12) & 0xf;
6347 rs = (insn >> 8) & 0xf;
6348 switch ((insn >> 23) & 3) {
6349 case 0: /* Parallel add/subtract. */
6350 op1 = (insn >> 20) & 7;
6351 tmp = load_reg(s, rn);
6352 tmp2 = load_reg(s, rm);
6353 sh = (insn >> 5) & 7;
6354 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6355 goto illegal_op;
6356 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6357 dead_tmp(tmp2);
6358 store_reg(s, rd, tmp);
6359 break;
6360 case 1:
6361 if ((insn & 0x00700020) == 0) {
6362 /* Halfword pack. */
6363 tmp = load_reg(s, rn);
6364 tmp2 = load_reg(s, rm);
6365 shift = (insn >> 7) & 0x1f;
6366 if (insn & (1 << 6)) {
6367 /* pkhtb */
6368 if (shift == 0)
6369 shift = 31;
6370 tcg_gen_sari_i32(tmp2, tmp2, shift);
6371 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6372 tcg_gen_ext16u_i32(tmp2, tmp2);
6373 } else {
6374 /* pkhbt */
6375 if (shift)
6376 tcg_gen_shli_i32(tmp2, tmp2, shift);
6377 tcg_gen_ext16u_i32(tmp, tmp);
6378 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6379 }
6380 tcg_gen_or_i32(tmp, tmp, tmp2);
6381 dead_tmp(tmp2);
6382 store_reg(s, rd, tmp);
6383 } else if ((insn & 0x00200020) == 0x00200000) {
6384 /* [us]sat */
6385 tmp = load_reg(s, rm);
6386 shift = (insn >> 7) & 0x1f;
6387 if (insn & (1 << 6)) {
6388 if (shift == 0)
6389 shift = 31;
6390 tcg_gen_sari_i32(tmp, tmp, shift);
6391 } else {
6392 tcg_gen_shli_i32(tmp, tmp, shift);
6393 }
6394 sh = (insn >> 16) & 0x1f;
6395 if (sh != 0) {
6396 if (insn & (1 << 22))
6397 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
6398 else
6399 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
6400 }
6401 store_reg(s, rd, tmp);
6402 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6403 /* [us]sat16 */
6404 tmp = load_reg(s, rm);
6405 sh = (insn >> 16) & 0x1f;
6406 if (sh != 0) {
6407 if (insn & (1 << 22))
6408 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
6409 else
6410 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
6411 }
6412 store_reg(s, rd, tmp);
6413 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6414 /* Select bytes. */
6415 tmp = load_reg(s, rn);
6416 tmp2 = load_reg(s, rm);
6417 tmp3 = new_tmp();
6418 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6419 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6420 dead_tmp(tmp3);
6421 dead_tmp(tmp2);
6422 store_reg(s, rd, tmp);
6423 } else if ((insn & 0x000003e0) == 0x00000060) {
6424 tmp = load_reg(s, rm);
6425 shift = (insn >> 10) & 3;
6426 /* ??? In many cases it's not necessary to do a
6427 rotate; a shift is sufficient. */
6428 if (shift != 0)
6429 tcg_gen_rori_i32(tmp, tmp, shift * 8);
6430 op1 = (insn >> 20) & 7;
6431 switch (op1) {
6432 case 0: gen_sxtb16(tmp); break;
6433 case 2: gen_sxtb(tmp); break;
6434 case 3: gen_sxth(tmp); break;
6435 case 4: gen_uxtb16(tmp); break;
6436 case 6: gen_uxtb(tmp); break;
6437 case 7: gen_uxth(tmp); break;
6438 default: goto illegal_op;
6439 }
6440 if (rn != 15) {
6441 tmp2 = load_reg(s, rn);
6442 if ((op1 & 3) == 0) {
6443 gen_add16(tmp, tmp2);
6444 } else {
6445 tcg_gen_add_i32(tmp, tmp, tmp2);
6446 dead_tmp(tmp2);
6447 }
6448 }
6449 store_reg(s, rd, tmp);
6450 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6451 /* rev */
6452 tmp = load_reg(s, rm);
6453 if (insn & (1 << 22)) {
6454 if (insn & (1 << 7)) {
6455 gen_revsh(tmp);
6456 } else {
6457 ARCH(6T2);
6458 gen_helper_rbit(tmp, tmp);
6459 }
6460 } else {
6461 if (insn & (1 << 7))
6462 gen_rev16(tmp);
6463 else
6464 tcg_gen_bswap_i32(tmp, tmp);
6465 }
6466 store_reg(s, rd, tmp);
6467 } else {
6468 goto illegal_op;
6469 }
6470 break;
6471 case 2: /* Multiplies (Type 3). */
6472 tmp = load_reg(s, rm);
6473 tmp2 = load_reg(s, rs);
6474 if (insn & (1 << 20)) {
6475 /* Signed multiply most significant [accumulate]. */
6476 tmp2 = gen_muls_i64_i32(tmp, tmp2);
6477 if (insn & (1 << 5))
6478 tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
6479 tcg_gen_shri_i64(tmp2, tmp2, 32);
6480 tmp = new_tmp();
6481 tcg_gen_trunc_i64_i32(tmp, tmp2);
6482 if (rn != 15) {
6483 tmp2 = load_reg(s, rn);
6484 if (insn & (1 << 6)) {
6485 tcg_gen_sub_i32(tmp, tmp, tmp2);
6486 } else {
6487 tcg_gen_add_i32(tmp, tmp, tmp2);
6488 }
6489 dead_tmp(tmp2);
6490 }
6491 store_reg(s, rd, tmp);
6492 } else {
6493 if (insn & (1 << 5))
6494 gen_swap_half(tmp2);
6495 gen_smul_dual(tmp, tmp2);
6496 /* This addition cannot overflow. */
6497 if (insn & (1 << 6)) {
6498 tcg_gen_sub_i32(tmp, tmp, tmp2);
6499 } else {
6500 tcg_gen_add_i32(tmp, tmp, tmp2);
6501 }
6502 dead_tmp(tmp2);
6503 if (insn & (1 << 22)) {
6504 /* smlald, smlsld */
6505 tmp2 = tcg_temp_new(TCG_TYPE_I64);
6506 tcg_gen_ext_i32_i64(tmp2, tmp);
6507 dead_tmp(tmp);
6508 gen_addq(s, tmp2, rd, rn);
6509 gen_storeq_reg(s, rd, rn, tmp2);
6510 } else {
6511 /* smuad, smusd, smlad, smlsd */
6512 if (rd != 15)
6513 {
6514 tmp2 = load_reg(s, rd);
6515 gen_helper_add_setq(tmp, tmp, tmp2);
6516 dead_tmp(tmp2);
6517 }
6518 store_reg(s, rn, tmp);
6519 }
6520 }
6521 break;
6522 case 3:
6523 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6524 switch (op1) {
6525 case 0: /* Unsigned sum of absolute differences. */
6526 ARCH(6);
6527 tmp = load_reg(s, rm);
6528 tmp2 = load_reg(s, rs);
6529 gen_helper_usad8(tmp, tmp, tmp2);
6530 dead_tmp(tmp2);
6531 if (rn != 15) {
6532 tmp2 = load_reg(s, rn);
6533 tcg_gen_add_i32(tmp, tmp, tmp2);
6534 dead_tmp(tmp2);
6535 }
6536 store_reg(s, rd, tmp);
6537 break;
6538 case 0x20: case 0x24: case 0x28: case 0x2c:
6539 /* Bitfield insert/clear. */
6540 ARCH(6T2);
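/* shift = lsb and i becomes msb + 1 - lsb, the field width; e.g.
   "bfi r0, r1, #8, #4" encodes lsb = 8, msb = 11, giving a width
   of 4 and a field mask of 0xf below. (Illustrative.) */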
6541 shift = (insn >> 7) & 0x1f;
6542 i = (insn >> 16) & 0x1f;
6543 i = i + 1 - shift;
6544 if (rm == 15) {
6545 tmp = new_tmp();
6546 tcg_gen_movi_i32(tmp, 0);
6547 } else {
6548 tmp = load_reg(s, rm);
6549 }
6550 if (i != 32) {
6551 tmp2 = load_reg(s, rd);
6552 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
6553 dead_tmp(tmp2);
6554 }
6555 store_reg(s, rd, tmp);
6556 break;
6557 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6558 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6559 tmp = load_reg(s, rm);
6560 shift = (insn >> 7) & 0x1f;
6561 i = ((insn >> 16) & 0x1f) + 1;
6562 if (shift + i > 32)
6563 goto illegal_op;
6564 if (i < 32) {
6565 if (op1 & 0x20) {
6566 gen_ubfx(tmp, shift, (1u << i) - 1);
6567 } else {
6568 gen_sbfx(tmp, shift, i);
6569 }
6570 }
6571 store_reg(s, rd, tmp);
6572 break;
6573 default:
6574 goto illegal_op;
6575 }
6576 break;
6577 }
6578 break;
6579 }
6580 do_ldst:
6581 /* Check for undefined extension instructions
6582 * per the ARM Bible, i.e.:
6583 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6584 */
6585 sh = (0xf << 20) | (0xf << 4);
6586 if (op1 == 0x7 && ((insn & sh) == sh))
6587 {
6588 goto illegal_op;
6589 }
6590 /* load/store byte/word */
6591 rn = (insn >> 16) & 0xf;
6592 rd = (insn >> 12) & 0xf;
6593 tmp2 = load_reg(s, rn);
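/* P == 0 && W == 1 selects the translated variants (ldrt/strt etc.),
   which are always treated as user-mode accesses. */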
6594 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6595 if (insn & (1 << 24))
6596 gen_add_data_offset(s, insn, tmp2);
6597 if (insn & (1 << 20)) {
6598 /* load */
6599 s->is_mem = 1;
6600 if (insn & (1 << 22)) {
6601 tmp = gen_ld8u(tmp2, i);
6602 } else {
6603 tmp = gen_ld32(tmp2, i);
6604 }
6605 } else {
6606 /* store */
6607 tmp = load_reg(s, rd);
6608 if (insn & (1 << 22))
6609 gen_st8(tmp, tmp2, i);
6610 else
6611 gen_st32(tmp, tmp2, i);
6612 }
6613 if (!(insn & (1 << 24))) {
6614 gen_add_data_offset(s, insn, tmp2);
6615 store_reg(s, rn, tmp2);
6616 } else if (insn & (1 << 21)) {
6617 store_reg(s, rn, tmp2);
6618 } else {
6619 dead_tmp(tmp2);
6620 }
6621 if (insn & (1 << 20)) {
6622 /* Complete the load. */
6623 if (rd == 15)
6624 gen_bx(s, tmp);
6625 else
6626 store_reg(s, rd, tmp);
6627 }
6628 break;
6629 case 0x08:
6630 case 0x09:
6631 {
6632 int j, n, user, loaded_base;
6633 TCGv loaded_var;
6634 /* load/store multiple words */
6635 /* XXX: store correct base if write back */
6636 user = 0;
6637 if (insn & (1 << 22)) {
6638 if (IS_USER(s))
6639 goto illegal_op; /* only usable in supervisor mode */
6640
6641 if ((insn & (1 << 15)) == 0)
6642 user = 1;
6643 }
6644 rn = (insn >> 16) & 0xf;
6645 addr = load_reg(s, rn);
6646
6647 /* compute total size */
6648 loaded_base = 0;
6649 TCGV_UNUSED(loaded_var);
6650 n = 0;
6651 for(i=0;i<16;i++) {
6652 if (insn & (1 << i))
6653 n++;
6654 }
6655 /* XXX: test invalid n == 0 case ? */
6656 if (insn & (1 << 23)) {
6657 if (insn & (1 << 24)) {
6658 /* pre increment */
6659 tcg_gen_addi_i32(addr, addr, 4);
6660 } else {
6661 /* post increment */
6662 }
6663 } else {
6664 if (insn & (1 << 24)) {
6665 /* pre decrement */
6666 tcg_gen_addi_i32(addr, addr, -(n * 4));
6667 } else {
6668 /* post decrement */
6669 if (n != 1)
6670 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6671 }
6672 }
6673 j = 0;
6674 for(i=0;i<16;i++) {
6675 if (insn & (1 << i)) {
6676 if (insn & (1 << 20)) {
6677 /* load */
6678 tmp = gen_ld32(addr, IS_USER(s));
6679 if (i == 15) {
6680 gen_bx(s, tmp);
6681 } else if (user) {
6682 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6683 dead_tmp(tmp);
6684 } else if (i == rn) {
6685 loaded_var = tmp;
6686 loaded_base = 1;
6687 } else {
6688 store_reg(s, i, tmp);
6689 }
6690 } else {
6691 /* store */
6692 if (i == 15) {
6693 /* special case: r15 = PC + 8 */
6694 val = (long)s->pc + 4;
6695 tmp = new_tmp();
6696 tcg_gen_movi_i32(tmp, val);
6697 } else if (user) {
6698 tmp = new_tmp();
6699 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
6700 } else {
6701 tmp = load_reg(s, i);
6702 }
6703 gen_st32(tmp, addr, IS_USER(s));
6704 }
6705 j++;
6706 /* no need to add after the last transfer */
6707 if (j != n)
6708 tcg_gen_addi_i32(addr, addr, 4);
6709 }
6710 }
6711 if (insn & (1 << 21)) {
6712 /* write back */
6713 if (insn & (1 << 23)) {
6714 if (insn & (1 << 24)) {
6715 /* pre increment */
6716 } else {
6717 /* post increment */
6718 tcg_gen_addi_i32(addr, addr, 4);
6719 }
6720 } else {
6721 if (insn & (1 << 24)) {
6722 /* pre decrement */
6723 if (n != 1)
6724 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
6725 } else {
6726 /* post decrement */
6727 tcg_gen_addi_i32(addr, addr, -(n * 4));
6728 }
6729 }
6730 store_reg(s, rn, addr);
6731 } else {
6732 dead_tmp(addr);
6733 }
6734 if (loaded_base) {
6735 store_reg(s, rn, loaded_var);
6736 }
6737 if ((insn & (1 << 22)) && !user) {
6738 /* Restore CPSR from SPSR. */
6739 tmp = load_cpu_field(spsr);
6740 gen_set_cpsr(tmp, 0xffffffff);
6741 dead_tmp(tmp);
6742 s->is_jmp = DISAS_UPDATE;
6743 }
6744 }
6745 break;
6746 case 0xa:
6747 case 0xb:
6748 {
6749 int32_t offset;
6750
6751 /* branch (and link) */
6752 val = (int32_t)s->pc;
6753 if (insn & (1 << 24)) {
6754 tmp = new_tmp();
6755 tcg_gen_movi_i32(tmp, val);
6756 store_reg(s, 14, tmp);
6757 }
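/* "<< 8 >> 8" sign-extends the 24-bit immediate; e.g. insn[23:0] =
   0xfffffe becomes -2, i.e. a byte offset of -8 after the << 2 below. */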
6758 offset = (((int32_t)insn << 8) >> 8);
6759 val += (offset << 2) + 4;
6760 gen_jmp(s, val);
6761 }
6762 break;
6763 case 0xc:
6764 case 0xd:
6765 case 0xe:
6766 /* Coprocessor. */
6767 if (disas_coproc_insn(env, s, insn))
6768 goto illegal_op;
6769 break;
6770 case 0xf:
6771 /* swi */
6772 gen_set_pc_im(s->pc);
6773 s->is_jmp = DISAS_SWI;
6774 break;
6775 default:
6776 illegal_op:
6777 gen_set_condexec(s);
6778 gen_set_pc_im(s->pc - 4);
6779 gen_exception(EXCP_UDEF);
6780 s->is_jmp = DISAS_JUMP;
6781 break;
6782 }
6783 }
6784 }
6785
6786 /* Return true if this is a Thumb-2 logical op. */
6787 static int
6788 thumb2_logic_op(int op)
6789 {
6790 return (op < 8);
6791 }
6792
6793 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6794 then set condition code flags based on the result of the operation.
6795 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6796 to the high bit of T1.
6797 Returns zero if the opcode is valid. */
6798
6799 static int
6800 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6801 {
6802 int logic_cc;
6803
6804 logic_cc = 0;
6805 switch (op) {
6806 case 0: /* and */
6807 gen_op_andl_T0_T1();
6808 logic_cc = conds;
6809 break;
6810 case 1: /* bic */
6811 gen_op_bicl_T0_T1();
6812 logic_cc = conds;
6813 break;
6814 case 2: /* orr */
6815 gen_op_orl_T0_T1();
6816 logic_cc = conds;
6817 break;
6818 case 3: /* orn */
6819 gen_op_notl_T1();
6820 gen_op_orl_T0_T1();
6821 logic_cc = conds;
6822 break;
6823 case 4: /* eor */
6824 gen_op_xorl_T0_T1();
6825 logic_cc = conds;
6826 break;
6827 case 8: /* add */
6828 if (conds)
6829 gen_op_addl_T0_T1_cc();
6830 else
6831 gen_op_addl_T0_T1();
6832 break;
6833 case 10: /* adc */
6834 if (conds)
6835 gen_op_adcl_T0_T1_cc();
6836 else
6837 gen_adc_T0_T1();
6838 break;
6839 case 11: /* sbc */
6840 if (conds)
6841 gen_op_sbcl_T0_T1_cc();
6842 else
6843 gen_sbc_T0_T1();
6844 break;
6845 case 13: /* sub */
6846 if (conds)
6847 gen_op_subl_T0_T1_cc();
6848 else
6849 gen_op_subl_T0_T1();
6850 break;
6851 case 14: /* rsb */
6852 if (conds)
6853 gen_op_rsbl_T0_T1_cc();
6854 else
6855 gen_op_rsbl_T0_T1();
6856 break;
6857 default: /* 5, 6, 7, 9, 12, 15. */
6858 return 1;
6859 }
6860 if (logic_cc) {
6861 gen_op_logic_T0_cc();
6862 if (shifter_out)
6863 gen_set_CF_bit31(cpu_T[1]);
6864 }
6865 return 0;
6866 }
6867
6868 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6869 is not legal. */
6870 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6871 {
6872 uint32_t insn, imm, shift, offset;
6873 uint32_t rd, rn, rm, rs;
6874 TCGv tmp;
6875 TCGv tmp2;
6876 TCGv tmp3;
6877 TCGv addr;
6878 int op;
6879 int shiftop;
6880 int conds;
6881 int logic_cc;
6882
6883 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6884 || arm_feature (env, ARM_FEATURE_M))) {
6885 /* Thumb-1 cores may need to treat bl and blx as a pair of
6886 16-bit instructions to get correct prefetch abort behavior. */
6887 insn = insn_hw1;
6888 if ((insn & (1 << 12)) == 0) {
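/* bl/blx is split in two: the first half (handled in the page-boundary
   case below) leaves the pc of the bl, plus 4, plus
   signextend(hw1[10:0]) << 12 in r14; each second half then adds
   hw2[10:0] << 1 to that. (Summary ours.) */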
6889 /* Second half of blx. */
6890 offset = ((insn & 0x7ff) << 1);
6891 tmp = load_reg(s, 14);
6892 tcg_gen_addi_i32(tmp, tmp, offset);
6893 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
6894
6895 tmp2 = new_tmp();
6896 tcg_gen_movi_i32(tmp2, s->pc | 1);
6897 store_reg(s, 14, tmp2);
6898 gen_bx(s, tmp);
6899 return 0;
6900 }
6901 if (insn & (1 << 11)) {
6902 /* Second half of bl. */
6903 offset = ((insn & 0x7ff) << 1) | 1;
6904 tmp = load_reg(s, 14);
6905 tcg_gen_addi_i32(tmp, tmp, offset);
6906
6907 tmp2 = new_tmp();
6908 tcg_gen_movi_i32(tmp2, s->pc | 1);
6909 store_reg(s, 14, tmp2);
6910 gen_bx(s, tmp);
6911 return 0;
6912 }
6913 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6914 /* Instruction spans a page boundary. Implement it as two
6915 16-bit instructions in case the second half causes a
6916 prefetch abort. */
6917 offset = ((int32_t)insn << 21) >> 9;
6918 gen_op_movl_T0_im(s->pc + 2 + offset);
6919 gen_movl_reg_T0(s, 14);
6920 return 0;
6921 }
6922 /* Fall through to 32-bit decode. */
6923 }
6924
6925 insn = lduw_code(s->pc);
6926 s->pc += 2;
6927 insn |= (uint32_t)insn_hw1 << 16;
6928
6929 if ((insn & 0xf800e800) != 0xf000e800) {
6930 ARCH(6T2);
6931 }
6932
6933 rn = (insn >> 16) & 0xf;
6934 rs = (insn >> 12) & 0xf;
6935 rd = (insn >> 8) & 0xf;
6936 rm = insn & 0xf;
6937 switch ((insn >> 25) & 0xf) {
6938 case 0: case 1: case 2: case 3:
6939 /* 16-bit instructions. Should never happen. */
6940 abort();
6941 case 4:
6942 if (insn & (1 << 22)) {
6943 /* Other load/store, table branch. */
6944 if (insn & 0x01200000) {
6945 /* Load/store doubleword. */
6946 if (rn == 15) {
6947 addr = new_tmp();
6948 tcg_gen_movi_i32(addr, s->pc & ~3);
6949 } else {
6950 addr = load_reg(s, rn);
6951 }
6952 offset = (insn & 0xff) * 4;
6953 if ((insn & (1 << 23)) == 0)
6954 offset = -offset;
6955 if (insn & (1 << 24)) {
6956 tcg_gen_addi_i32(addr, addr, offset);
6957 offset = 0;
6958 }
6959 if (insn & (1 << 20)) {
6960 /* ldrd */
6961 tmp = gen_ld32(addr, IS_USER(s));
6962 store_reg(s, rs, tmp);
6963 tcg_gen_addi_i32(addr, addr, 4);
6964 tmp = gen_ld32(addr, IS_USER(s));
6965 store_reg(s, rd, tmp);
6966 } else {
6967 /* strd */
6968 tmp = load_reg(s, rs);
6969 gen_st32(tmp, addr, IS_USER(s));
6970 tcg_gen_addi_i32(addr, addr, 4);
6971 tmp = load_reg(s, rd);
6972 gen_st32(tmp, addr, IS_USER(s));
6973 }
6974 if (insn & (1 << 21)) {
6975 /* Base writeback. */
6976 if (rn == 15)
6977 goto illegal_op;
6978 tcg_gen_addi_i32(addr, addr, offset - 4);
6979 store_reg(s, rn, addr);
6980 } else {
6981 dead_tmp(addr);
6982 }
6983 } else if ((insn & (1 << 23)) == 0) {
6984 /* Load/store exclusive word. */
6985 gen_movl_T1_reg(s, rn);
6986 addr = cpu_T[1];
6987 if (insn & (1 << 20)) {
6988 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6989 tmp = gen_ld32(addr, IS_USER(s));
6990 store_reg(s, rd, tmp);
6991 } else {
6992 int label = gen_new_label();
6993 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6994 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6995 0, label);
6996 tmp = load_reg(s, rs);
6997 gen_st32(tmp, cpu_T[1], IS_USER(s));
6998 gen_set_label(label);
6999 gen_movl_reg_T0(s, rd);
7000 }
7001 } else if ((insn & (1 << 6)) == 0) {
7002 /* Table Branch. */
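/* tbb/tbh branch forward by twice the table entry, i.e. to
   pc + 2 * table[rm], with the table based at rn (or pc if rn == 15). */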
7003 if (rn == 15) {
7004 addr = new_tmp();
7005 tcg_gen_movi_i32(addr, s->pc);
7006 } else {
7007 addr = load_reg(s, rn);
7008 }
7009 tmp = load_reg(s, rm);
7010 tcg_gen_add_i32(addr, addr, tmp);
7011 if (insn & (1 << 4)) {
7012 /* tbh */
7013 tcg_gen_add_i32(addr, addr, tmp);
7014 dead_tmp(tmp);
7015 tmp = gen_ld16u(addr, IS_USER(s));
7016 } else { /* tbb */
7017 dead_tmp(tmp);
7018 tmp = gen_ld8u(addr, IS_USER(s));
7019 }
7020 dead_tmp(addr);
7021 tcg_gen_shli_i32(tmp, tmp, 1);
7022 tcg_gen_addi_i32(tmp, tmp, s->pc);
7023 store_reg(s, 15, tmp);
7024 } else {
7025 /* Load/store exclusive byte/halfword/doubleword. */
7026 /* ??? These are not really atomic. However we know
7027 we never have multiple CPUs running in parallel,
7028 so it is good enough. */
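/* A typical guest sequence being modelled here (illustrative):
       retry: ldrexb r0, [r1]
              add    r0, r0, #1
              strexb r2, r0, [r1]
              cmp    r2, #0
              bne    retry
   The strex status register is 0 only if the exclusive monitor
   was still set at the store. */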
7029 op = (insn >> 4) & 0x3;
7030 /* Must use a global reg for the address because we have
7031 a conditional branch in the store instruction. */
7032 gen_movl_T1_reg(s, rn);
7033 addr = cpu_T[1];
7034 if (insn & (1 << 20)) {
7035 gen_helper_mark_exclusive(cpu_env, addr);
7036 switch (op) {
7037 case 0:
7038 tmp = gen_ld8u(addr, IS_USER(s));
7039 break;
7040 case 1:
7041 tmp = gen_ld16u(addr, IS_USER(s));
7042 break;
7043 case 3:
7044 tmp = gen_ld32(addr, IS_USER(s));
7045 tcg_gen_addi_i32(addr, addr, 4);
7046 tmp2 = gen_ld32(addr, IS_USER(s));
7047 store_reg(s, rd, tmp2);
7048 break;
7049 default:
7050 goto illegal_op;
7051 }
7052 store_reg(s, rs, tmp);
7053 } else {
7054 int label = gen_new_label();
7055 /* Must use a global that is not killed by the branch. */
7056 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7057 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
7058 tmp = load_reg(s, rs);
7059 switch (op) {
7060 case 0:
7061 gen_st8(tmp, addr, IS_USER(s));
7062 break;
7063 case 1:
7064 gen_st16(tmp, addr, IS_USER(s));
7065 break;
7066 case 3:
7067 gen_st32(tmp, addr, IS_USER(s));
7068 tcg_gen_addi_i32(addr, addr, 4);
7069 tmp = load_reg(s, rd);
7070 gen_st32(tmp, addr, IS_USER(s));
7071 break;
7072 default:
7073 goto illegal_op;
7074 }
7075 gen_set_label(label);
7076 gen_movl_reg_T0(s, rm);
7077 }
7078 }
7079 } else {
7080 /* Load/store multiple, RFE, SRS. */
7081 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7082 /* Not available in user mode. */
7083 if (IS_USER(s))
7084 goto illegal_op;
7085 if (insn & (1 << 20)) {
7086 /* rfe */
7087 addr = load_reg(s, rn);
7088 if ((insn & (1 << 24)) == 0)
7089 tcg_gen_addi_i32(addr, addr, -8);
7090 /* Load PC into tmp and CPSR into tmp2. */
7091 tmp = gen_ld32(addr, 0);
7092 tcg_gen_addi_i32(addr, addr, 4);
7093 tmp2 = gen_ld32(addr, 0);
7094 if (insn & (1 << 21)) {
7095 /* Base writeback. */
7096 if (insn & (1 << 24)) {
7097 tcg_gen_addi_i32(addr, addr, 4);
7098 } else {
7099 tcg_gen_addi_i32(addr, addr, -4);
7100 }
7101 store_reg(s, rn, addr);
7102 } else {
7103 dead_tmp(addr);
7104 }
7105 gen_rfe(s, tmp, tmp2);
7106 } else {
7107 /* srs */
7108 op = (insn & 0x1f);
7109 if (op == (env->uncached_cpsr & CPSR_M)) {
7110 addr = load_reg(s, 13);
7111 } else {
7112 addr = new_tmp();
7113 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
7114 }
7115 if ((insn & (1 << 24)) == 0) {
7116 tcg_gen_addi_i32(addr, addr, -8);
7117 }
7118 tmp = load_reg(s, 14);
7119 gen_st32(tmp, addr, 0);
7120 tcg_gen_addi_i32(addr, addr, 4);
7121 tmp = new_tmp();
7122 gen_helper_cpsr_read(tmp);
7123 gen_st32(tmp, addr, 0);
7124 if (insn & (1 << 21)) {
7125 if ((insn & (1 << 24)) == 0) {
7126 tcg_gen_addi_i32(addr, addr, -4);
7127 } else {
7128 tcg_gen_addi_i32(addr, addr, 4);
7129 }
7130 if (op == (env->uncached_cpsr & CPSR_M)) {
7131 store_reg(s, 13, addr);
7132 } else {
7133 gen_helper_set_r13_banked(cpu_env,
7134 tcg_const_i32(op), addr);
7135 }
7136 } else {
7137 dead_tmp(addr);
7138 }
7139 }
7140 } else {
7141 int i;
7142 /* Load/store multiple. */
7143 addr = load_reg(s, rn);
7144 offset = 0;
7145 for (i = 0; i < 16; i++) {
7146 if (insn & (1 << i))
7147 offset += 4;
7148 }
7149 if (insn & (1 << 24)) {
7150 tcg_gen_addi_i32(addr, addr, -offset);
7151 }
7152
7153 for (i = 0; i < 16; i++) {
7154 if ((insn & (1 << i)) == 0)
7155 continue;
7156 if (insn & (1 << 20)) {
7157 /* Load. */
7158 tmp = gen_ld32(addr, IS_USER(s));
7159 if (i == 15) {
7160 gen_bx(s, tmp);
7161 } else {
7162 store_reg(s, i, tmp);
7163 }
7164 } else {
7165 /* Store. */
7166 tmp = load_reg(s, i);
7167 gen_st32(tmp, addr, IS_USER(s));
7168 }
7169 tcg_gen_addi_i32(addr, addr, 4);
7170 }
7171 if (insn & (1 << 21)) {
7172 /* Base register writeback. */
7173 if (insn & (1 << 24)) {
7174 tcg_gen_addi_i32(addr, addr, -offset);
7175 }
7176 /* Fault if writeback register is in register list. */
7177 if (insn & (1 << rn))
7178 goto illegal_op;
7179 store_reg(s, rn, addr);
7180 } else {
7181 dead_tmp(addr);
7182 }
7183 }
7184 }
7185 break;
7186 case 5: /* Data processing register constant shift. */
7187 if (rn == 15)
7188 gen_op_movl_T0_im(0);
7189 else
7190 gen_movl_T0_reg(s, rn);
7191 gen_movl_T1_reg(s, rm);
7192 op = (insn >> 21) & 0xf;
7193 shiftop = (insn >> 4) & 3;
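/* Shift amount is imm3:imm2, i.e. insn[14:12]:insn[7:6]. */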
7194 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7195 conds = (insn & (1 << 20)) != 0;
7196 logic_cc = (conds && thumb2_logic_op(op));
7197 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
7198 if (gen_thumb2_data_op(s, op, conds, 0))
7199 goto illegal_op;
7200 if (rd != 15)
7201 gen_movl_reg_T0(s, rd);
7202 break;
7203 case 13: /* Misc data processing. */
7204 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7205 if (op < 4 && (insn & 0xf000) != 0xf000)
7206 goto illegal_op;
7207 switch (op) {
7208 case 0: /* Register controlled shift. */
7209 tmp = load_reg(s, rn);
7210 tmp2 = load_reg(s, rm);
7211 if ((insn & 0x70) != 0)
7212 goto illegal_op;
7213 op = (insn >> 21) & 3;
7214 logic_cc = (insn & (1 << 20)) != 0;
7215 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7216 if (logic_cc)
7217 gen_logic_CC(tmp);
7218 store_reg(s, rd, tmp);
7219 break;
7220 case 1: /* Sign/zero extend. */
7221 tmp = load_reg(s, rm);
7222 shift = (insn >> 4) & 3;
7223 /* ??? In many cases it's not necessary to do a
7224 rotate; a shift is sufficient. */
7225 if (shift != 0)
7226 tcg_gen_rori_i32(tmp, tmp, shift * 8);
7227 op = (insn >> 20) & 7;
7228 switch (op) {
7229 case 0: gen_sxth(tmp); break;
7230 case 1: gen_uxth(tmp); break;
7231 case 2: gen_sxtb16(tmp); break;
7232 case 3: gen_uxtb16(tmp); break;
7233 case 4: gen_sxtb(tmp); break;
7234 case 5: gen_uxtb(tmp); break;
7235 default: goto illegal_op;
7236 }
7237 if (rn != 15) {
7238 tmp2 = load_reg(s, rn);
7239 if ((op >> 1) == 1) {
7240 gen_add16(tmp, tmp2);
7241 } else {
7242 tcg_gen_add_i32(tmp, tmp, tmp2);
7243 dead_tmp(tmp2);
7244 }
7245 }
7246 store_reg(s, rd, tmp);
7247 break;
7248 case 2: /* SIMD add/subtract. */
7249 op = (insn >> 20) & 7;
7250 shift = (insn >> 4) & 7;
7251 if ((op & 3) == 3 || (shift & 3) == 3)
7252 goto illegal_op;
7253 tmp = load_reg(s, rn);
7254 tmp2 = load_reg(s, rm);
7255 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7256 dead_tmp(tmp2);
7257 store_reg(s, rd, tmp);
7258 break;
7259 case 3: /* Other data processing. */
7260 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7261 if (op < 4) {
7262 /* Saturating add/subtract. */
7263 tmp = load_reg(s, rn);
7264 tmp2 = load_reg(s, rm);
7265 if (op & 2)
7266 gen_helper_double_saturate(tmp, tmp);
7267 if (op & 1)
7268 gen_helper_sub_saturate(tmp, tmp2, tmp);
7269 else
7270 gen_helper_add_saturate(tmp, tmp, tmp2);
7271 dead_tmp(tmp2);
7272 } else {
7273 tmp = load_reg(s, rn);
7274 switch (op) {
7275 case 0x0a: /* rbit */
7276 gen_helper_rbit(tmp, tmp);
7277 break;
7278 case 0x08: /* rev */
7279 tcg_gen_bswap_i32(tmp, tmp);
7280 break;
7281 case 0x09: /* rev16 */
7282 gen_rev16(tmp);
7283 break;
7284 case 0x0b: /* revsh */
7285 gen_revsh(tmp);
7286 break;
7287 case 0x10: /* sel */
7288 tmp2 = load_reg(s, rm);
7289 tmp3 = new_tmp();
7290 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7291 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7292 dead_tmp(tmp3);
7293 dead_tmp(tmp2);
7294 break;
7295 case 0x18: /* clz */
7296 gen_helper_clz(tmp, tmp);
7297 break;
7298 default:
7299 goto illegal_op;
7300 }
7301 }
7302 store_reg(s, rd, tmp);
7303 break;
7304 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7305 op = (insn >> 4) & 0xf;
7306 tmp = load_reg(s, rn);
7307 tmp2 = load_reg(s, rm);
7308 switch ((insn >> 20) & 7) {
7309 case 0: /* 32 x 32 -> 32 */
7310 tcg_gen_mul_i32(tmp, tmp, tmp2);
7311 dead_tmp(tmp2);
7312 if (rs != 15) {
7313 tmp2 = load_reg(s, rs);
7314 if (op)
7315 tcg_gen_sub_i32(tmp, tmp2, tmp);
7316 else
7317 tcg_gen_add_i32(tmp, tmp, tmp2);
7318 dead_tmp(tmp2);
7319 }
7320 break;
7321 case 1: /* 16 x 16 -> 32 */
7322 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7323 dead_tmp(tmp2);
7324 if (rs != 15) {
7325 tmp2 = load_reg(s, rs);
7326 gen_helper_add_setq(tmp, tmp, tmp2);
7327 dead_tmp(tmp2);
7328 }
7329 break;
7330 case 2: /* Dual multiply add. */
7331 case 4: /* Dual multiply subtract. */
7332 if (op)
7333 gen_swap_half(tmp2);
7334 gen_smul_dual(tmp, tmp2);
7335 /* This addition cannot overflow. */
7336 if (insn & (1 << 22)) {
7337 tcg_gen_sub_i32(tmp, tmp, tmp2);
7338 } else {
7339 tcg_gen_add_i32(tmp, tmp, tmp2);
7340 }
7341 dead_tmp(tmp2);
7342 if (rs != 15)
7343 {
7344 tmp2 = load_reg(s, rs);
7345 gen_helper_add_setq(tmp, tmp, tmp2);
7346 dead_tmp(tmp2);
7347 }
7348 break;
7349 case 3: /* 32 * 16 -> 32msb */
7350 if (op)
7351 tcg_gen_sari_i32(tmp2, tmp2, 16);
7352 else
7353 gen_sxth(tmp2);
7354 tmp2 = gen_muls_i64_i32(tmp, tmp2);
7355 tcg_gen_shri_i64(tmp2, tmp2, 16);
7356 tmp = new_tmp();
7357 tcg_gen_trunc_i64_i32(tmp, tmp2);
7358 if (rs != 15)
7359 {
7360 tmp2 = load_reg(s, rs);
7361 gen_helper_add_setq(tmp, tmp, tmp2);
7362 dead_tmp(tmp2);
7363 }
7364 break;
7365 case 5: case 6: /* 32 * 32 -> 32msb */
7366 gen_imull(tmp, tmp2);
7367 if (insn & (1 << 5)) {
7368 gen_roundqd(tmp, tmp2);
7369 dead_tmp(tmp2);
7370 } else {
7371 dead_tmp(tmp);
7372 tmp = tmp2;
7373 }
7374 if (rs != 15) {
7375 tmp2 = load_reg(s, rs);
7376 if (insn & (1 << 21)) {
7377 tcg_gen_add_i32(tmp, tmp, tmp2);
7378 } else {
7379 tcg_gen_sub_i32(tmp, tmp2, tmp);
7380 }
7381 dead_tmp(tmp2);
7382 }
7383 break;
7384 case 7: /* Unsigned sum of absolute differences. */
7385 gen_helper_usad8(tmp, tmp, tmp2);
7386 dead_tmp(tmp2);
7387 if (rs != 15) {
7388 tmp2 = load_reg(s, rs);
7389 tcg_gen_add_i32(tmp, tmp, tmp2);
7390 dead_tmp(tmp2);
7391 }
7392 break;
7393 }
7394 store_reg(s, rd, tmp);
7395 break;
7396 case 6: case 7: /* 64-bit multiply, Divide. */
7397 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7398 tmp = load_reg(s, rn);
7399 tmp2 = load_reg(s, rm);
7400 if ((op & 0x50) == 0x10) {
7401 /* sdiv, udiv */
7402 if (!arm_feature(env, ARM_FEATURE_DIV))
7403 goto illegal_op;
7404 if (op & 0x20)
7405 gen_helper_udiv(tmp, tmp, tmp2);
7406 else
7407 gen_helper_sdiv(tmp, tmp, tmp2);
7408 dead_tmp(tmp2);
7409 store_reg(s, rd, tmp);
7410 } else if ((op & 0xe) == 0xc) {
7411 /* Dual multiply accumulate long. */
7412 if (op & 1)
7413 gen_swap_half(tmp2);
7414 gen_smul_dual(tmp, tmp2);
7415 if (op & 0x10) {
7416 tcg_gen_sub_i32(tmp, tmp, tmp2);
7417 } else {
7418 tcg_gen_add_i32(tmp, tmp, tmp2);
7419 }
7420 dead_tmp(tmp2);
7421 tmp2 = tcg_temp_new(TCG_TYPE_I64);
/* Widen the 32-bit dual-multiply result before the 64-bit accumulate,
   matching the ARM smlald/smlsld path above. */
tcg_gen_ext_i32_i64(tmp2, tmp);
dead_tmp(tmp);
tmp = tmp2;
7422 gen_addq(s, tmp, rs, rd);
7423 gen_storeq_reg(s, rs, rd, tmp);
7424 } else {
7425 if (op & 0x20) {
7426 /* Unsigned 64-bit multiply */
7427 tmp = gen_mulu_i64_i32(tmp, tmp2);
7428 } else {
7429 if (op & 8) {
7430 /* smlalxy */
7431 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7432 dead_tmp(tmp2);
7433 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7434 tcg_gen_ext_i32_i64(tmp2, tmp);
7435 dead_tmp(tmp);
7436 tmp = tmp2;
7437 } else {
7438 /* Signed 64-bit multiply */
7439 tmp = gen_muls_i64_i32(tmp, tmp2);
7440 }
7441 }
7442 if (op & 4) {
7443 /* umaal */
7444 gen_addq_lo(s, tmp, rs);
7445 gen_addq_lo(s, tmp, rd);
7446 } else if (op & 0x40) {
7447 /* 64-bit accumulate. */
7448 gen_addq(s, tmp, rs, rd);
7449 }
7450 gen_storeq_reg(s, rs, rd, tmp);
7451 }
7452 break;
7453 }
7454 break;
7455 case 6: case 7: case 14: case 15:
7456 /* Coprocessor. */
7457 if (((insn >> 24) & 3) == 3) {
7458 /* Translate into the equivalent ARM encoding. */
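/* The Thumb U bit, insn[28], moves down to insn[24], its position
   in the ARM encoding. */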
7459 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7460 if (disas_neon_data_insn(env, s, insn))
7461 goto illegal_op;
7462 } else {
7463 if (insn & (1 << 28))
7464 goto illegal_op;
7465 if (disas_coproc_insn (env, s, insn))
7466 goto illegal_op;
7467 }
7468 break;
7469 case 8: case 9: case 10: case 11:
7470 if (insn & (1 << 15)) {
7471 /* Branches, misc control. */
7472 if (insn & 0x5000) {
7473 /* Unconditional branch. */
7474 /* signextend(hw1[10:0]) -> offset[31:12]. */
7475 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7476 /* hw1[10:0] -> offset[11:1]. */
7477 offset |= (insn & 0x7ff) << 1;
7478 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22];
7479 offset[24:22] already have the same value because of the
7480 sign extension above. */
7481 offset ^= ((~insn) & (1 << 13)) << 10;
7482 offset ^= ((~insn) & (1 << 11)) << 11;
7483
7484 if (insn & (1 << 14)) {
7485 /* Branch and link. */
7486 gen_op_movl_T1_im(s->pc | 1);
7487 gen_movl_reg_T1(s, 14);
7488 }
7489
7490 offset += s->pc;
7491 if (insn & (1 << 12)) {
7492 /* b/bl */
7493 gen_jmp(s, offset);
7494 } else {
7495 /* blx */
7496 offset &= ~(uint32_t)2;
7497 gen_bx_im(s, offset);
7498 }
7499 } else if (((insn >> 23) & 7) == 7) {
7500 /* Misc control */
7501 if (insn & (1 << 13))
7502 goto illegal_op;
7503
7504 if (insn & (1 << 26)) {
7505 /* Secure monitor call (v6Z) */
7506 goto illegal_op; /* not implemented. */
7507 } else {
7508 op = (insn >> 20) & 7;
7509 switch (op) {
7510 case 0: /* msr cpsr. */
7511 if (IS_M(env)) {
7512 tmp = load_reg(s, rn);
7513 addr = tcg_const_i32(insn & 0xff);
7514 gen_helper_v7m_msr(cpu_env, addr, tmp);
7515 gen_lookup_tb(s);
7516 break;
7517 }
7518 /* fall through */
7519 case 1: /* msr spsr. */
7520 if (IS_M(env))
7521 goto illegal_op;
7522 gen_movl_T0_reg(s, rn);
7523 if (gen_set_psr_T0(s,
7524 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7525 op == 1))
7526 goto illegal_op;
7527 break;
7528 case 2: /* cps, nop-hint. */
7529 if (((insn >> 8) & 7) == 0) {
7530 gen_nop_hint(s, insn & 0xff);
7531 }
7532 /* Implemented as NOP in user mode. */
7533 if (IS_USER(s))
7534 break;
7535 offset = 0;
7536 imm = 0;
7537 if (insn & (1 << 10)) {
7538 if (insn & (1 << 7))
7539 offset |= CPSR_A;
7540 if (insn & (1 << 6))
7541 offset |= CPSR_I;
7542 if (insn & (1 << 5))
7543 offset |= CPSR_F;
7544 if (insn & (1 << 9))
7545 imm = CPSR_A | CPSR_I | CPSR_F;
7546 }
7547 if (insn & (1 << 8)) {
7548 offset |= 0x1f;
7549 imm |= (insn & 0x1f);
7550 }
7551 if (offset) {
7552 gen_op_movl_T0_im(imm);
7553 gen_set_psr_T0(s, offset, 0);
7554 }
7555 break;
7556 case 3: /* Special control operations. */
7557 op = (insn >> 4) & 0xf;
7558 switch (op) {
7559 case 2: /* clrex */
7560 gen_helper_clrex(cpu_env);
7561 break;
7562 case 4: /* dsb */
7563 case 5: /* dmb */
7564 case 6: /* isb */
7565 /* These execute as NOPs. */
7566 ARCH(7);
7567 break;
7568 default:
7569 goto illegal_op;
7570 }
7571 break;
7572 case 4: /* bxj */
7573 /* Trivial implementation equivalent to bx. */
7574 tmp = load_reg(s, rn);
7575 gen_bx(s, tmp);
7576 break;
7577 case 5: /* Exception return. */
7578 /* Unpredictable in user mode. */
7579 goto illegal_op;
7580 case 6: /* mrs cpsr. */
7581 tmp = new_tmp();
7582 if (IS_M(env)) {
7583 addr = tcg_const_i32(insn & 0xff);
7584 gen_helper_v7m_mrs(tmp, cpu_env, addr);
7585 } else {
7586 gen_helper_cpsr_read(tmp);
7587 }
7588 store_reg(s, rd, tmp);
7589 break;
7590 case 7: /* mrs spsr. */
7591 /* Not accessible in user mode. */
7592 if (IS_USER(s) || IS_M(env))
7593 goto illegal_op;
7594 tmp = load_cpu_field(spsr);
7595 store_reg(s, rd, tmp);
7596 break;
7597 }
7598 }
7599 } else {
7600 /* Conditional branch. */
7601 op = (insn >> 22) & 0xf;
7602 /* Generate a conditional jump to next instruction. */
7603 s->condlabel = gen_new_label();
7604 gen_test_cc(op ^ 1, s->condlabel);
7605 s->condjmp = 1;
7606
7607 /* offset[11:1] = insn[10:0] */
7608 offset = (insn & 0x7ff) << 1;
7609 /* offset[17:12] = insn[21:16]. */
7610 offset |= (insn & 0x003f0000) >> 4;
7611 /* offset[31:20] = insn[26]. */
7612 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7613 /* offset[18] = insn[13]. */
7614 offset |= (insn & (1 << 13)) << 5;
7615 /* offset[19] = insn[11]. */
7616 offset |= (insn & (1 << 11)) << 8;
7617
7618 /* jump to the offset */
7619 gen_jmp(s, s->pc + offset);
7620 }
7621 } else {
7622 /* Data processing immediate. */
7623 if (insn & (1 << 25)) {
7624 if (insn & (1 << 24)) {
7625 if (insn & (1 << 20))
7626 goto illegal_op;
7627 /* Bitfield/Saturate. */
7628 op = (insn >> 21) & 7;
7629 imm = insn & 0x1f;
7630 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7631 if (rn == 15) {
7632 tmp = new_tmp();
7633 tcg_gen_movi_i32(tmp, 0);
7634 } else {
7635 tmp = load_reg(s, rn);
7636 }
7637 switch (op) {
7638 case 2: /* Signed bitfield extract. */
7639 imm++;
7640 if (shift + imm > 32)
7641 goto illegal_op;
7642 if (imm < 32)
7643 gen_sbfx(tmp, shift, imm);
7644 break;
7645 case 6: /* Unsigned bitfield extract. */
7646 imm++;
7647 if (shift + imm > 32)
7648 goto illegal_op;
7649 if (imm < 32)
7650 gen_ubfx(tmp, shift, (1u << imm) - 1);
7651 break;
7652 case 3: /* Bitfield insert/clear. */
7653 if (imm < shift)
7654 goto illegal_op;
7655 imm = imm + 1 - shift;
7656 if (imm != 32) {
7657 tmp2 = load_reg(s, rd);
7658 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
7659 dead_tmp(tmp2);
7660 }
7661 break;
7662 case 7:
7663 goto illegal_op;
7664 default: /* Saturate. */
7665 if (shift) {
7666 if (op & 1)
7667 tcg_gen_sari_i32(tmp, tmp, shift);
7668 else
7669 tcg_gen_shli_i32(tmp, tmp, shift);
7670 }
7671 tmp2 = tcg_const_i32(imm);
7672 if (op & 4) {
7673 /* Unsigned. */
7674 if ((op & 1) && shift == 0)
7675 gen_helper_usat16(tmp, tmp, tmp2);
7676 else
7677 gen_helper_usat(tmp, tmp, tmp2);
7678 } else {
7679 /* Signed. */
7680 if ((op & 1) && shift == 0)
7681 gen_helper_ssat16(tmp, tmp, tmp2);
7682 else
7683 gen_helper_ssat(tmp, tmp, tmp2);
7684 }
7685 break;
7686 }
7687 store_reg(s, rd, tmp);
7688 } else {
7689 imm = ((insn & 0x04000000) >> 15)
7690 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7691 if (insn & (1 << 22)) {
7692 /* 16-bit immediate. */
7693 imm |= (insn >> 4) & 0xf000;
7694 if (insn & (1 << 23)) {
7695 /* movt */
7696 tmp = load_reg(s, rd);
7697 tcg_gen_ext16u_i32(tmp, tmp);
7698 tcg_gen_ori_i32(tmp, tmp, imm << 16);
7699 } else {
7700 /* movw */
7701 tmp = new_tmp();
7702 tcg_gen_movi_i32(tmp, imm);
7703 }
7704 } else {
7705 /* Add/sub 12-bit immediate. */
7706 if (rn == 15) {
7707 offset = s->pc & ~(uint32_t)3;
7708 if (insn & (1 << 23))
7709 offset -= imm;
7710 else
7711 offset += imm;
7712 tmp = new_tmp();
7713 tcg_gen_movi_i32(tmp, offset);
7714 } else {
7715 tmp = load_reg(s, rn);
7716 if (insn & (1 << 23))
7717 tcg_gen_subi_i32(tmp, tmp, imm);
7718 else
7719 tcg_gen_addi_i32(tmp, tmp, imm);
7720 }
7721 }
7722 store_reg(s, rd, tmp);
7723 }
7724 } else {
7725 int shifter_out = 0;
7726 /* modified 12-bit immediate. */
7727 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7728 imm = (insn & 0xff);
7729 switch (shift) {
7730 case 0: /* XY */
7731 /* Nothing to do. */
7732 break;
7733 case 1: /* 00XY00XY */
7734 imm |= imm << 16;
7735 break;
7736 case 2: /* XY00XY00 */
7737 imm |= imm << 16;
7738 imm <<= 8;
7739 break;
7740 case 3: /* XYXYXYXY */
7741 imm |= imm << 16;
7742 imm |= imm << 8;
7743 break;
7744 default: /* Rotated constant. */
7745 shift = (shift << 1) | (imm >> 7);
7746 imm |= 0x80;
7747 imm = imm << (32 - shift);
7748 shifter_out = 1;
7749 break;
7750 }
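/* e.g. imm8 = 0x12 gives 0x00120012 for shift 1 and 0x12121212 for
   shift 3; a rotate count of 8 with imm8[6:0] = 0 gives 0x80000000.
   (Worked examples, illustrative only.) */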
7751 gen_op_movl_T1_im(imm);
7752 rn = (insn >> 16) & 0xf;
7753 if (rn == 15)
7754 gen_op_movl_T0_im(0);
7755 else
7756 gen_movl_T0_reg(s, rn);
7757 op = (insn >> 21) & 0xf;
7758 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7759 shifter_out))
7760 goto illegal_op;
7761 rd = (insn >> 8) & 0xf;
7762 if (rd != 15) {
7763 gen_movl_reg_T0(s, rd);
7764 }
7765 }
7766 }
7767 break;
7768 case 12: /* Load/store single data item. */
7769 {
7770 int postinc = 0;
7771 int writeback = 0;
7772 int user;
7773 if ((insn & 0x01100000) == 0x01000000) {
7774 if (disas_neon_ls_insn(env, s, insn))
7775 goto illegal_op;
7776 break;
7777 }
7778 user = IS_USER(s);
7779 if (rn == 15) {
7780 addr = new_tmp();
7781 /* PC relative. */
7782 /* s->pc has already been incremented by 4. */
7783 imm = s->pc & 0xfffffffc;
7784 if (insn & (1 << 23))
7785 imm += insn & 0xfff;
7786 else
7787 imm -= insn & 0xfff;
7788 tcg_gen_movi_i32(addr, imm);
7789 } else {
7790 addr = load_reg(s, rn);
7791 if (insn & (1 << 23)) {
7792 /* Positive offset. */
7793 imm = insn & 0xfff;
7794 tcg_gen_addi_i32(addr, addr, imm);
7795 } else {
7796 op = (insn >> 8) & 7;
7797 imm = insn & 0xff;
7798 switch (op) {
7799 case 0: case 8: /* Shifted Register. */
7800 shift = (insn >> 4) & 0xf;
7801 if (shift > 3)
7802 goto illegal_op;
7803 tmp = load_reg(s, rm);
7804 if (shift)
7805 tcg_gen_shli_i32(tmp, tmp, shift);
7806 tcg_gen_add_i32(addr, addr, tmp);
7807 dead_tmp(tmp);
7808 break;
7809 case 4: /* Negative offset. */
7810 tcg_gen_addi_i32(addr, addr, -imm);
7811 break;
7812 case 6: /* User privilege. */
7813 tcg_gen_addi_i32(addr, addr, imm);
7814 user = 1;
7815 break;
7816 case 1: /* Post-decrement. */
7817 imm = -imm;
7818 /* Fall through. */
7819 case 3: /* Post-increment. */
7820 postinc = 1;
7821 writeback = 1;
7822 break;
7823 case 5: /* Pre-decrement. */
7824 imm = -imm;
7825 /* Fall through. */
7826 case 7: /* Pre-increment. */
7827 tcg_gen_addi_i32(addr, addr, imm);
7828 writeback = 1;
7829 break;
7830 default:
7831 goto illegal_op;
7832 }
7833 }
7834 }
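/* op[1:0] = access size (0 byte, 1 halfword, 2 word);
   op[2] = sign-extend, loads only. */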
7835 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7836 if (insn & (1 << 20)) {
7837 /* Load. */
7838 if (rs == 15 && op != 2) {
7839 if (op & 2)
7840 goto illegal_op;
7841 /* Memory hint. Implemented as NOP. */
7842 } else {
7843 switch (op) {
7844 case 0: tmp = gen_ld8u(addr, user); break;
7845 case 4: tmp = gen_ld8s(addr, user); break;
7846 case 1: tmp = gen_ld16u(addr, user); break;
7847 case 5: tmp = gen_ld16s(addr, user); break;
7848 case 2: tmp = gen_ld32(addr, user); break;
7849 default: goto illegal_op;
7850 }
7851 if (rs == 15) {
7852 gen_bx(s, tmp);
7853 } else {
7854 store_reg(s, rs, tmp);
7855 }
7856 }
7857 } else {
7858 /* Store. */
7859 if (rs == 15)
7860 goto illegal_op;
7861 tmp = load_reg(s, rs);
7862 switch (op) {
7863 case 0: gen_st8(tmp, addr, user); break;
7864 case 1: gen_st16(tmp, addr, user); break;
7865 case 2: gen_st32(tmp, addr, user); break;
7866 default: goto illegal_op;
7867 }
7868 }
7869 if (postinc)
7870 tcg_gen_addi_i32(addr, addr, imm);
7871 if (writeback) {
7872 store_reg(s, rn, addr);
7873 } else {
7874 dead_tmp(addr);
7875 }
7876 }
7877 break;
7878 default:
7879 goto illegal_op;
7880 }
7881 return 0;
7882 illegal_op:
7883 return 1;
7884 }
7885
7886 static void disas_thumb_insn(CPUState *env, DisasContext *s)
7887 {
7888 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7889 int32_t offset;
7890 int i;
7891 TCGv tmp;
7892 TCGv tmp2;
7893 TCGv addr;
7894
7895 if (s->condexec_mask) {
7896 cond = s->condexec_cond;
7897 s->condlabel = gen_new_label();
7898 gen_test_cc(cond ^ 1, s->condlabel);
7899 s->condjmp = 1;
7900 }
7901
7902 insn = lduw_code(s->pc);
7903 s->pc += 2;
7904
7905 switch (insn >> 12) {
7906 case 0: case 1:
7907 rd = insn & 7;
7908 op = (insn >> 11) & 3;
7909 if (op == 3) {
7910 /* add/subtract */
7911 rn = (insn >> 3) & 7;
7912 gen_movl_T0_reg(s, rn);
7913 if (insn & (1 << 10)) {
7914 /* immediate */
7915 gen_op_movl_T1_im((insn >> 6) & 7);
7916 } else {
7917 /* reg */
7918 rm = (insn >> 6) & 7;
7919 gen_movl_T1_reg(s, rm);
7920 }
7921 if (insn & (1 << 9)) {
7922 if (s->condexec_mask)
7923 gen_op_subl_T0_T1();
7924 else
7925 gen_op_subl_T0_T1_cc();
7926 } else {
7927 if (s->condexec_mask)
7928 gen_op_addl_T0_T1();
7929 else
7930 gen_op_addl_T0_T1_cc();
7931 }
7932 gen_movl_reg_T0(s, rd);
7933 } else {
7934 /* shift immediate */
7935 rm = (insn >> 3) & 7;
7936 shift = (insn >> 6) & 0x1f;
7937 tmp = load_reg(s, rm);
7938 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7939 if (!s->condexec_mask)
7940 gen_logic_CC(tmp);
7941 store_reg(s, rd, tmp);
7942 }
7943 break;
7944 case 2: case 3:
7945 /* arithmetic large immediate */
7946 op = (insn >> 11) & 3;
7947 rd = (insn >> 8) & 0x7;
7948 if (op == 0) {
7949 gen_op_movl_T0_im(insn & 0xff);
7950 } else {
7951 gen_movl_T0_reg(s, rd);
7952 gen_op_movl_T1_im(insn & 0xff);
7953 }
7954 switch (op) {
7955 case 0: /* mov */
7956 if (!s->condexec_mask)
7957 gen_op_logic_T0_cc();
7958 break;
7959 case 1: /* cmp */
7960 gen_op_subl_T0_T1_cc();
7961 break;
7962 case 2: /* add */
7963 if (s->condexec_mask)
7964 gen_op_addl_T0_T1();
7965 else
7966 gen_op_addl_T0_T1_cc();
7967 break;
7968 case 3: /* sub */
7969 if (s->condexec_mask)
7970 gen_op_subl_T0_T1();
7971 else
7972 gen_op_subl_T0_T1_cc();
7973 break;
7974 }
7975 if (op != 1)
7976 gen_movl_reg_T0(s, rd);
7977 break;
7978 case 4:
7979 if (insn & (1 << 11)) {
7980 rd = (insn >> 8) & 7;
7981 /* load pc-relative. Bit 1 of PC is ignored. */
7982 val = s->pc + 2 + ((insn & 0xff) * 4);
7983 val &= ~(uint32_t)2;
7984 addr = new_tmp();
7985 tcg_gen_movi_i32(addr, val);
7986 tmp = gen_ld32(addr, IS_USER(s));
7987 dead_tmp(addr);
7988 store_reg(s, rd, tmp);
7989 break;
7990 }
7991 if (insn & (1 << 10)) {
7992 /* data processing extended or blx */
7993 rd = (insn & 7) | ((insn >> 4) & 8);
7994 rm = (insn >> 3) & 0xf;
7995 op = (insn >> 8) & 3;
7996 switch (op) {
7997 case 0: /* add */
7998 gen_movl_T0_reg(s, rd);
7999 gen_movl_T1_reg(s, rm);
8000 gen_op_addl_T0_T1();
8001 gen_movl_reg_T0(s, rd);
8002 break;
8003 case 1: /* cmp */
8004 gen_movl_T0_reg(s, rd);
8005 gen_movl_T1_reg(s, rm);
8006 gen_op_subl_T0_T1_cc();
8007 break;
8008 case 2: /* mov/cpy */
8009 gen_movl_T0_reg(s, rm);
8010 gen_movl_reg_T0(s, rd);
8011 break;
8012 case 3: /* branch [and link] exchange thumb register */
8013 tmp = load_reg(s, rm);
8014 if (insn & (1 << 7)) {
8015 val = (uint32_t)s->pc | 1;
8016 tmp2 = new_tmp();
8017 tcg_gen_movi_i32(tmp2, val);
8018 store_reg(s, 14, tmp2);
8019 }
8020 gen_bx(s, tmp);
8021 break;
8022 }
8023 break;
8024 }
8025
8026 /* data processing register */
8027 rd = insn & 7;
8028 rm = (insn >> 3) & 7;
8029 op = (insn >> 6) & 0xf;
8030 if (op == 2 || op == 3 || op == 4 || op == 7) {
8031 /* the shift/rotate ops want the operands backwards */
8032 val = rm;
8033 rm = rd;
8034 rd = val;
8035 val = 1;
8036 } else {
8037 val = 0;
8038 }
8039
8040 if (op == 9) /* neg */
8041 gen_op_movl_T0_im(0);
8042 else if (op != 0xf) /* mvn doesn't read its first operand */
8043 gen_movl_T0_reg(s, rd);
8044
8045 gen_movl_T1_reg(s, rm);
8046 switch (op) {
8047 case 0x0: /* and */
8048 gen_op_andl_T0_T1();
8049 if (!s->condexec_mask)
8050 gen_op_logic_T0_cc();
8051 break;
8052 case 0x1: /* eor */
8053 gen_op_xorl_T0_T1();
8054 if (!s->condexec_mask)
8055 gen_op_logic_T0_cc();
8056 break;
8057 case 0x2: /* lsl */
8058 if (s->condexec_mask) {
8059 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
8060 } else {
8061 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8062 gen_op_logic_T1_cc();
8063 }
8064 break;
8065 case 0x3: /* lsr */
8066 if (s->condexec_mask) {
8067 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
8068 } else {
8069 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8070 gen_op_logic_T1_cc();
8071 }
8072 break;
8073 case 0x4: /* asr */
8074 if (s->condexec_mask) {
8075 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
8076 } else {
8077 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8078 gen_op_logic_T1_cc();
8079 }
8080 break;
8081 case 0x5: /* adc */
8082 if (s->condexec_mask)
8083 gen_adc_T0_T1();
8084 else
8085 gen_op_adcl_T0_T1_cc();
8086 break;
8087 case 0x6: /* sbc */
8088 if (s->condexec_mask)
8089 gen_sbc_T0_T1();
8090 else
8091 gen_op_sbcl_T0_T1_cc();
8092 break;
8093 case 0x7: /* ror */
8094 if (s->condexec_mask) {
8095 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
8096 } else {
8097 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
8098 gen_op_logic_T1_cc();
8099 }
8100 break;
8101 case 0x8: /* tst */
8102 gen_op_andl_T0_T1();
8103 gen_op_logic_T0_cc();
8104 rd = 16;
8105 break;
8106 case 0x9: /* neg */
8107 if (s->condexec_mask)
8108 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
8109 else
8110 gen_op_subl_T0_T1_cc();
8111 break;
8112 case 0xa: /* cmp */
8113 gen_op_subl_T0_T1_cc();
8114 rd = 16;
8115 break;
8116 case 0xb: /* cmn */
8117 gen_op_addl_T0_T1_cc();
8118 rd = 16;
8119 break;
8120 case 0xc: /* orr */
8121 gen_op_orl_T0_T1();
8122 if (!s->condexec_mask)
8123 gen_op_logic_T0_cc();
8124 break;
8125 case 0xd: /* mul */
8126 gen_op_mull_T0_T1();
8127 if (!s->condexec_mask)
8128 gen_op_logic_T0_cc();
8129 break;
8130 case 0xe: /* bic */
8131 gen_op_bicl_T0_T1();
8132 if (!s->condexec_mask)
8133 gen_op_logic_T0_cc();
8134 break;
8135 case 0xf: /* mvn */
8136 gen_op_notl_T1();
8137 if (!s->condexec_mask)
8138 gen_op_logic_T1_cc();
8139 val = 1;
8140 rm = rd;
8141 break;
8142 }
8143 if (rd != 16) {
8144 if (val)
8145 gen_movl_reg_T1(s, rm);
8146 else
8147 gen_movl_reg_T0(s, rd);
8148 }
8149 break;
8150
8151 case 5:
8152 /* load/store register offset. */
8153 rd = insn & 7;
8154 rn = (insn >> 3) & 7;
8155 rm = (insn >> 6) & 7;
8156 op = (insn >> 9) & 7;
8157 addr = load_reg(s, rn);
8158 tmp = load_reg(s, rm);
8159 tcg_gen_add_i32(addr, addr, tmp);
8160 dead_tmp(tmp);
8161
8162 if (op < 3) /* store */
8163 tmp = load_reg(s, rd);
8164
8165 switch (op) {
8166 case 0: /* str */
8167 gen_st32(tmp, addr, IS_USER(s));
8168 break;
8169 case 1: /* strh */
8170 gen_st16(tmp, addr, IS_USER(s));
8171 break;
8172 case 2: /* strb */
8173 gen_st8(tmp, addr, IS_USER(s));
8174 break;
8175 case 3: /* ldrsb */
8176 tmp = gen_ld8s(addr, IS_USER(s));
8177 break;
8178 case 4: /* ldr */
8179 tmp = gen_ld32(addr, IS_USER(s));
8180 break;
8181 case 5: /* ldrh */
8182 tmp = gen_ld16u(addr, IS_USER(s));
8183 break;
8184 case 6: /* ldrb */
8185 tmp = gen_ld8u(addr, IS_USER(s));
8186 break;
8187 case 7: /* ldrsh */
8188 tmp = gen_ld16s(addr, IS_USER(s));
8189 break;
8190 }
8191 if (op >= 3) /* load */
8192 store_reg(s, rd, tmp);
8193 dead_tmp(addr);
8194 break;
8195
8196 case 6:
8197 /* load/store word immediate offset */
8198 rd = insn & 7;
8199 rn = (insn >> 3) & 7;
8200 addr = load_reg(s, rn);
8201 val = (insn >> 4) & 0x7c;
8202 tcg_gen_addi_i32(addr, addr, val);
8203
8204 if (insn & (1 << 11)) {
8205 /* load */
8206 tmp = gen_ld32(addr, IS_USER(s));
8207 store_reg(s, rd, tmp);
8208 } else {
8209 /* store */
8210 tmp = load_reg(s, rd);
8211 gen_st32(tmp, addr, IS_USER(s));
8212 }
8213 dead_tmp(addr);
8214 break;
8215
8216 case 7:
8217 /* load/store byte immediate offset */
8218 rd = insn & 7;
8219 rn = (insn >> 3) & 7;
8220 addr = load_reg(s, rn);
8221 val = (insn >> 6) & 0x1f;
8222 tcg_gen_addi_i32(addr, addr, val);
8223
8224 if (insn & (1 << 11)) {
8225 /* load */
8226 tmp = gen_ld8u(addr, IS_USER(s));
8227 store_reg(s, rd, tmp);
8228 } else {
8229 /* store */
8230 tmp = load_reg(s, rd);
8231 gen_st8(tmp, addr, IS_USER(s));
8232 }
8233 dead_tmp(addr);
8234 break;
8235
8236 case 8:
8237 /* load/store halfword immediate offset */
8238 rd = insn & 7;
8239 rn = (insn >> 3) & 7;
8240 addr = load_reg(s, rn);
8241 val = (insn >> 5) & 0x3e;
8242 tcg_gen_addi_i32(addr, addr, val);
8243
8244 if (insn & (1 << 11)) {
8245 /* load */
8246 tmp = gen_ld16u(addr, IS_USER(s));
8247 store_reg(s, rd, tmp);
8248 } else {
8249 /* store */
8250 tmp = load_reg(s, rd);
8251 gen_st16(tmp, addr, IS_USER(s));
8252 }
8253 dead_tmp(addr);
8254 break;
8255
8256 case 9:
8257 /* load/store from stack */
8258 rd = (insn >> 8) & 7;
8259 addr = load_reg(s, 13);
8260 val = (insn & 0xff) * 4;
8261 tcg_gen_addi_i32(addr, addr, val);
8262
8263 if (insn & (1 << 11)) {
8264 /* load */
8265 tmp = gen_ld32(addr, IS_USER(s));
8266 store_reg(s, rd, tmp);
8267 } else {
8268 /* store */
8269 tmp = load_reg(s, rd);
8270 gen_st32(tmp, addr, IS_USER(s));
8271 }
8272 dead_tmp(addr);
8273 break;
8274
8275 case 10:
8276 /* add to high reg */
8277 rd = (insn >> 8) & 7;
8278 if (insn & (1 << 11)) {
8279 /* SP */
8280 tmp = load_reg(s, 13);
8281 } else {
8282 /* PC. bit 1 is ignored. */
8283 tmp = new_tmp();
8284 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
8285 }
8286 val = (insn & 0xff) * 4;
8287 tcg_gen_addi_i32(tmp, tmp, val);
8288 store_reg(s, rd, tmp);
8289 break;
8290
8291 case 11:
8292 /* misc */
8293 op = (insn >> 8) & 0xf;
8294 switch (op) {
8295 case 0:
8296 /* adjust stack pointer */
8297 tmp = load_reg(s, 13);
8298 val = (insn & 0x7f) * 4;
8299 if (insn & (1 << 7))
8300 val = -(int32_t)val;
8301 tcg_gen_addi_i32(tmp, tmp, val);
8302 store_reg(s, 13, tmp);
8303 break;
8304
8305 case 2: /* sign/zero extend. */
8306 ARCH(6);
8307 rd = insn & 7;
8308 rm = (insn >> 3) & 7;
8309 tmp = load_reg(s, rm);
8310 switch ((insn >> 6) & 3) {
8311 case 0: gen_sxth(tmp); break;
8312 case 1: gen_sxtb(tmp); break;
8313 case 2: gen_uxth(tmp); break;
8314 case 3: gen_uxtb(tmp); break;
8315 }
8316 store_reg(s, rd, tmp);
8317 break;
8318 case 4: case 5: case 0xc: case 0xd:
8319 /* push/pop */
8320 addr = load_reg(s, 13);
8321 if (insn & (1 << 8))
8322 offset = 4;
8323 else
8324 offset = 0;
8325 for (i = 0; i < 8; i++) {
8326 if (insn & (1 << i))
8327 offset += 4;
8328 }
8329 if ((insn & (1 << 11)) == 0) {
8330 tcg_gen_addi_i32(addr, addr, -offset);
8331 }
8332 for (i = 0; i < 8; i++) {
8333 if (insn & (1 << i)) {
8334 if (insn & (1 << 11)) {
8335 /* pop */
8336 tmp = gen_ld32(addr, IS_USER(s));
8337 store_reg(s, i, tmp);
8338 } else {
8339 /* push */
8340 tmp = load_reg(s, i);
8341 gen_st32(tmp, addr, IS_USER(s));
8342 }
8343 /* advance to the next address. */
8344 tcg_gen_addi_i32(addr, addr, 4);
8345 }
8346 }
8347 TCGV_UNUSED(tmp);
8348 if (insn & (1 << 8)) {
8349 if (insn & (1 << 11)) {
8350 /* pop pc */
8351 tmp = gen_ld32(addr, IS_USER(s));
8352 /* don't set the pc until the rest of the instruction
8353 has completed */
8354 } else {
8355 /* push lr */
8356 tmp = load_reg(s, 14);
8357 gen_st32(tmp, addr, IS_USER(s));
8358 }
8359 tcg_gen_addi_i32(addr, addr, 4);
8360 }
8361 if ((insn & (1 << 11)) == 0) {
8362 tcg_gen_addi_i32(addr, addr, -offset);
8363 }
8364 /* write back the new stack pointer */
8365 store_reg(s, 13, addr);
8366 /* set the new PC value */
8367 if ((insn & 0x0900) == 0x0900)
8368 gen_bx(s, tmp);
8369 break;
8370
8371 case 1: case 3: case 9: case 11: /* cbz/cbnz */
8372 rm = insn & 7;
8373 tmp = load_reg(s, rm);
8374 s->condlabel = gen_new_label();
8375 s->condjmp = 1;
8376 if (insn & (1 << 11))
8377 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
8378 else
8379 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
8380 dead_tmp(tmp);
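/* offset[5:1] = insn[7:3], offset[6] = insn[9]; forward branches only. */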
8381 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8382 val = (uint32_t)s->pc + 2;
8383 val += offset;
8384 gen_jmp(s, val);
8385 break;
8386
8387 case 15: /* IT, nop-hint. */
8388 if ((insn & 0xf) == 0) {
8389 gen_nop_hint(s, (insn >> 4) & 0xf);
8390 break;
8391 }
8392 /* If Then. */
8393 s->condexec_cond = (insn >> 4) & 0xe;
8394 s->condexec_mask = insn & 0x1f;
8395 /* No actual code generated for this insn, just setup state. */
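/* condexec_cond holds firstcond[3:1]; condexec_mask holds
   firstcond[0]:mask[3:0]. The mask is consumed one bit per translated
   instruction in gen_intermediate_code_internal. */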
8396 break;
8397
8398 case 0xe: /* bkpt */
8399 gen_set_condexec(s);
8400 gen_set_pc_im(s->pc - 2);
8401 gen_exception(EXCP_BKPT);
8402 s->is_jmp = DISAS_JUMP;
8403 break;
8404
8405 case 0xa: /* rev */
8406 ARCH(6);
8407 rn = (insn >> 3) & 0x7;
8408 rd = insn & 0x7;
8409 tmp = load_reg(s, rn);
8410 switch ((insn >> 6) & 3) {
8411 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8412 case 1: gen_rev16(tmp); break;
8413 case 3: gen_revsh(tmp); break;
8414 default: goto illegal_op;
8415 }
8416 store_reg(s, rd, tmp);
8417 break;
8418
8419 case 6: /* cps */
8420 ARCH(6);
8421 if (IS_USER(s))
8422 break;
8423 if (IS_M(env)) {
8424 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
8425 /* PRIMASK */
8426 if (insn & 1) {
8427 addr = tcg_const_i32(16);
8428 gen_helper_v7m_msr(cpu_env, addr, tmp);
8429 }
8430 /* FAULTMASK */
8431 if (insn & 2) {
8432 addr = tcg_const_i32(17);
8433 gen_helper_v7m_msr(cpu_env, addr, tmp);
8434 }
8435 gen_lookup_tb(s);
8436 } else {
8437 if (insn & (1 << 4))
8438 shift = CPSR_A | CPSR_I | CPSR_F;
8439 else
8440 shift = 0;
8441
8442 val = ((insn & 7) << 6) & shift;
8443 gen_op_movl_T0_im(val);
8444 gen_set_psr_T0(s, shift, 0);
8445 }
8446 break;
8447
8448 default:
8449 goto undef;
8450 }
8451 break;
8452
8453 case 12:
8454 /* load/store multiple */
8455 rn = (insn >> 8) & 0x7;
8456 addr = load_reg(s, rn);
8457 for (i = 0; i < 8; i++) {
8458 if (insn & (1 << i)) {
8459 if (insn & (1 << 11)) {
8460 /* load */
8461 tmp = gen_ld32(addr, IS_USER(s));
8462 store_reg(s, i, tmp);
8463 } else {
8464 /* store */
8465 tmp = load_reg(s, i);
8466 gen_st32(tmp, addr, IS_USER(s));
8467 }
8468 /* advance to the next address */
8469 tcg_gen_addi_i32(addr, addr, 4);
8470 }
8471 }
8472 /* Base register writeback. */
8473 if ((insn & (1 << rn)) == 0) {
8474 store_reg(s, rn, addr);
8475 } else {
8476 dead_tmp(addr);
8477 }
8478 break;
8479
8480 case 13:
8481 /* conditional branch or swi */
8482 cond = (insn >> 8) & 0xf;
8483 if (cond == 0xe)
8484 goto undef;
8485
8486 if (cond == 0xf) {
8487 /* swi */
8488 gen_set_condexec(s);
8489 gen_set_pc_im(s->pc);
8490 s->is_jmp = DISAS_SWI;
8491 break;
8492 }
8493 /* generate a conditional jump to next instruction */
8494 s->condlabel = gen_new_label();
8495 gen_test_cc(cond ^ 1, s->condlabel);
8496 s->condjmp = 1;
8497 gen_movl_T1_reg(s, 15);
8498
8499 /* jump to the offset */
8500 val = (uint32_t)s->pc + 2;
8501 offset = ((int32_t)insn << 24) >> 24;
8502 val += offset << 1;
8503 gen_jmp(s, val);
8504 break;
8505
8506 case 14:
8507 if (insn & (1 << 11)) {
8508 if (disas_thumb2_insn(env, s, insn))
8509 goto undef32;
8510 break;
8511 }
8512 /* unconditional branch */
8513 val = (uint32_t)s->pc;
8514 offset = ((int32_t)insn << 21) >> 21;
8515 val += (offset << 1) + 2;
8516 gen_jmp(s, val);
8517 break;
8518
8519 case 15:
8520 if (disas_thumb2_insn(env, s, insn))
8521 goto undef32;
8522 break;
8523 }
8524 return;
8525 undef32:
8526 gen_set_condexec(s);
8527 gen_set_pc_im(s->pc - 4);
8528 gen_exception(EXCP_UDEF);
8529 s->is_jmp = DISAS_JUMP;
8530 return;
8531 illegal_op:
8532 undef:
8533 gen_set_condexec(s);
8534 gen_set_pc_im(s->pc - 2);
8535 gen_exception(EXCP_UDEF);
8536 s->is_jmp = DISAS_JUMP;
8537 }
8538
8539 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8540 basic block 'tb'. If search_pc is TRUE, also generate PC
8541 information for each intermediate instruction. */
8542 static inline void gen_intermediate_code_internal(CPUState *env,
8543 TranslationBlock *tb,
8544 int search_pc)
8545 {
8546 DisasContext dc1, *dc = &dc1;
8547 uint16_t *gen_opc_end;
8548 int j, lj;
8549 target_ulong pc_start;
8550 uint32_t next_page_start;
8551 int num_insns;
8552 int max_insns;
8553
8554 /* generate intermediate code */
8555 num_temps = 0;
8556 memset(temps, 0, sizeof(temps));
8557
8558 pc_start = tb->pc;
8559
8560 dc->tb = tb;
8561
8562 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
8563
8564 dc->is_jmp = DISAS_NEXT;
8565 dc->pc = pc_start;
8566 dc->singlestep_enabled = env->singlestep_enabled;
8567 dc->condjmp = 0;
8568 dc->thumb = env->thumb;
8569 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8570 dc->condexec_cond = env->condexec_bits >> 4;
8571 dc->is_mem = 0;
8572 #if !defined(CONFIG_USER_ONLY)
8573 if (IS_M(env)) {
8574 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8575 } else {
8576 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8577 }
8578 #endif
8579 cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
8580 cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
8581 cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
8582 cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
8583 cpu_V0 = cpu_F0d;
8584 cpu_V1 = cpu_F1d;
8585 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8586 cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
8587 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8588 lj = -1;
8589 num_insns = 0;
8590 max_insns = tb->cflags & CF_COUNT_MASK;
8591 if (max_insns == 0)
8592 max_insns = CF_COUNT_MASK;
8593
8594 gen_icount_start();
8595 /* Reset the conditional execution bits immediately. This avoids
8596 complications trying to do it at the end of the block. */
8597 if (env->condexec_bits)
8598 {
8599 TCGv tmp = new_tmp();
8600 tcg_gen_movi_i32(tmp, 0);
8601 store_cpu_field(tmp, condexec_bits);
8602 }
8603 do {
8604 #ifdef CONFIG_USER_ONLY
8605 /* Intercept jump to the magic kernel page. */
8606 if (dc->pc >= 0xffff0000) {
8607 /* We always get here via a jump, so we know we are not in a
8608 conditional execution block. */
8609 gen_exception(EXCP_KERNEL_TRAP);
8610 dc->is_jmp = DISAS_UPDATE;
8611 break;
8612 }
8613 #else
8614 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8615 /* We always get here via a jump, so we know we are not in a
8616 conditional execution block. */
8617 gen_exception(EXCP_EXCEPTION_EXIT);
8618 dc->is_jmp = DISAS_UPDATE;
8619 break;
8620 }
8621 #endif
8622
8623 if (env->nb_breakpoints > 0) {
8624 for(j = 0; j < env->nb_breakpoints; j++) {
8625 if (env->breakpoints[j] == dc->pc) {
8626 gen_set_condexec(dc);
8627 gen_set_pc_im(dc->pc);
8628 gen_exception(EXCP_DEBUG);
8629 dc->is_jmp = DISAS_JUMP;
8630 /* Advance PC so that clearing the breakpoint will
8631 invalidate this TB. */
8632 dc->pc += 2;
8633 goto done_generating;
8634 break;
8635 }
8636 }
8637 }
8638 if (search_pc) {
8639 j = gen_opc_ptr - gen_opc_buf;
8640 if (lj < j) {
8641 lj++;
8642 while (lj < j)
8643 gen_opc_instr_start[lj++] = 0;
8644 }
8645 gen_opc_pc[lj] = dc->pc;
8646 gen_opc_instr_start[lj] = 1;
8647 gen_opc_icount[lj] = num_insns;
8648 }
8649
8650 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8651 gen_io_start();
8652
8653 if (env->thumb) {
8654 disas_thumb_insn(env, dc);
8655 if (dc->condexec_mask) {
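/* Advance the IT state: shift one bit out of the mask into the low
   bit of the condition, walking the T/E pattern; an empty mask
   means the IT block has finished. */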
8656 dc->condexec_cond = (dc->condexec_cond & 0xe)
8657 | ((dc->condexec_mask >> 4) & 1);
8658 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8659 if (dc->condexec_mask == 0) {
8660 dc->condexec_cond = 0;
8661 }
8662 }
8663 } else {
8664 disas_arm_insn(env, dc);
8665 }
8666 if (num_temps) {
8667 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8668 num_temps = 0;
8669 }
8670
8671 if (dc->condjmp && !dc->is_jmp) {
8672 gen_set_label(dc->condlabel);
8673 dc->condjmp = 0;
8674 }
8675 /* Terminate the TB on memory ops if watchpoints are present. */
8676 /* FIXME: This should be replaced by the deterministic execution
8677 * IRQ raising bits. */
8678 if (dc->is_mem && env->nb_watchpoints)
8679 break;
8680
8681 /* Translation stops when a conditional branch is encountered.
8682 * Otherwise the subsequent code could get translated several times.
8683 * Also stop translation when a page boundary is reached. This
8684 * ensures prefetch aborts occur at the right place. */
8685 num_insns++;
8686 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8687 !env->singlestep_enabled &&
8688 dc->pc < next_page_start &&
8689 num_insns < max_insns);
8690
8691 if (tb->cflags & CF_LAST_IO) {
8692 if (dc->condjmp) {
8693 /* FIXME: This can theoretically happen with self-modifying
8694 code. */
8695 cpu_abort(env, "IO on conditional branch instruction");
8696 }
8697 gen_io_end();
8698 }
8699
8700 /* At this stage dc->condjmp will only be set when the skipped
8701 instruction was a conditional branch or trap, and the PC has
8702 already been written. */
8703 if (unlikely(env->singlestep_enabled)) {
8704 /* Make sure the pc is updated, and raise a debug exception. */
8705 if (dc->condjmp) {
8706 gen_set_condexec(dc);
8707 if (dc->is_jmp == DISAS_SWI) {
8708 gen_exception(EXCP_SWI);
8709 } else {
8710 gen_exception(EXCP_DEBUG);
8711 }
8712 gen_set_label(dc->condlabel);
8713 }
8714 if (dc->condjmp || !dc->is_jmp) {
8715 gen_set_pc_im(dc->pc);
8716 dc->condjmp = 0;
8717 }
8718 gen_set_condexec(dc);
8719 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
8720 gen_exception(EXCP_SWI);
8721 } else {
8722 /* FIXME: Single stepping a WFI insn will not halt
8723 the CPU. */
8724 gen_exception(EXCP_DEBUG);
8725 }
8726 } else {
8727 /* While branches must always occur at the end of an IT block,
8728 there are a few other things that can cause us to terminate
8729 the TB in the middle of an IT block:
8730 - Exception generating instructions (bkpt, swi, undefined).
8731 - Page boundaries.
8732 - Hardware watchpoints.
8733 Hardware breakpoints were handled above and jump past this code.
8734 */
8735 gen_set_condexec(dc);
8736 switch (dc->is_jmp) {
8737 case DISAS_NEXT:
8738 gen_goto_tb(dc, 1, dc->pc);
8739 break;
8740 default:
8741 case DISAS_JUMP:
8742 case DISAS_UPDATE:
8743 /* indicate that the hash table must be used to find the next TB */
8744 tcg_gen_exit_tb(0);
8745 break;
8746 case DISAS_TB_JUMP:
8747 /* nothing more to generate */
8748 break;
8749 case DISAS_WFI:
8750 gen_helper_wfi();
8751 break;
8752 case DISAS_SWI:
8753 gen_exception(EXCP_SWI);
8754 break;
8755 }
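/* A conditional instruction that ended the TB still needs a code path
   for the not-taken case: emit the skip label and chain to the next
   instruction. */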
8756 if (dc->condjmp) {
8757 gen_set_label(dc->condlabel);
8758 gen_set_condexec(dc);
8759 gen_goto_tb(dc, 1, dc->pc);
8760 dc->condjmp = 0;
8761 }
8762 }
8763
8764 done_generating:
8765 gen_icount_end(tb, num_insns);
8766 *gen_opc_ptr = INDEX_op_end;
8767
8768 #ifdef DEBUG_DISAS
8769 if (loglevel & CPU_LOG_TB_IN_ASM) {
8770 fprintf(logfile, "----------------\n");
8771 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
8772 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
8773 fprintf(logfile, "\n");
8774 }
8775 #endif
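/* In pc-search mode, pad the instruction-start table out to the last
   generated op; otherwise record the TB's size and instruction count
   for the caller. */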
8776 if (search_pc) {
8777 j = gen_opc_ptr - gen_opc_buf;
8778 lj++;
8779 while (lj <= j)
8780 gen_opc_instr_start[lj++] = 0;
8781 } else {
8782 tb->size = dc->pc - pc_start;
8783 tb->icount = num_insns;
8784 }
8785 }
8786
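/* Public entry points: the _pc variant retranslates an existing TB
   while recording op-to-PC mappings instead of producing code for
   execution. */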
8787 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8788 {
8789 gen_intermediate_code_internal(env, tb, 0);
8790 }
8791
8792 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8793 {
8794 gen_intermediate_code_internal(env, tb, 1);
8795 }
8796
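/* CPSR mode names indexed by the low four bits of the mode field;
   "???" marks encodings that are not valid ARM modes. */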
8797 static const char *cpu_mode_names[16] = {
8798 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8799 "???", "???", "???", "und", "???", "???", "???", "sys"
8800 };
8801
8802 void cpu_dump_state(CPUState *env, FILE *f,
8803 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8804 int flags)
8805 {
8806 int i;
8807 #if 0
8808 union {
8809 uint32_t i;
8810 float s;
8811 } s0, s1;
8812 CPU_DoubleU d;
8813 /* ??? This assumes float64 and double have the same layout.
8814 Oh well, it's only debug dumps. */
8815 union {
8816 float64 f64;
8817 double d;
8818 } d0;
8819 #endif
8820 uint32_t psr;
8821
8822 for (i = 0; i < 16; i++) {
8823 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
8824 if ((i % 4) == 3)
8825 cpu_fprintf(f, "\n");
8826 else
8827 cpu_fprintf(f, " ");
8828 }
8829 psr = cpsr_read(env);
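/* Print the N/Z/C/V flags, 'T' for Thumb state ('A' for ARM), the mode
   name, and whether the mode is 32-bit (bit 4 set) or legacy 26-bit. */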
8830 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8831 psr,
8832 psr & (1 << 31) ? 'N' : '-',
8833 psr & (1 << 30) ? 'Z' : '-',
8834 psr & (1 << 29) ? 'C' : '-',
8835 psr & (1 << 28) ? 'V' : '-',
8836 psr & CPSR_T ? 'T' : 'A',
8837 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
8838
8839 #if 0
8840 for (i = 0; i < 16; i++) {
8841 d.d = env->vfp.regs[i];
8842 s0.i = d.l.lower;
8843 s1.i = d.l.upper;
8844 d0.f64 = d.d;
8845 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
8846 i * 2, (int)s0.i, s0.s,
8847 i * 2 + 1, (int)s1.i, s1.s,
8848 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
8849 d0.d);
8850 }
8851 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
8852 #endif
8853 }
8854
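/* Called after a pc-search retranslation: restore the guest PC (r15)
   from the op-index-to-PC table built above. */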
8855 void gen_pc_load(CPUState *env, TranslationBlock *tb,
8856 unsigned long searched_pc, int pc_pos, void *puc)
8857 {
8858 env->regs[15] = gen_opc_pc[pc_pos];
8859 }