/*
 *  ARM translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *  Copyright (c) 2005-2007 CodeSourcery
 *  Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"

#include "helpers.h"
#define GEN_HELPER 1
#include "helpers.h"

#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)

/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5

static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency.  */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;
static TCGv_i32 cpu_exclusive_high;
#ifdef CONFIG_USER_ONLY
static TCGv_i32 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif

/* FIXME:  These should be removed.  */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Initialize TCG globals.  */
void arm_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                          offsetof(CPUState, regs[i]),
                                          regnames[i]);
    }
    cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_val), "exclusive_val");
    cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
    cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_test), "exclusive_test");
    cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
        offsetof(CPUState, exclusive_info), "exclusive_info");
#endif

#define GEN_HELPER 2
#include "helpers.h"
}

static int num_temps;

/* Allocate a temporary variable.  */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable.  */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUState, name))

/* Set a variable to the value of a CPU register.  */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* Normally, since we updated PC, we need only add one insn.  */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register.  */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions.  */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

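/* Update the CPSR fields selected by MASK from VAR via the cpsr_write
   helper.  */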
static inline void gen_set_cpsr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}
/* Set NZCV flags from the high 4 bits of var.  */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

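/* Generate code to raise exception number EXCP immediately.  */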
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}

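/* Dual 16-bit signed multiply.  On return a holds the product of the low
   halfwords and b the product of the high halfwords.  */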
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}

/* Byteswap each halfword.  */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Byteswap low halfword and sign extend.  */
static void gen_revsh(TCGv var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(var, var);
}

/* Unsigned bitfield extract.  */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}

/* Signed bitfield extract.  */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}

/* Bitfield insertion.  Insert val into base.  Clobbers base and val.  */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}

/* Return (b << 32) + a.  Mark inputs as dead.  */
static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_add_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* Return (b << 32) - a.  Mark inputs as dead.  */
static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv b)
{
    TCGv_i64 tmp64 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp64, b);
    dead_tmp(b);
    tcg_gen_shli_i64(tmp64, tmp64, 32);
    tcg_gen_sub_i64(a, tmp64, a);

    tcg_temp_free_i64(tmp64);
    return a;
}

/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* 32x32->64 multiply.  Marks inputs as dead.  */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

/* Swap low and high halfwords.  */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}

/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}

#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))

/* Set CF to the top bit of var.  */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var.  */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}

/* T0 += T1 + CF.  */
static void gen_adc(TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(t0, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(t0, t0, tmp);
    dead_tmp(tmp);
}

/* dest = T0 + T1 + CF.  */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1.  */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}

/* FIXME:  Implement this natively.  */
#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)

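/* Set the carry flag to bit SHIFT of VAR (the shifter carry-out).  */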
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31)
            tcg_gen_andi_i32(tmp, tmp, 1);
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Shift by immediate.  Includes special handling for shift == 0.  */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift);
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}

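/* Shift by register.  If FLAGS is set the *_cc helpers also compute the
   shifter carry-out.  The shift temporary is marked dead.  */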
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}

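/* Parallel add/subtract (ARM encoding): op1 selects the signed, saturating,
   unsigned or halving variant, op2 the operation itself.  */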
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) \
    switch (op1) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op2) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        tcg_temp_free_ptr(tmp);
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        tcg_temp_free_ptr(tmp);
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP

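/* Generate a branch to LABEL if condition code CC holds, testing the cached
   NF/ZF/CF/VF flag fields.  */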
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}

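/* Nonzero for the data-processing ops that set N/Z from the logical result
   (via gen_logic_CC) rather than using the arithmetic flag helpers.  */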
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};

/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    if (s->thumb != (addr & 1)) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
        dead_tmp(tmp);
    }
    tcg_gen_movi_i32(cpu_R[15], addr & ~1);
}

/* Set PC and Thumb state from var.  var is marked as dead.  */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[15], var, ~1);
    tcg_gen_andi_i32(var, var, 1);
    store_cpu_field(var, thumb);
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above.  The source must be a
   temporary and will be marked as dead.  */
static inline void store_reg_bx(CPUState *env, DisasContext *s,
                                int reg, TCGv var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

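/* Memory access wrappers: loads return a freshly allocated temporary;
   stores consume (kill) the value temporary.  */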
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[15], val);
}

/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}

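/* Add the ARM addressing-mode-2 offset (12-bit immediate or shifted
   register) encoded in INSN to VAR.  */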
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

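/* As above, for the addressing-mode-3 (halfword/doubleword) encodings:
   split 8-bit immediate or plain register offset, plus EXTRA.  */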
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra, TCGv var)
{
    int val, rm;
    TCGv offset;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* register */
        if (extra)
            tcg_gen_addi_i32(var, var, extra);
        rm = (insn) & 0xf;
        offset = load_reg(s, rm);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}

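/* Expand two-operand VFP arithmetic helpers operating on F0/F1 in the
   selected (single or double) precision.  */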
#define VFP_OP2(name)                                                 \
static inline void gen_vfp_##name(int dp)                             \
{                                                                     \
    if (dp)                                                           \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
    else                                                              \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
}

VFP_OP2(add)
VFP_OP2(sub)
VFP_OP2(mul)
VFP_OP2(div)

#undef VFP_OP2

static inline void gen_vfp_abs(int dp)
{
    if (dp)
        gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_neg(int dp)
{
    if (dp)
        gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
    else
        gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
}

static inline void gen_vfp_sqrt(int dp)
{
    if (dp)
        gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_cmp(int dp)
{
    if (dp)
        gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_cmpe(int dp)
{
    if (dp)
        gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
    else
        gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
}

static inline void gen_vfp_F1_ld0(int dp)
{
    if (dp)
        tcg_gen_movi_i64(cpu_F1d, 0);
    else
        tcg_gen_movi_i32(cpu_F1s, 0);
}

static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}

#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
    tcg_temp_free_i32(tmp_shift); \
}
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX

static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, addr, IS_USER(s));
}

static inline void gen_vfp_st(DisasContext *s, int dp, TCGv addr)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, addr, IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, addr, IS_USER(s));
}

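/* Byte offset of VFP register REG within CPUARMState.  Single-precision
   registers alias the halves of the double-precision ones.  */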
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    int sreg;
    sreg = reg * 2 + n;
    return vfp_reg_offset(0, sreg);
}

static TCGv neon_load_reg(int reg, int pass)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
    return tmp;
}

static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}

static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64

static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
}

static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
    else
        tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
}

#define ARM_CP_RW_BIT   (1 << 20)

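/* Accessors for the iwMMXt data registers (wRn) and control registers
   (wCx).  */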
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

static inline TCGv iwmmxt_load_creg(int reg)
{
    TCGv var = new_tmp();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
    dead_tmp(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

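/* Macros expanding the two-operand iwMMXt helper calls on M0 and wRn,
   with and without a CPU env argument.  */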
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

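/* Set the wCon bits that record updates: MUP (bit 1) for wRn writes and
   CUP (bit 0) for wCx writes.  */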
static void gen_op_iwmmxt_set_mup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

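/* Decode the iwMMXt load/store addressing mode from INSN into DEST,
   performing any base-register writeback.  Returns nonzero for an invalid
   encoding.  */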
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, TCGv dest)
{
    int rd;
    uint32_t offset;
    TCGv tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}

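/* Fetch an iwMMXt shift amount, either from a wCGRn control register or
   from the low half of wRn, masked with MASK.  Returns nonzero for an
   invalid encoding.  */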
static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = new_tmp();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    dead_tmp(tmp);
    return 0;
}

/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
   (i.e. an undefined instruction).  */
static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int rd, wrd;
    int rdhi, rdlo, rd0, rd1, i;
    TCGv addr;
    TCGv tmp, tmp2, tmp3;

    if ((insn & 0x0e000e00) == 0x0c000000) {
        if ((insn & 0x0fe00ff0) == 0x0c400000) {
            wrd = insn & 0xf;
            rdlo = (insn >> 12) & 0xf;
            rdhi = (insn >> 16) & 0xf;
            if (insn & ARM_CP_RW_BIT) { /* TMRRC */
                iwmmxt_load_reg(cpu_V0, wrd);
                tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
                tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
                tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
            } else { /* TMCRR */
                tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
                iwmmxt_store_reg(cpu_V0, wrd);
                gen_op_iwmmxt_set_mup();
            }
            return 0;
        }

        wrd = (insn >> 12) & 0xf;
        addr = new_tmp();
        if (gen_iwmmxt_address(s, insn, addr)) {
            dead_tmp(addr);
            return 1;
        }
        if (insn & ARM_CP_RW_BIT) {
            if ((insn >> 28) == 0xf) { /* WLDRW wCx */
                tmp = new_tmp();
                tcg_gen_qemu_ld32u(tmp, addr, IS_USER(s));
                iwmmxt_store_creg(wrd, tmp);
            } else {
                i = 1;
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WLDRD */
                        tcg_gen_qemu_ld64(cpu_M0, addr, IS_USER(s));
                        i = 0;
                    } else { /* WLDRW wRd */
                        tmp = gen_ld32(addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WLDRH */
                        tmp = gen_ld16u(addr, IS_USER(s));
                    } else { /* WLDRB */
                        tmp = gen_ld8u(addr, IS_USER(s));
                    }
                }
                if (i) {
                    tcg_gen_extu_i32_i64(cpu_M0, tmp);
                    dead_tmp(tmp);
                }
                gen_op_iwmmxt_movq_wRn_M0(wrd);
            }
        } else {
            if ((insn >> 28) == 0xf) { /* WSTRW wCx */
                tmp = iwmmxt_load_creg(wrd);
                gen_st32(tmp, addr, IS_USER(s));
            } else {
                gen_op_iwmmxt_movq_M0_wRn(wrd);
                tmp = new_tmp();
                if (insn & (1 << 8)) {
                    if (insn & (1 << 22)) { /* WSTRD */
                        dead_tmp(tmp);
                        tcg_gen_qemu_st64(cpu_M0, addr, IS_USER(s));
                    } else { /* WSTRW wRd */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                } else {
                    if (insn & (1 << 22)) { /* WSTRH */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st16(tmp, addr, IS_USER(s));
                    } else { /* WSTRB */
                        tcg_gen_trunc_i64_i32(tmp, cpu_M0);
                        gen_st8(tmp, addr, IS_USER(s));
                    }
                }
            }
        }
        dead_tmp(addr);
        return 0;
    }

    if ((insn & 0x0f000000) != 0x0e000000)
        return 1;

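    /* The remaining iwMMXt instructions are dispatched on opcode bits
       [23:20] and [11:4].  */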
    switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
    case 0x000: /* WOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_orq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x011: /* TMCR */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        switch (wrd) {
        case ARM_IWMMXT_wCID:
        case ARM_IWMMXT_wCASF:
            break;
        case ARM_IWMMXT_wCon:
            gen_op_iwmmxt_set_cup();
            /* Fall through.  */
        case ARM_IWMMXT_wCSSF:
            tmp = iwmmxt_load_creg(wrd);
            tmp2 = load_reg(s, rd);
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
            iwmmxt_store_creg(wrd, tmp);
            break;
        case ARM_IWMMXT_wCGR0:
        case ARM_IWMMXT_wCGR1:
        case ARM_IWMMXT_wCGR2:
        case ARM_IWMMXT_wCGR3:
            gen_op_iwmmxt_set_cup();
            tmp = load_reg(s, rd);
            iwmmxt_store_creg(wrd, tmp);
            break;
        default:
            return 1;
        }
        break;
    case 0x100: /* WXOR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_xorq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x111: /* TMRC */
        if (insn & 0xf)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = iwmmxt_load_creg(wrd);
        store_reg(s, rd, tmp);
        break;
    case 0x300: /* WANDN */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        /* WANDN is a bitwise and-complement, so invert (not negate) M0.  */
        tcg_gen_not_i64(cpu_M0, cpu_M0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x200: /* WAND */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        gen_op_iwmmxt_andq_M0_wRn(rd1);
        gen_op_iwmmxt_setpsr_nz();
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x810: case 0xa10: /* WMADD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 0) & 0xf;
        rd1 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_maddsq_M0_wRn(rd1);
        else
            gen_op_iwmmxt_madduq_M0_wRn(rd1);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackll_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            TCGV_UNUSED(tmp2);
            TCGV_UNUSED(tmp3);
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free(tmp3);
        tcg_temp_free(tmp2);
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        dead_tmp(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        dead_tmp(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = new_tmp();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        dead_tmp(tmp2);
        dead_tmp(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = new_tmp();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
1966 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1967 case 0x214: case 0x614: case 0xa14: case 0xe14:
1968 if (((insn >> 22) & 3) == 0)
1969 return 1;
1970 wrd = (insn >> 12) & 0xf;
1971 rd0 = (insn >> 16) & 0xf;
1972 gen_op_iwmmxt_movq_M0_wRn(rd0);
1973 tmp = new_tmp();
1974 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
1975 dead_tmp(tmp);
1976 return 1;
1977 }
1978 switch ((insn >> 22) & 3) {
1979 case 1:
1980 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
1981 break;
1982 case 2:
1983 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
1984 break;
1985 case 3:
1986 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
1987 break;
1988 }
1989 dead_tmp(tmp);
1990 gen_op_iwmmxt_movq_wRn_M0(wrd);
1991 gen_op_iwmmxt_set_mup();
1992 gen_op_iwmmxt_set_cup();
1993 break;
1994 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1995 case 0x014: case 0x414: case 0x814: case 0xc14:
1996 if (((insn >> 22) & 3) == 0)
1997 return 1;
1998 wrd = (insn >> 12) & 0xf;
1999 rd0 = (insn >> 16) & 0xf;
2000 gen_op_iwmmxt_movq_M0_wRn(rd0);
2001 tmp = new_tmp();
2002 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2003 dead_tmp(tmp);
2004 return 1;
2005 }
2006 switch ((insn >> 22) & 3) {
2007 case 1:
2008 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
2009 break;
2010 case 2:
2011 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
2012 break;
2013 case 3:
2014 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
2015 break;
2016 }
2017 dead_tmp(tmp);
2018 gen_op_iwmmxt_movq_wRn_M0(wrd);
2019 gen_op_iwmmxt_set_mup();
2020 gen_op_iwmmxt_set_cup();
2021 break;
2022 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2023 case 0x114: case 0x514: case 0x914: case 0xd14:
2024 if (((insn >> 22) & 3) == 0)
2025 return 1;
2026 wrd = (insn >> 12) & 0xf;
2027 rd0 = (insn >> 16) & 0xf;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
2029 tmp = new_tmp();
2030 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
2031 dead_tmp(tmp);
2032 return 1;
2033 }
2034 switch ((insn >> 22) & 3) {
2035 case 1:
2036 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
2037 break;
2038 case 2:
2039 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
2040 break;
2041 case 3:
2042 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
2043 break;
2044 }
2045 dead_tmp(tmp);
2046 gen_op_iwmmxt_movq_wRn_M0(wrd);
2047 gen_op_iwmmxt_set_mup();
2048 gen_op_iwmmxt_set_cup();
2049 break;
2050 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2051 case 0x314: case 0x714: case 0xb14: case 0xf14:
2052 if (((insn >> 22) & 3) == 0)
2053 return 1;
2054 wrd = (insn >> 12) & 0xf;
2055 rd0 = (insn >> 16) & 0xf;
2056 gen_op_iwmmxt_movq_M0_wRn(rd0);
2057 tmp = new_tmp();
2058 switch ((insn >> 22) & 3) {
2059 case 1:
2060 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
2061 dead_tmp(tmp);
2062 return 1;
2063 }
2064 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
2065 break;
2066 case 2:
2067 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
2068 dead_tmp(tmp);
2069 return 1;
2070 }
2071 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
2072 break;
2073 case 3:
2074 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
2075 dead_tmp(tmp);
2076 return 1;
2077 }
2078 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
2079 break;
2080 }
2081 dead_tmp(tmp);
2082 gen_op_iwmmxt_movq_wRn_M0(wrd);
2083 gen_op_iwmmxt_set_mup();
2084 gen_op_iwmmxt_set_cup();
2085 break;
2086 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2087 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 switch ((insn >> 22) & 3) {
2093 case 0:
2094 if (insn & (1 << 21))
2095 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2096 else
2097 gen_op_iwmmxt_minub_M0_wRn(rd1);
2098 break;
2099 case 1:
2100 if (insn & (1 << 21))
2101 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2102 else
2103 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2104 break;
2105 case 2:
2106 if (insn & (1 << 21))
2107 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2108 else
2109 gen_op_iwmmxt_minul_M0_wRn(rd1);
2110 break;
2111 case 3:
2112 return 1;
2113 }
2114 gen_op_iwmmxt_movq_wRn_M0(wrd);
2115 gen_op_iwmmxt_set_mup();
2116 break;
2117 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2118 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2119 wrd = (insn >> 12) & 0xf;
2120 rd0 = (insn >> 16) & 0xf;
2121 rd1 = (insn >> 0) & 0xf;
2122 gen_op_iwmmxt_movq_M0_wRn(rd0);
2123 switch ((insn >> 22) & 3) {
2124 case 0:
2125 if (insn & (1 << 21))
2126 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2127 else
2128 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2129 break;
2130 case 1:
2131 if (insn & (1 << 21))
2132 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2133 else
2134 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2135 break;
2136 case 2:
2137 if (insn & (1 << 21))
2138 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2139 else
2140 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2141 break;
2142 case 3:
2143 return 1;
2144 }
2145 gen_op_iwmmxt_movq_wRn_M0(wrd);
2146 gen_op_iwmmxt_set_mup();
2147 break;
2148 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2149 case 0x402: case 0x502: case 0x602: case 0x702:
2150 wrd = (insn >> 12) & 0xf;
2151 rd0 = (insn >> 16) & 0xf;
2152 rd1 = (insn >> 0) & 0xf;
2153 gen_op_iwmmxt_movq_M0_wRn(rd0);
2154 tmp = tcg_const_i32((insn >> 20) & 3);
2155 iwmmxt_load_reg(cpu_V1, rd1);
2156 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
2157 tcg_temp_free(tmp);
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 break;
2161 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2162 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2163 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2164 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2165 wrd = (insn >> 12) & 0xf;
2166 rd0 = (insn >> 16) & 0xf;
2167 rd1 = (insn >> 0) & 0xf;
2168 gen_op_iwmmxt_movq_M0_wRn(rd0);
2169 switch ((insn >> 20) & 0xf) {
2170 case 0x0:
2171 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2172 break;
2173 case 0x1:
2174 gen_op_iwmmxt_subub_M0_wRn(rd1);
2175 break;
2176 case 0x3:
2177 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2178 break;
2179 case 0x4:
2180 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2181 break;
2182 case 0x5:
2183 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2184 break;
2185 case 0x7:
2186 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2187 break;
2188 case 0x8:
2189 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2190 break;
2191 case 0x9:
2192 gen_op_iwmmxt_subul_M0_wRn(rd1);
2193 break;
2194 case 0xb:
2195 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2196 break;
2197 default:
2198 return 1;
2199 }
2200 gen_op_iwmmxt_movq_wRn_M0(wrd);
2201 gen_op_iwmmxt_set_mup();
2202 gen_op_iwmmxt_set_cup();
2203 break;
2204 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2205 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2206 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2207 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2208 wrd = (insn >> 12) & 0xf;
2209 rd0 = (insn >> 16) & 0xf;
2210 gen_op_iwmmxt_movq_M0_wRn(rd0);
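/* The 8-bit WSHUFH lane selector is split across the instruction:
   insn[23:20] supplies the high nibble and insn[3:0] the low nibble. */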
2211 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
2212 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
2213 tcg_temp_free(tmp);
2214 gen_op_iwmmxt_movq_wRn_M0(wrd);
2215 gen_op_iwmmxt_set_mup();
2216 gen_op_iwmmxt_set_cup();
2217 break;
2218 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2219 case 0x418: case 0x518: case 0x618: case 0x718:
2220 case 0x818: case 0x918: case 0xa18: case 0xb18:
2221 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2222 wrd = (insn >> 12) & 0xf;
2223 rd0 = (insn >> 16) & 0xf;
2224 rd1 = (insn >> 0) & 0xf;
2225 gen_op_iwmmxt_movq_M0_wRn(rd0);
2226 switch ((insn >> 20) & 0xf) {
2227 case 0x0:
2228 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2229 break;
2230 case 0x1:
2231 gen_op_iwmmxt_addub_M0_wRn(rd1);
2232 break;
2233 case 0x3:
2234 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2235 break;
2236 case 0x4:
2237 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2238 break;
2239 case 0x5:
2240 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2241 break;
2242 case 0x7:
2243 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2244 break;
2245 case 0x8:
2246 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2247 break;
2248 case 0x9:
2249 gen_op_iwmmxt_addul_M0_wRn(rd1);
2250 break;
2251 case 0xb:
2252 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2253 break;
2254 default:
2255 return 1;
2256 }
2257 gen_op_iwmmxt_movq_wRn_M0(wrd);
2258 gen_op_iwmmxt_set_mup();
2259 gen_op_iwmmxt_set_cup();
2260 break;
2261 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2262 case 0x408: case 0x508: case 0x608: case 0x708:
2263 case 0x808: case 0x908: case 0xa08: case 0xb08:
2264 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2265 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2266 return 1;
2267 wrd = (insn >> 12) & 0xf;
2268 rd0 = (insn >> 16) & 0xf;
2269 rd1 = (insn >> 0) & 0xf;
2270 gen_op_iwmmxt_movq_M0_wRn(rd0);
2271 switch ((insn >> 22) & 3) {
2272 case 1:
2273 if (insn & (1 << 21))
2274 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2275 else
2276 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2277 break;
2278 case 2:
2279 if (insn & (1 << 21))
2280 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2281 else
2282 gen_op_iwmmxt_packul_M0_wRn(rd1);
2283 break;
2284 case 3:
2285 if (insn & (1 << 21))
2286 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2287 else
2288 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2289 break;
2290 }
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 gen_op_iwmmxt_set_cup();
2294 break;
2295 case 0x201: case 0x203: case 0x205: case 0x207:
2296 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2297 case 0x211: case 0x213: case 0x215: case 0x217:
2298 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2299 wrd = (insn >> 5) & 0xf;
2300 rd0 = (insn >> 12) & 0xf;
2301 rd1 = (insn >> 0) & 0xf;
2302 if (rd0 == 0xf || rd1 == 0xf)
2303 return 1;
2304 gen_op_iwmmxt_movq_M0_wRn(wrd);
2305 tmp = load_reg(s, rd0);
2306 tmp2 = load_reg(s, rd1);
2307 switch ((insn >> 16) & 0xf) {
2308 case 0x0: /* TMIA */
2309 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2310 break;
2311 case 0x8: /* TMIAPH */
2312 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2313 break;
2314 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
2315 if (insn & (1 << 16))
2316 tcg_gen_shri_i32(tmp, tmp, 16);
2317 if (insn & (1 << 17))
2318 tcg_gen_shri_i32(tmp2, tmp2, 16);
2319 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2320 break;
2321 default:
2322 dead_tmp(tmp2);
2323 dead_tmp(tmp);
2324 return 1;
2325 }
2326 dead_tmp(tmp2);
2327 dead_tmp(tmp);
2328 gen_op_iwmmxt_movq_wRn_M0(wrd);
2329 gen_op_iwmmxt_set_mup();
2330 break;
2331 default:
2332 return 1;
2333 }
2334
2335 return 0;
2336 }
2337
2338 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
2339 (i.e. an undefined instruction). */
2340 static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2341 {
2342 int acc, rd0, rd1, rdhi, rdlo;
2343 TCGv tmp, tmp2;
2344
2345 if ((insn & 0x0ff00f10) == 0x0e200010) {
2346 /* Multiply with Internal Accumulate Format */
2347 rd0 = (insn >> 12) & 0xf;
2348 rd1 = insn & 0xf;
2349 acc = (insn >> 5) & 7;
2350
2351 if (acc != 0)
2352 return 1;
2353
2354 tmp = load_reg(s, rd0);
2355 tmp2 = load_reg(s, rd1);
2356 switch ((insn >> 16) & 0xf) {
2357 case 0x0: /* MIA */
2358 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
2359 break;
2360 case 0x8: /* MIAPH */
2361 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
2362 break;
2363 case 0xc: /* MIABB */
2364 case 0xd: /* MIABT */
2365 case 0xe: /* MIATB */
2366 case 0xf: /* MIATT */
2367 if (insn & (1 << 16))
2368 tcg_gen_shri_i32(tmp, tmp, 16);
2369 if (insn & (1 << 17))
2370 tcg_gen_shri_i32(tmp2, tmp2, 16);
2371 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
2372 break;
2373 default:
2374 return 1;
2375 }
2376 dead_tmp(tmp2);
2377 dead_tmp(tmp);
2378
2379 gen_op_iwmmxt_movq_wRn_M0(acc);
2380 return 0;
2381 }
2382
2383 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2384 /* Internal Accumulator Access Format */
2385 rdhi = (insn >> 16) & 0xf;
2386 rdlo = (insn >> 12) & 0xf;
2387 acc = insn & 7;
2388
2389 if (acc != 0)
2390 return 1;
2391
2392 if (insn & ARM_CP_RW_BIT) { /* MRA */
2393 iwmmxt_load_reg(cpu_V0, acc);
2394 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2395 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2396 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
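/* The XScale internal accumulator acc0 is only 40 bits wide, so just
   bits 39:32 of the high word are meaningful. */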
2397 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
2398 } else { /* MAR */
2399 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2400 iwmmxt_store_reg(cpu_V0, acc);
2401 }
2402 return 0;
2403 }
2404
2405 return 1;
2406 }
2407
2408 /* Disassemble system coprocessor instruction. Return nonzero if
2409 instruction is not defined. */
2410 static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2411 {
2412 TCGv tmp, tmp2;
2413 uint32_t rd = (insn >> 12) & 0xf;
2414 uint32_t cp = (insn >> 8) & 0xf;
2415 if (IS_USER(s)) {
2416 return 1;
2417 }
2418
2419 if (insn & ARM_CP_RW_BIT) {
2420 if (!env->cp[cp].cp_read)
2421 return 1;
2422 gen_set_pc_im(s->pc);
2423 tmp = new_tmp();
2424 tmp2 = tcg_const_i32(insn);
2425 gen_helper_get_cp(tmp, cpu_env, tmp2);
2426 tcg_temp_free(tmp2);
2427 store_reg(s, rd, tmp);
2428 } else {
2429 if (!env->cp[cp].cp_write)
2430 return 1;
2431 gen_set_pc_im(s->pc);
2432 tmp = load_reg(s, rd);
2433 tmp2 = tcg_const_i32(insn);
2434 gen_helper_set_cp(cpu_env, tmp2, tmp);
2435 tcg_temp_free(tmp2);
2436 dead_tmp(tmp);
2437 }
2438 return 0;
2439 }
2440
2441 static int cp15_user_ok(uint32_t insn)
2442 {
2443 int cpn = (insn >> 16) & 0xf;
2444 int cpm = insn & 0xf;
2445 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
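/* op packs opc1 (insn[23:21]) into bits [5:3] and opc2 (insn[7:5]) into
   bits [2:0] of a single comparable value. */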
2446
2447 if (cpn == 13 && cpm == 0) {
2448 /* TLS register. */
2449 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2450 return 1;
2451 }
2452 if (cpn == 7) {
2453 /* ISB, DSB, DMB. */
2454 if ((cpm == 5 && op == 4)
2455 || (cpm == 10 && (op == 4 || op == 5)))
2456 return 1;
2457 }
2458 return 0;
2459 }
2460
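/* Handle a TLS-register (cp15 c13) MRC/MCR. Returns nonzero when the
   access was handled, zero to fall through to the generic cp15 path;
   note this is the opposite sense to the disas_* routines. */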
2461 static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
2462 {
2463 TCGv tmp;
2464 int cpn = (insn >> 16) & 0xf;
2465 int cpm = insn & 0xf;
2466 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2467
2468 if (!arm_feature(env, ARM_FEATURE_V6K))
2469 return 0;
2470
2471 if (!(cpn == 13 && cpm == 0))
2472 return 0;
2473
2474 if (insn & ARM_CP_RW_BIT) {
2475 switch (op) {
2476 case 2:
2477 tmp = load_cpu_field(cp15.c13_tls1);
2478 break;
2479 case 3:
2480 tmp = load_cpu_field(cp15.c13_tls2);
2481 break;
2482 case 4:
2483 tmp = load_cpu_field(cp15.c13_tls3);
2484 break;
2485 default:
2486 return 0;
2487 }
2488 store_reg(s, rd, tmp);
2489
2490 } else {
2491 tmp = load_reg(s, rd);
2492 switch (op) {
2493 case 2:
2494 store_cpu_field(tmp, cp15.c13_tls1);
2495 break;
2496 case 3:
2497 store_cpu_field(tmp, cp15.c13_tls2);
2498 break;
2499 case 4:
2500 store_cpu_field(tmp, cp15.c13_tls3);
2501 break;
2502 default:
2503 dead_tmp(tmp);
2504 return 0;
2505 }
2506 }
2507 return 1;
2508 }
2509
2510 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2511 instruction is not defined. */
2512 static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
2513 {
2514 uint32_t rd;
2515 TCGv tmp, tmp2;
2516
2517 /* M profile cores use memory mapped registers instead of cp15. */
2518 if (arm_feature(env, ARM_FEATURE_M))
2519 return 1;
2520
2521 if ((insn & (1 << 25)) == 0) {
2522 if (insn & (1 << 20)) {
2523 /* mrrc */
2524 return 1;
2525 }
2526 /* mcrr. Used for block cache operations, so implement as no-op. */
2527 return 0;
2528 }
2529 if ((insn & (1 << 4)) == 0) {
2530 /* cdp */
2531 return 1;
2532 }
2533 if (IS_USER(s) && !cp15_user_ok(insn)) {
2534 return 1;
2535 }
2536 if ((insn & 0x0fff0fff) == 0x0e070f90
2537 || (insn & 0x0fff0fff) == 0x0e070f58) {
2538 /* Wait for interrupt. */
2539 gen_set_pc_im(s->pc);
2540 s->is_jmp = DISAS_WFI;
2541 return 0;
2542 }
2543 rd = (insn >> 12) & 0xf;
2544
2545 if (cp15_tls_load_store(env, s, insn, rd))
2546 return 0;
2547
2548 tmp2 = tcg_const_i32(insn);
2549 if (insn & ARM_CP_RW_BIT) {
2550 tmp = new_tmp();
2551 gen_helper_get_cp15(tmp, cpu_env, tmp2);
2552 /* An MRC to r15 architecturally sets the condition codes from the result; that case is not modelled here, so the value is simply discarded. */
2553 if (rd != 15)
2554 store_reg(s, rd, tmp);
2555 else
2556 dead_tmp(tmp);
2557 } else {
2558 tmp = load_reg(s, rd);
2559 gen_helper_set_cp15(cpu_env, tmp2, tmp);
2560 dead_tmp(tmp);
2561 /* Normally we would always end the TB here, but Linux
2562 * arch/arm/mach-pxa/sleep.S expects two instructions following
2563 * an MMU enable to execute from cache. Imitate this behaviour. */
2564 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2565 (insn & 0x0fff0fff) != 0x0e010f10)
2566 gen_lookup_tb(s);
2567 }
2568 tcg_temp_free_i32(tmp2);
2569 return 0;
2570 }
2571
2572 #define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2573 #define VFP_SREG(insn, bigbit, smallbit) \
2574 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2575 #define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2576 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2577 reg = (((insn) >> (bigbit)) & 0x0f) \
2578 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2579 } else { \
2580 if (insn & (1 << (smallbit))) \
2581 return 1; \
2582 reg = ((insn) >> (bigbit)) & 0x0f; \
2583 }} while (0)
2584
2585 #define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2586 #define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2587 #define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2588 #define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2589 #define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2590 #define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
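/* Example: VFP_SREG_D(insn) builds the 5-bit single-precision register
   number as insn[15:12]:insn[22], while VFP_DREG_D yields
   insn[22]:insn[15:12] on VFP3. Earlier VFP versions have only 16 double
   registers, so a set insn[22] makes the instruction undefined there. */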
2591
2592 /* Move between integer and VFP cores. */
2593 static TCGv gen_vfp_mrs(void)
2594 {
2595 TCGv tmp = new_tmp();
2596 tcg_gen_mov_i32(tmp, cpu_F0s);
2597 return tmp;
2598 }
2599
2600 static void gen_vfp_msr(TCGv tmp)
2601 {
2602 tcg_gen_mov_i32(cpu_F0s, tmp);
2603 dead_tmp(tmp);
2604 }
2605
2606 static inline int
2607 vfp_enabled(CPUState *env)
2608 {
2609 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2610 }
2611
2612 static void gen_neon_dup_u8(TCGv var, int shift)
2613 {
2614 TCGv tmp = new_tmp();
2615 if (shift)
2616 tcg_gen_shri_i32(var, var, shift);
2617 tcg_gen_ext8u_i32(var, var);
2618 tcg_gen_shli_i32(tmp, var, 8);
2619 tcg_gen_or_i32(var, var, tmp);
2620 tcg_gen_shli_i32(tmp, var, 16);
2621 tcg_gen_or_i32(var, var, tmp);
2622 dead_tmp(tmp);
2623 }
2624
2625 static void gen_neon_dup_low16(TCGv var)
2626 {
2627 TCGv tmp = new_tmp();
2628 tcg_gen_ext16u_i32(var, var);
2629 tcg_gen_shli_i32(tmp, var, 16);
2630 tcg_gen_or_i32(var, var, tmp);
2631 dead_tmp(tmp);
2632 }
2633
2634 static void gen_neon_dup_high16(TCGv var)
2635 {
2636 TCGv tmp = new_tmp();
2637 tcg_gen_andi_i32(var, var, 0xffff0000);
2638 tcg_gen_shri_i32(tmp, var, 16);
2639 tcg_gen_or_i32(var, var, tmp);
2640 dead_tmp(tmp);
2641 }
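/* Examples for the helpers above: gen_neon_dup_u8(var, 0) turns
   0x000000ab into 0xabababab, gen_neon_dup_low16 maps 0x0000abcd to
   0xabcdabcd, and gen_neon_dup_high16 maps 0xabcd0000 to 0xabcdabcd. */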
2642
2643 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
2644 (i.e. an undefined instruction). */
2645 static int disas_vfp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2646 {
2647 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2648 int dp, veclen;
2649 TCGv addr;
2650 TCGv tmp;
2651 TCGv tmp2;
2652
2653 if (!arm_feature(env, ARM_FEATURE_VFP))
2654 return 1;
2655
2656 if (!vfp_enabled(env)) {
2657 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
2658 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2659 return 1;
2660 rn = (insn >> 16) & 0xf;
2661 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2662 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
2663 return 1;
2664 }
2665 dp = ((insn & 0xf00) == 0xb00);
2666 switch ((insn >> 24) & 0xf) {
2667 case 0xe:
2668 if (insn & (1 << 4)) {
2669 /* single register transfer */
2670 rd = (insn >> 12) & 0xf;
2671 if (dp) {
2672 int size;
2673 int pass;
2674
2675 VFP_DREG_N(rn, insn);
2676 if (insn & 0xf)
2677 return 1;
2678 if (insn & 0x00c00060
2679 && !arm_feature(env, ARM_FEATURE_NEON))
2680 return 1;
2681
2682 pass = (insn >> 21) & 1;
2683 if (insn & (1 << 22)) {
2684 size = 0;
2685 offset = ((insn >> 5) & 3) * 8;
2686 } else if (insn & (1 << 5)) {
2687 size = 1;
2688 offset = (insn & (1 << 6)) ? 16 : 0;
2689 } else {
2690 size = 2;
2691 offset = 0;
2692 }
2693 if (insn & ARM_CP_RW_BIT) {
2694 /* vfp->arm */
2695 tmp = neon_load_reg(rn, pass);
2696 switch (size) {
2697 case 0:
2698 if (offset)
2699 tcg_gen_shri_i32(tmp, tmp, offset);
2700 if (insn & (1 << 23))
2701 gen_uxtb(tmp);
2702 else
2703 gen_sxtb(tmp);
2704 break;
2705 case 1:
2706 if (insn & (1 << 23)) {
2707 if (offset) {
2708 tcg_gen_shri_i32(tmp, tmp, 16);
2709 } else {
2710 gen_uxth(tmp);
2711 }
2712 } else {
2713 if (offset) {
2714 tcg_gen_sari_i32(tmp, tmp, 16);
2715 } else {
2716 gen_sxth(tmp);
2717 }
2718 }
2719 break;
2720 case 2:
2721 break;
2722 }
2723 store_reg(s, rd, tmp);
2724 } else {
2725 /* arm->vfp */
2726 tmp = load_reg(s, rd);
2727 if (insn & (1 << 23)) {
2728 /* VDUP */
2729 if (size == 0) {
2730 gen_neon_dup_u8(tmp, 0);
2731 } else if (size == 1) {
2732 gen_neon_dup_low16(tmp);
2733 }
2734 for (n = 0; n <= pass * 2; n++) {
2735 tmp2 = new_tmp();
2736 tcg_gen_mov_i32(tmp2, tmp);
2737 neon_store_reg(rn, n, tmp2);
2738 }
2739 neon_store_reg(rn, n, tmp);
2740 } else {
2741 /* VMOV */
2742 switch (size) {
2743 case 0:
2744 tmp2 = neon_load_reg(rn, pass);
2745 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2746 dead_tmp(tmp2);
2747 break;
2748 case 1:
2749 tmp2 = neon_load_reg(rn, pass);
2750 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2751 dead_tmp(tmp2);
2752 break;
2753 case 2:
2754 break;
2755 }
2756 neon_store_reg(rn, pass, tmp);
2757 }
2758 }
2759 } else { /* !dp */
2760 if ((insn & 0x6f) != 0x00)
2761 return 1;
2762 rn = VFP_SREG_N(insn);
2763 if (insn & ARM_CP_RW_BIT) {
2764 /* vfp->arm */
2765 if (insn & (1 << 21)) {
2766 /* system register */
2767 rn >>= 1;
2768
2769 switch (rn) {
2770 case ARM_VFP_FPSID:
2771 /* VFP2 allows access to FPSID from userspace.
2772 VFP3 restricts all ID registers to privileged
2773 accesses. */
2774 if (IS_USER(s)
2775 && arm_feature(env, ARM_FEATURE_VFP3))
2776 return 1;
2777 tmp = load_cpu_field(vfp.xregs[rn]);
2778 break;
2779 case ARM_VFP_FPEXC:
2780 if (IS_USER(s))
2781 return 1;
2782 tmp = load_cpu_field(vfp.xregs[rn]);
2783 break;
2784 case ARM_VFP_FPINST:
2785 case ARM_VFP_FPINST2:
2786 /* Not present in VFP3. */
2787 if (IS_USER(s)
2788 || arm_feature(env, ARM_FEATURE_VFP3))
2789 return 1;
2790 tmp = load_cpu_field(vfp.xregs[rn]);
2791 break;
2792 case ARM_VFP_FPSCR:
2793 if (rd == 15) {
2794 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2795 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2796 } else {
2797 tmp = new_tmp();
2798 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2799 }
2800 break;
2801 case ARM_VFP_MVFR0:
2802 case ARM_VFP_MVFR1:
2803 if (IS_USER(s)
2804 || !arm_feature(env, ARM_FEATURE_VFP3))
2805 return 1;
2806 tmp = load_cpu_field(vfp.xregs[rn]);
2807 break;
2808 default:
2809 return 1;
2810 }
2811 } else {
2812 gen_mov_F0_vreg(0, rn);
2813 tmp = gen_vfp_mrs();
2814 }
2815 if (rd == 15) {
2816 /* Set the 4 flag bits in the CPSR. */
2817 gen_set_nzcv(tmp);
2818 dead_tmp(tmp);
2819 } else {
2820 store_reg(s, rd, tmp);
2821 }
2822 } else {
2823 /* arm->vfp */
2824 tmp = load_reg(s, rd);
2825 if (insn & (1 << 21)) {
2826 rn >>= 1;
2827 /* system register */
2828 switch (rn) {
2829 case ARM_VFP_FPSID:
2830 case ARM_VFP_MVFR0:
2831 case ARM_VFP_MVFR1:
2832 /* Writes are ignored. */
2833 break;
2834 case ARM_VFP_FPSCR:
2835 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2836 dead_tmp(tmp);
2837 gen_lookup_tb(s);
2838 break;
2839 case ARM_VFP_FPEXC:
2840 if (IS_USER(s))
2841 return 1;
2842 /* TODO: VFP subarchitecture support.
2843 * For now, keep the EN bit only */
2844 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
2845 store_cpu_field(tmp, vfp.xregs[rn]);
2846 gen_lookup_tb(s);
2847 break;
2848 case ARM_VFP_FPINST:
2849 case ARM_VFP_FPINST2:
2850 store_cpu_field(tmp, vfp.xregs[rn]);
2851 break;
2852 default:
2853 return 1;
2854 }
2855 } else {
2856 gen_vfp_msr(tmp);
2857 gen_mov_vreg_F0(0, rn);
2858 }
2859 }
2860 }
2861 } else {
2862 /* data processing */
2863 /* The opcode is in bits 23, 21, 20 and 6. */
2864 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2865 if (dp) {
2866 if (op == 15) {
2867 /* rn is opcode */
2868 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2869 } else {
2870 /* rn is register number */
2871 VFP_DREG_N(rn, insn);
2872 }
2873
2874 if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18))) {
2875 /* Integer or single precision destination. */
2876 rd = VFP_SREG_D(insn);
2877 } else {
2878 VFP_DREG_D(rd, insn);
2879 }
2880 if (op == 15 &&
2881 (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14))) {
2882 /* VCVT from int is always from an S reg regardless of the dp bit.
2883 * VCVT with immediate frac_bits has the same format as SREG_M.
2884 */
2885 rm = VFP_SREG_M(insn);
2886 } else {
2887 VFP_DREG_M(rm, insn);
2888 }
2889 } else {
2890 rn = VFP_SREG_N(insn);
2891 if (op == 15 && rn == 15) {
2892 /* Double precision destination. */
2893 VFP_DREG_D(rd, insn);
2894 } else {
2895 rd = VFP_SREG_D(insn);
2896 }
2897 /* NB that we implicitly rely on the encoding for the frac_bits
2898 * in VCVT of fixed to float being the same as that of an SREG_M
2899 */
2900 rm = VFP_SREG_M(insn);
2901 }
2902
2903 veclen = env->vfp.vec_len;
2904 if (op == 15 && rn > 3)
2905 veclen = 0;
2906
2907 /* Shut up compiler warnings. */
2908 delta_m = 0;
2909 delta_d = 0;
2910 bank_mask = 0;
2911
2912 if (veclen > 0) {
2913 if (dp)
2914 bank_mask = 0xc;
2915 else
2916 bank_mask = 0x18;
2917
2918 /* Figure out what type of vector operation this is. */
2919 if ((rd & bank_mask) == 0) {
2920 /* scalar */
2921 veclen = 0;
2922 } else {
2923 if (dp)
2924 delta_d = (env->vfp.vec_stride >> 1) + 1;
2925 else
2926 delta_d = env->vfp.vec_stride + 1;
2927
2928 if ((rm & bank_mask) == 0) {
2929 /* mixed scalar/vector */
2930 delta_m = 0;
2931 } else {
2932 /* vector */
2933 delta_m = delta_d;
2934 }
2935 }
2936 }
2937
2938 /* Load the initial operands. */
2939 if (op == 15) {
2940 switch (rn) {
2941 case 16:
2942 case 17:
2943 /* Integer source */
2944 gen_mov_F0_vreg(0, rm);
2945 break;
2946 case 8:
2947 case 9:
2948 /* Compare */
2949 gen_mov_F0_vreg(dp, rd);
2950 gen_mov_F1_vreg(dp, rm);
2951 break;
2952 case 10:
2953 case 11:
2954 /* Compare with zero */
2955 gen_mov_F0_vreg(dp, rd);
2956 gen_vfp_F1_ld0(dp);
2957 break;
2958 case 20:
2959 case 21:
2960 case 22:
2961 case 23:
2962 case 28:
2963 case 29:
2964 case 30:
2965 case 31:
2966 /* Source and destination the same. */
2967 gen_mov_F0_vreg(dp, rd);
2968 break;
2969 default:
2970 /* One source operand. */
2971 gen_mov_F0_vreg(dp, rm);
2972 break;
2973 }
2974 } else {
2975 /* Two source operands. */
2976 gen_mov_F0_vreg(dp, rn);
2977 gen_mov_F1_vreg(dp, rm);
2978 }
2979
2980 for (;;) {
2981 /* Perform the calculation. */
2982 switch (op) {
2983 case 0: /* mac: fd + (fn * fm) */
2984 gen_vfp_mul(dp);
2985 gen_mov_F1_vreg(dp, rd);
2986 gen_vfp_add(dp);
2987 break;
2988 case 1: /* nmac: fd - (fn * fm) */
2989 gen_vfp_mul(dp);
2990 gen_vfp_neg(dp);
2991 gen_mov_F1_vreg(dp, rd);
2992 gen_vfp_add(dp);
2993 break;
2994 case 2: /* msc: -fd + (fn * fm) */
2995 gen_vfp_mul(dp);
2996 gen_mov_F1_vreg(dp, rd);
2997 gen_vfp_sub(dp);
2998 break;
2999 case 3: /* nmsc: -fd - (fn * fm) */
3000 gen_vfp_mul(dp);
3001 gen_vfp_neg(dp);
3002 gen_mov_F1_vreg(dp, rd);
3003 gen_vfp_sub(dp);
3004 break;
3005 case 4: /* mul: fn * fm */
3006 gen_vfp_mul(dp);
3007 break;
3008 case 5: /* nmul: -(fn * fm) */
3009 gen_vfp_mul(dp);
3010 gen_vfp_neg(dp);
3011 break;
3012 case 6: /* add: fn + fm */
3013 gen_vfp_add(dp);
3014 break;
3015 case 7: /* sub: fn - fm */
3016 gen_vfp_sub(dp);
3017 break;
3018 case 8: /* div: fn / fm */
3019 gen_vfp_div(dp);
3020 break;
3021 case 14: /* fconst */
3022 if (!arm_feature(env, ARM_FEATURE_VFP3))
3023 return 1;
3024
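/* VFP3 VMOV-immediate: insn[19] is the sign, insn[18:16] select the
   exponent pattern and insn[3:0] give the fraction, per the architected
   VFPExpandImm() rules. */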
3025 n = (insn << 12) & 0x80000000;
3026 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3027 if (dp) {
3028 if (i & 0x40)
3029 i |= 0x3f80;
3030 else
3031 i |= 0x4000;
3032 n |= i << 16;
3033 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
3034 } else {
3035 if (i & 0x40)
3036 i |= 0x780;
3037 else
3038 i |= 0x800;
3039 n |= i << 19;
3040 tcg_gen_movi_i32(cpu_F0s, n);
3041 }
3042 break;
3043 case 15: /* extension space */
3044 switch (rn) {
3045 case 0: /* cpy */
3046 /* no-op */
3047 break;
3048 case 1: /* abs */
3049 gen_vfp_abs(dp);
3050 break;
3051 case 2: /* neg */
3052 gen_vfp_neg(dp);
3053 break;
3054 case 3: /* sqrt */
3055 gen_vfp_sqrt(dp);
3056 break;
3057 case 4: /* vcvtb.f32.f16 */
3058 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3059 return 1;
3060 tmp = gen_vfp_mrs();
3061 tcg_gen_ext16u_i32(tmp, tmp);
3062 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3063 dead_tmp(tmp);
3064 break;
3065 case 5: /* vcvtt.f32.f16 */
3066 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3067 return 1;
3068 tmp = gen_vfp_mrs();
3069 tcg_gen_shri_i32(tmp, tmp, 16);
3070 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
3071 dead_tmp(tmp);
3072 break;
3073 case 6: /* vcvtb.f16.f32 */
3074 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3075 return 1;
3076 tmp = new_tmp();
3077 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3078 gen_mov_F0_vreg(0, rd);
3079 tmp2 = gen_vfp_mrs();
3080 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
3081 tcg_gen_or_i32(tmp, tmp, tmp2);
3082 dead_tmp(tmp2);
3083 gen_vfp_msr(tmp);
3084 break;
3085 case 7: /* vcvtt.f16.f32 */
3086 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
3087 return 1;
3088 tmp = new_tmp();
3089 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
3090 tcg_gen_shli_i32(tmp, tmp, 16);
3091 gen_mov_F0_vreg(0, rd);
3092 tmp2 = gen_vfp_mrs();
3093 tcg_gen_ext16u_i32(tmp2, tmp2);
3094 tcg_gen_or_i32(tmp, tmp, tmp2);
3095 dead_tmp(tmp2);
3096 gen_vfp_msr(tmp);
3097 break;
3098 case 8: /* cmp */
3099 gen_vfp_cmp(dp);
3100 break;
3101 case 9: /* cmpe */
3102 gen_vfp_cmpe(dp);
3103 break;
3104 case 10: /* cmpz */
3105 gen_vfp_cmp(dp);
3106 break;
3107 case 11: /* cmpez */
3108 gen_vfp_F1_ld0(dp);
3109 gen_vfp_cmpe(dp);
3110 break;
3111 case 15: /* single<->double conversion */
3112 if (dp)
3113 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
3114 else
3115 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
3116 break;
3117 case 16: /* fuito */
3118 gen_vfp_uito(dp);
3119 break;
3120 case 17: /* fsito */
3121 gen_vfp_sito(dp);
3122 break;
3123 case 20: /* fshto */
3124 if (!arm_feature(env, ARM_FEATURE_VFP3))
3125 return 1;
3126 gen_vfp_shto(dp, 16 - rm);
3127 break;
3128 case 21: /* fslto */
3129 if (!arm_feature(env, ARM_FEATURE_VFP3))
3130 return 1;
3131 gen_vfp_slto(dp, 32 - rm);
3132 break;
3133 case 22: /* fuhto */
3134 if (!arm_feature(env, ARM_FEATURE_VFP3))
3135 return 1;
3136 gen_vfp_uhto(dp, 16 - rm);
3137 break;
3138 case 23: /* fulto */
3139 if (!arm_feature(env, ARM_FEATURE_VFP3))
3140 return 1;
3141 gen_vfp_ulto(dp, 32 - rm);
3142 break;
3143 case 24: /* ftoui */
3144 gen_vfp_toui(dp);
3145 break;
3146 case 25: /* ftouiz */
3147 gen_vfp_touiz(dp);
3148 break;
3149 case 26: /* ftosi */
3150 gen_vfp_tosi(dp);
3151 break;
3152 case 27: /* ftosiz */
3153 gen_vfp_tosiz(dp);
3154 break;
3155 case 28: /* ftosh */
3156 if (!arm_feature(env, ARM_FEATURE_VFP3))
3157 return 1;
3158 gen_vfp_tosh(dp, 16 - rm);
3159 break;
3160 case 29: /* ftosl */
3161 if (!arm_feature(env, ARM_FEATURE_VFP3))
3162 return 1;
3163 gen_vfp_tosl(dp, 32 - rm);
3164 break;
3165 case 30: /* ftouh */
3166 if (!arm_feature(env, ARM_FEATURE_VFP3))
3167 return 1;
3168 gen_vfp_touh(dp, 16 - rm);
3169 break;
3170 case 31: /* ftoul */
3171 if (!arm_feature(env, ARM_FEATURE_VFP3))
3172 return 1;
3173 gen_vfp_toul(dp, 32 - rm);
3174 break;
3175 default: /* undefined */
3176 printf ("rn:%d\n", rn);
3177 return 1;
3178 }
3179 break;
3180 default: /* undefined */
3181 printf ("op:%d\n", op);
3182 return 1;
3183 }
3184
3185 /* Write back the result. */
3186 if (op == 15 && (rn >= 8 && rn <= 11))
3187 ; /* Comparison, do nothing. */
3188 else if (op == 15 && dp && ((rn & 0x1c) == 0x18))
3189 /* VCVT double to int: always integer result. */
3190 gen_mov_vreg_F0(0, rd);
3191 else if (op == 15 && rn == 15)
3192 /* conversion */
3193 gen_mov_vreg_F0(!dp, rd);
3194 else
3195 gen_mov_vreg_F0(dp, rd);
3196
3197 /* break out of the loop if we have finished */
3198 if (veclen == 0)
3199 break;
3200
3201 if (op == 15 && delta_m == 0) {
3202 /* single source one-many */
3203 while (veclen--) {
3204 rd = ((rd + delta_d) & (bank_mask - 1))
3205 | (rd & bank_mask);
3206 gen_mov_vreg_F0(dp, rd);
3207 }
3208 break;
3209 }
3210 /* Set up the next operands. */
3211 veclen--;
3212 rd = ((rd + delta_d) & (bank_mask - 1))
3213 | (rd & bank_mask);
3214
3215 if (op == 15) {
3216 /* One source operand. */
3217 rm = ((rm + delta_m) & (bank_mask - 1))
3218 | (rm & bank_mask);
3219 gen_mov_F0_vreg(dp, rm);
3220 } else {
3221 /* Two source operands. */
3222 rn = ((rn + delta_d) & (bank_mask - 1))
3223 | (rn & bank_mask);
3224 gen_mov_F0_vreg(dp, rn);
3225 if (delta_m) {
3226 rm = ((rm + delta_m) & (bank_mask - 1))
3227 | (rm & bank_mask);
3228 gen_mov_F1_vreg(dp, rm);
3229 }
3230 }
3231 }
3232 }
3233 break;
3234 case 0xc:
3235 case 0xd:
3236 if ((insn & 0x03e00000) == 0x00400000) {
3237 /* two-register transfer */
3238 rn = (insn >> 16) & 0xf;
3239 rd = (insn >> 12) & 0xf;
3240 if (dp) {
3241 VFP_DREG_M(rm, insn);
3242 } else {
3243 rm = VFP_SREG_M(insn);
3244 }
3245
3246 if (insn & ARM_CP_RW_BIT) {
3247 /* vfp->arm */
3248 if (dp) {
3249 gen_mov_F0_vreg(0, rm * 2);
3250 tmp = gen_vfp_mrs();
3251 store_reg(s, rd, tmp);
3252 gen_mov_F0_vreg(0, rm * 2 + 1);
3253 tmp = gen_vfp_mrs();
3254 store_reg(s, rn, tmp);
3255 } else {
3256 gen_mov_F0_vreg(0, rm);
3257 tmp = gen_vfp_mrs();
3258 store_reg(s, rd, tmp);
3259 gen_mov_F0_vreg(0, rm + 1);
3260 tmp = gen_vfp_mrs();
3261 store_reg(s, rn, tmp);
3262 }
3263 } else {
3264 /* arm->vfp */
3265 if (dp) {
3266 tmp = load_reg(s, rd);
3267 gen_vfp_msr(tmp);
3268 gen_mov_vreg_F0(0, rm * 2);
3269 tmp = load_reg(s, rn);
3270 gen_vfp_msr(tmp);
3271 gen_mov_vreg_F0(0, rm * 2 + 1);
3272 } else {
3273 tmp = load_reg(s, rd);
3274 gen_vfp_msr(tmp);
3275 gen_mov_vreg_F0(0, rm);
3276 tmp = load_reg(s, rn);
3277 gen_vfp_msr(tmp);
3278 gen_mov_vreg_F0(0, rm + 1);
3279 }
3280 }
3281 } else {
3282 /* Load/store */
3283 rn = (insn >> 16) & 0xf;
3284 if (dp)
3285 VFP_DREG_D(rd, insn);
3286 else
3287 rd = VFP_SREG_D(insn);
3288 if (s->thumb && rn == 15) {
3289 addr = new_tmp();
3290 tcg_gen_movi_i32(addr, s->pc & ~2);
3291 } else {
3292 addr = load_reg(s, rn);
3293 }
3294 if ((insn & 0x01200000) == 0x01000000) {
3295 /* Single load/store */
3296 offset = (insn & 0xff) << 2;
3297 if ((insn & (1 << 23)) == 0)
3298 offset = -offset;
3299 tcg_gen_addi_i32(addr, addr, offset);
3300 if (insn & (1 << 20)) {
3301 gen_vfp_ld(s, dp, addr);
3302 gen_mov_vreg_F0(dp, rd);
3303 } else {
3304 gen_mov_F0_vreg(dp, rd);
3305 gen_vfp_st(s, dp, addr);
3306 }
3307 dead_tmp(addr);
3308 } else {
3309 /* load/store multiple */
3310 if (dp)
3311 n = (insn >> 1) & 0x7f;
3312 else
3313 n = insn & 0xff;
3314
3315 if (insn & (1 << 24)) /* pre-decrement */
3316 tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
3317
3318 if (dp)
3319 offset = 8;
3320 else
3321 offset = 4;
3322 for (i = 0; i < n; i++) {
3323 if (insn & ARM_CP_RW_BIT) {
3324 /* load */
3325 gen_vfp_ld(s, dp, addr);
3326 gen_mov_vreg_F0(dp, rd + i);
3327 } else {
3328 /* store */
3329 gen_mov_F0_vreg(dp, rd + i);
3330 gen_vfp_st(s, dp, addr);
3331 }
3332 tcg_gen_addi_i32(addr, addr, offset);
3333 }
3334 if (insn & (1 << 21)) {
3335 /* writeback */
3336 if (insn & (1 << 24))
3337 offset = -offset * n;
3338 else if (dp && (insn & 1))
3339 offset = 4;
3340 else
3341 offset = 0;
3342
3343 if (offset != 0)
3344 tcg_gen_addi_i32(addr, addr, offset);
3345 store_reg(s, rn, addr);
3346 } else {
3347 dead_tmp(addr);
3348 }
3349 }
3350 }
3351 break;
3352 default:
3353 /* Should never happen. */
3354 return 1;
3355 }
3356 return 0;
3357 }
3358
3359 static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
3360 {
3361 TranslationBlock *tb;
3362
3363 tb = s->tb;
3364 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
3365 tcg_gen_goto_tb(n);
3366 gen_set_pc_im(dest);
3367 tcg_gen_exit_tb((long)tb + n);
3368 } else {
3369 gen_set_pc_im(dest);
3370 tcg_gen_exit_tb(0);
3371 }
3372 }
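/* Exiting with (long)tb + n tags the return value with the jump slot
   index, letting the execution loop patch slot n of this TB to chain
   straight to its successor; exiting with 0 forces a full TB lookup. */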
3373
3374 static inline void gen_jmp(DisasContext *s, uint32_t dest)
3375 {
3376 if (unlikely(s->singlestep_enabled)) {
3377 /* An indirect jump so that we still trigger the debug exception. */
3378 if (s->thumb)
3379 dest |= 1;
3380 gen_bx_im(s, dest);
3381 } else {
3382 gen_goto_tb(s, 0, dest);
3383 s->is_jmp = DISAS_TB_JUMP;
3384 }
3385 }
3386
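/* Helper for the SMULxy/SMLAxy family: x and y select the top or bottom
   halfword of each operand before the signed 16x16->32 multiply. */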
3387 static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
3388 {
3389 if (x)
3390 tcg_gen_sari_i32(t0, t0, 16);
3391 else
3392 gen_sxth(t0);
3393 if (y)
3394 tcg_gen_sari_i32(t1, t1, 16);
3395 else
3396 gen_sxth(t1);
3397 tcg_gen_mul_i32(t0, t0, t1);
3398 }
3399
3400 /* Return the mask of PSR bits set by a MSR instruction. */
3401 static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
3402 uint32_t mask;
3403
3404 mask = 0;
3405 if (flags & (1 << 0))
3406 mask |= 0xff;
3407 if (flags & (1 << 1))
3408 mask |= 0xff00;
3409 if (flags & (1 << 2))
3410 mask |= 0xff0000;
3411 if (flags & (1 << 3))
3412 mask |= 0xff000000;
3413
3414 /* Mask out undefined bits. */
3415 mask &= ~CPSR_RESERVED;
3416 if (!arm_feature(env, ARM_FEATURE_V6))
3417 mask &= ~(CPSR_E | CPSR_GE);
3418 if (!arm_feature(env, ARM_FEATURE_THUMB2))
3419 mask &= ~CPSR_IT;
3420 /* Mask out execution state bits. */
3421 if (!spsr)
3422 mask &= ~CPSR_EXEC;
3423 /* Mask out privileged bits. */
3424 if (IS_USER(s))
3425 mask &= CPSR_USER;
3426 return mask;
3427 }
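/* Example: an MSR with the 'fc' field specifier (flags == 0x9) starts
   from mask 0xff0000ff before the feature and privilege masking above. */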
3428
3429 /* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
3430 static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv t0)
3431 {
3432 TCGv tmp;
3433 if (spsr) {
3434 /* ??? This is also undefined in system mode. */
3435 if (IS_USER(s))
3436 return 1;
3437
3438 tmp = load_cpu_field(spsr);
3439 tcg_gen_andi_i32(tmp, tmp, ~mask);
3440 tcg_gen_andi_i32(t0, t0, mask);
3441 tcg_gen_or_i32(tmp, tmp, t0);
3442 store_cpu_field(tmp, spsr);
3443 } else {
3444 gen_set_cpsr(t0, mask);
3445 }
3446 dead_tmp(t0);
3447 gen_lookup_tb(s);
3448 return 0;
3449 }
3450
3451 /* Returns nonzero if access to the PSR is not permitted. */
3452 static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
3453 {
3454 TCGv tmp;
3455 tmp = new_tmp();
3456 tcg_gen_movi_i32(tmp, val);
3457 return gen_set_psr(s, mask, spsr, tmp);
3458 }
3459
3460 /* Generate an old-style exception return. Marks pc as dead. */
3461 static void gen_exception_return(DisasContext *s, TCGv pc)
3462 {
3463 TCGv tmp;
3464 store_reg(s, 15, pc);
3465 tmp = load_cpu_field(spsr);
3466 gen_set_cpsr(tmp, 0xffffffff);
3467 dead_tmp(tmp);
3468 s->is_jmp = DISAS_UPDATE;
3469 }
3470
3471 /* Generate a v6 exception return. Marks both values as dead. */
3472 static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
3473 {
3474 gen_set_cpsr(cpsr, 0xffffffff);
3475 dead_tmp(cpsr);
3476 store_reg(s, 15, pc);
3477 s->is_jmp = DISAS_UPDATE;
3478 }
3479
3480 static inline void
3481 gen_set_condexec(DisasContext *s)
3482 {
3483 if (s->condexec_mask) {
3484 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3485 TCGv tmp = new_tmp();
3486 tcg_gen_movi_i32(tmp, val);
3487 store_cpu_field(tmp, condexec_bits);
3488 }
3489 }
3490
3491 static void gen_nop_hint(DisasContext *s, int val)
3492 {
3493 switch (val) {
3494 case 3: /* wfi */
3495 gen_set_pc_im(s->pc);
3496 s->is_jmp = DISAS_WFI;
3497 break;
3498 case 2: /* wfe */
3499 case 4: /* sev */
3500 /* TODO: Implement SEV and WFE. May help SMP performance. */
3501 default: /* nop */
3502 break;
3503 }
3504 }
3505
3506 #define CPU_V001 cpu_V0, cpu_V0, cpu_V1
3507
3508 static inline int gen_neon_add(int size, TCGv t0, TCGv t1)
3509 {
3510 switch (size) {
3511 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
3512 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
3513 case 2: tcg_gen_add_i32(t0, t0, t1); break;
3514 default: return 1;
3515 }
3516 return 0;
3517 }
3518
3519 static inline void gen_neon_rsb(int size, TCGv t0, TCGv t1)
3520 {
3521 switch (size) {
3522 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
3523 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
3524 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
3525 default: return;
3526 }
3527 }
3528
3529 /* 32-bit pairwise ops end up the same as the elementwise versions. */
3530 #define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3531 #define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3532 #define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3533 #define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3534
3535 /* FIXME: This is wrong. They set the wrong overflow bit. */
3536 #define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3537 #define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3538 #define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3539 #define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3540
3541 #define GEN_NEON_INTEGER_OP_ENV(name) do { \
3542 switch ((size << 1) | u) { \
3543 case 0: \
3544 gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
3545 break; \
3546 case 1: \
3547 gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
3548 break; \
3549 case 2: \
3550 gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
3551 break; \
3552 case 3: \
3553 gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
3554 break; \
3555 case 4: \
3556 gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
3557 break; \
3558 case 5: \
3559 gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
3560 break; \
3561 default: return 1; \
3562 }} while (0)
3563
3564 #define GEN_NEON_INTEGER_OP(name) do { \
3565 switch ((size << 1) | u) { \
3566 case 0: \
3567 gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
3568 break; \
3569 case 1: \
3570 gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
3571 break; \
3572 case 2: \
3573 gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
3574 break; \
3575 case 3: \
3576 gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
3577 break; \
3578 case 4: \
3579 gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
3580 break; \
3581 case 5: \
3582 gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
3583 break; \
3584 default: return 1; \
3585 }} while (0)
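/* Both macros dispatch on (size << 1) | u; e.g. with size == 1 and u == 0,
   GEN_NEON_INTEGER_OP(hadd) expands to
   gen_helper_neon_hadd_s16(tmp, tmp, tmp2). */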
3586
3587 static TCGv neon_load_scratch(int scratch)
3588 {
3589 TCGv tmp = new_tmp();
3590 tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3591 return tmp;
3592 }
3593
3594 static void neon_store_scratch(int scratch, TCGv var)
3595 {
3596 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
3597 dead_tmp(var);
3598 }
3599
3600 static inline TCGv neon_get_scalar(int size, int reg)
3601 {
3602 TCGv tmp;
3603 if (size == 1) {
3604 tmp = neon_load_reg(reg >> 1, reg & 1);
3605 } else {
3606 tmp = neon_load_reg(reg >> 2, (reg >> 1) & 1);
3607 if (reg & 1) {
3608 gen_neon_dup_low16(tmp);
3609 } else {
3610 gen_neon_dup_high16(tmp);
3611 }
3612 }
3613 return tmp;
3614 }
3615
3616 static void gen_neon_unzip_u8(TCGv t0, TCGv t1)
3617 {
3618 TCGv rd, rm, tmp;
3619
3620 rd = new_tmp();
3621 rm = new_tmp();
3622 tmp = new_tmp();
3623
3624 tcg_gen_andi_i32(rd, t0, 0xff);
3625 tcg_gen_shri_i32(tmp, t0, 8);
3626 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3627 tcg_gen_or_i32(rd, rd, tmp);
3628 tcg_gen_shli_i32(tmp, t1, 16);
3629 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3630 tcg_gen_or_i32(rd, rd, tmp);
3631 tcg_gen_shli_i32(tmp, t1, 8);
3632 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3633 tcg_gen_or_i32(rd, rd, tmp);
3634
3635 tcg_gen_shri_i32(rm, t0, 8);
3636 tcg_gen_andi_i32(rm, rm, 0xff);
3637 tcg_gen_shri_i32(tmp, t0, 16);
3638 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3639 tcg_gen_or_i32(rm, rm, tmp);
3640 tcg_gen_shli_i32(tmp, t1, 8);
3641 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3642 tcg_gen_or_i32(rm, rm, tmp);
3643 tcg_gen_andi_i32(tmp, t1, 0xff000000);
3644 tcg_gen_or_i32(t1, rm, tmp);
3645 tcg_gen_mov_i32(t0, rd);
3646
3647 dead_tmp(tmp);
3648 dead_tmp(rm);
3649 dead_tmp(rd);
3650 }
3651
3652 static void gen_neon_zip_u8(TCGv t0, TCGv t1)
3653 {
3654 TCGv rd, rm, tmp;
3655
3656 rd = new_tmp();
3657 rm = new_tmp();
3658 tmp = new_tmp();
3659
3660 tcg_gen_andi_i32(rd, t0, 0xff);
3661 tcg_gen_shli_i32(tmp, t1, 8);
3662 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3663 tcg_gen_or_i32(rd, rd, tmp);
3664 tcg_gen_shli_i32(tmp, t0, 16);
3665 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3666 tcg_gen_or_i32(rd, rd, tmp);
3667 tcg_gen_shli_i32(tmp, t1, 24);
3668 tcg_gen_andi_i32(tmp, tmp, 0xff000000);
3669 tcg_gen_or_i32(rd, rd, tmp);
3670
3671 tcg_gen_andi_i32(rm, t1, 0xff000000);
3672 tcg_gen_shri_i32(tmp, t0, 8);
3673 tcg_gen_andi_i32(tmp, tmp, 0xff0000);
3674 tcg_gen_or_i32(rm, rm, tmp);
3675 tcg_gen_shri_i32(tmp, t1, 8);
3676 tcg_gen_andi_i32(tmp, tmp, 0xff00);
3677 tcg_gen_or_i32(rm, rm, tmp);
3678 tcg_gen_shri_i32(tmp, t0, 16);
3679 tcg_gen_andi_i32(tmp, tmp, 0xff);
3680 tcg_gen_or_i32(t1, rm, tmp);
3681 tcg_gen_mov_i32(t0, rd);
3682
3683 dead_tmp(tmp);
3684 dead_tmp(rm);
3685 dead_tmp(rd);
3686 }
3687
3688 static void gen_neon_zip_u16(TCGv t0, TCGv t1)
3689 {
3690 TCGv tmp, tmp2;
3691
3692 tmp = new_tmp();
3693 tmp2 = new_tmp();
3694
3695 tcg_gen_andi_i32(tmp, t0, 0xffff);
3696 tcg_gen_shli_i32(tmp2, t1, 16);
3697 tcg_gen_or_i32(tmp, tmp, tmp2);
3698 tcg_gen_andi_i32(t1, t1, 0xffff0000);
3699 tcg_gen_shri_i32(tmp2, t0, 16);
3700 tcg_gen_or_i32(t1, t1, tmp2);
3701 tcg_gen_mov_i32(t0, tmp);
3702
3703 dead_tmp(tmp2);
3704 dead_tmp(tmp);
3705 }
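/* Example for gen_neon_zip_u16 (halfwords written high:low): t0 = a1:a0
   and t1 = b1:b0 become t0 = b0:a0 and t1 = b1:a1. */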
3706
3707 static void gen_neon_unzip(int reg, int q, int tmp, int size)
3708 {
3709 int n;
3710 TCGv t0, t1;
3711
3712 for (n = 0; n < q + 1; n += 2) {
3713 t0 = neon_load_reg(reg, n);
3714 t1 = neon_load_reg(reg, n + 1);
3715 switch (size) {
3716 case 0: gen_neon_unzip_u8(t0, t1); break;
3717 case 1: gen_neon_zip_u16(t0, t1); break; /* zip and unzip are the same. */
3718 case 2: /* no-op */; break;
3719 default: abort();
3720 }
3721 neon_store_scratch(tmp + n, t0);
3722 neon_store_scratch(tmp + n + 1, t1);
3723 }
3724 }
3725
3726 static void gen_neon_trn_u8(TCGv t0, TCGv t1)
3727 {
3728 TCGv rd, tmp;
3729
3730 rd = new_tmp();
3731 tmp = new_tmp();
3732
3733 tcg_gen_shli_i32(rd, t0, 8);
3734 tcg_gen_andi_i32(rd, rd, 0xff00ff00);
3735 tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
3736 tcg_gen_or_i32(rd, rd, tmp);
3737
3738 tcg_gen_shri_i32(t1, t1, 8);
3739 tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
3740 tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
3741 tcg_gen_or_i32(t1, t1, tmp);
3742 tcg_gen_mov_i32(t0, rd);
3743
3744 dead_tmp(tmp);
3745 dead_tmp(rd);
3746 }
3747
3748 static void gen_neon_trn_u16(TCGv t0, TCGv t1)
3749 {
3750 TCGv rd, tmp;
3751
3752 rd = new_tmp();
3753 tmp = new_tmp();
3754
3755 tcg_gen_shli_i32(rd, t0, 16);
3756 tcg_gen_andi_i32(tmp, t1, 0xffff);
3757 tcg_gen_or_i32(rd, rd, tmp);
3758 tcg_gen_shri_i32(t1, t1, 16);
3759 tcg_gen_andi_i32(tmp, t0, 0xffff0000);
3760 tcg_gen_or_i32(t1, t1, tmp);
3761 tcg_gen_mov_i32(t0, rd);
3762
3763 dead_tmp(tmp);
3764 dead_tmp(rd);
3765 }
3766
3767
3768 static struct {
3769 int nregs;
3770 int interleave;
3771 int spacing;
3772 } neon_ls_element_type[11] = {
3773 {4, 4, 1},
3774 {4, 4, 2},
3775 {4, 1, 1},
3776 {4, 2, 1},
3777 {3, 3, 1},
3778 {3, 3, 2},
3779 {3, 1, 1},
3780 {1, 1, 1},
3781 {2, 2, 1},
3782 {2, 2, 2},
3783 {2, 1, 1}
3784 };
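/* Indexed by the op field of the instruction, per the use below: nregs is
   the number of registers per structure, interleave the element interleave
   factor, and spacing the D-register stride (1 = consecutive registers,
   2 = every other register). */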
3785
3786 /* Translate a NEON load/store element instruction. Return nonzero if the
3787 instruction is invalid. */
3788 static int disas_neon_ls_insn(CPUState *env, DisasContext *s, uint32_t insn)
3789 {
3790 int rd, rn, rm;
3791 int op;
3792 int nregs;
3793 int interleave;
3794 int spacing;
3795 int stride;
3796 int size;
3797 int reg;
3798 int pass;
3799 int load;
3800 int shift;
3801 int n;
3802 TCGv addr;
3803 TCGv tmp;
3804 TCGv tmp2;
3805 TCGv_i64 tmp64;
3806
3807 if (!vfp_enabled(env))
3808 return 1;
3809 VFP_DREG_D(rd, insn);
3810 rn = (insn >> 16) & 0xf;
3811 rm = insn & 0xf;
3812 load = (insn & (1 << 21)) != 0;
3813 addr = new_tmp();
3814 if ((insn & (1 << 23)) == 0) {
3815 /* Load store all elements. */
3816 op = (insn >> 8) & 0xf;
3817 size = (insn >> 6) & 3;
3818 if (op > 10)
3819 return 1;
3820 nregs = neon_ls_element_type[op].nregs;
3821 interleave = neon_ls_element_type[op].interleave;
3822 spacing = neon_ls_element_type[op].spacing;
3823 if (size == 3 && (interleave | spacing) != 1)
3824 return 1;
3825 load_reg_var(s, addr, rn);
3826 stride = (1 << size) * interleave;
3827 for (reg = 0; reg < nregs; reg++) {
3828 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3829 load_reg_var(s, addr, rn);
3830 tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
3831 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3832 load_reg_var(s, addr, rn);
3833 tcg_gen_addi_i32(addr, addr, 1 << size);
3834 }
3835 if (size == 3) {
3836 if (load) {
3837 tmp64 = gen_ld64(addr, IS_USER(s));
3838 neon_store_reg64(tmp64, rd);
3839 tcg_temp_free_i64(tmp64);
3840 } else {
3841 tmp64 = tcg_temp_new_i64();
3842 neon_load_reg64(tmp64, rd);
3843 gen_st64(tmp64, addr, IS_USER(s));
3844 }
3845 tcg_gen_addi_i32(addr, addr, stride);
3846 } else {
3847 for (pass = 0; pass < 2; pass++) {
3848 if (size == 2) {
3849 if (load) {
3850 tmp = gen_ld32(addr, IS_USER(s));
3851 neon_store_reg(rd, pass, tmp);
3852 } else {
3853 tmp = neon_load_reg(rd, pass);
3854 gen_st32(tmp, addr, IS_USER(s));
3855 }
3856 tcg_gen_addi_i32(addr, addr, stride);
3857 } else if (size == 1) {
3858 if (load) {
3859 tmp = gen_ld16u(addr, IS_USER(s));
3860 tcg_gen_addi_i32(addr, addr, stride);
3861 tmp2 = gen_ld16u(addr, IS_USER(s));
3862 tcg_gen_addi_i32(addr, addr, stride);
3863 tcg_gen_shli_i32(tmp2, tmp2, 16);
3864 tcg_gen_or_i32(tmp, tmp, tmp2);
3865 dead_tmp(tmp2);
3866 neon_store_reg(rd, pass, tmp);
3867 } else {
3868 tmp = neon_load_reg(rd, pass);
3869 tmp2 = new_tmp();
3870 tcg_gen_shri_i32(tmp2, tmp, 16);
3871 gen_st16(tmp, addr, IS_USER(s));
3872 tcg_gen_addi_i32(addr, addr, stride);
3873 gen_st16(tmp2, addr, IS_USER(s));
3874 tcg_gen_addi_i32(addr, addr, stride);
3875 }
3876 } else /* size == 0 */ {
3877 if (load) {
3878 TCGV_UNUSED(tmp2);
3879 for (n = 0; n < 4; n++) {
3880 tmp = gen_ld8u(addr, IS_USER(s));
3881 tcg_gen_addi_i32(addr, addr, stride);
3882 if (n == 0) {
3883 tmp2 = tmp;
3884 } else {
3885 tcg_gen_shli_i32(tmp, tmp, n * 8);
3886 tcg_gen_or_i32(tmp2, tmp2, tmp);
3887 dead_tmp(tmp);
3888 }
3889 }
3890 neon_store_reg(rd, pass, tmp2);
3891 } else {
3892 tmp2 = neon_load_reg(rd, pass);
3893 for (n = 0; n < 4; n++) {
3894 tmp = new_tmp();
3895 if (n == 0) {
3896 tcg_gen_mov_i32(tmp, tmp2);
3897 } else {
3898 tcg_gen_shri_i32(tmp, tmp2, n * 8);
3899 }
3900 gen_st8(tmp, addr, IS_USER(s));
3901 tcg_gen_addi_i32(addr, addr, stride);
3902 }
3903 dead_tmp(tmp2);
3904 }
3905 }
3906 }
3907 }
3908 rd += spacing;
3909 }
3910 stride = nregs * 8;
3911 } else {
3912 size = (insn >> 10) & 3;
3913 if (size == 3) {
3914 /* Load single element to all lanes. */
3915 if (!load)
3916 return 1;
3917 size = (insn >> 6) & 3;
3918 nregs = ((insn >> 8) & 3) + 1;
3919 stride = (insn & (1 << 5)) ? 2 : 1;
3920 load_reg_var(s, addr, rn);
3921 for (reg = 0; reg < nregs; reg++) {
3922 switch (size) {
3923 case 0:
3924 tmp = gen_ld8u(addr, IS_USER(s));
3925 gen_neon_dup_u8(tmp, 0);
3926 break;
3927 case 1:
3928 tmp = gen_ld16u(addr, IS_USER(s));
3929 gen_neon_dup_low16(tmp);
3930 break;
3931 case 2:
3932 tmp = gen_ld32(addr, IS_USER(s));
3933 break;
3934 case 3:
3935 return 1;
3936 default: /* Avoid compiler warnings. */
3937 abort();
3938 }
3939 tcg_gen_addi_i32(addr, addr, 1 << size);
3940 tmp2 = new_tmp();
3941 tcg_gen_mov_i32(tmp2, tmp);
3942 neon_store_reg(rd, 0, tmp2);
3943 neon_store_reg(rd, 1, tmp);
3944 rd += stride;
3945 }
3946 stride = (1 << size) * nregs;
3947 } else {
3948 /* Single element. */
3949 pass = (insn >> 7) & 1;
3950 switch (size) {
3951 case 0:
3952 shift = ((insn >> 5) & 3) * 8;
3953 stride = 1;
3954 break;
3955 case 1:
3956 shift = ((insn >> 6) & 1) * 16;
3957 stride = (insn & (1 << 5)) ? 2 : 1;
3958 break;
3959 case 2:
3960 shift = 0;
3961 stride = (insn & (1 << 6)) ? 2 : 1;
3962 break;
3963 default:
3964 abort();
3965 }
3966 nregs = ((insn >> 8) & 3) + 1;
3967 load_reg_var(s, addr, rn);
3968 for (reg = 0; reg < nregs; reg++) {
3969 if (load) {
3970 switch (size) {
3971 case 0:
3972 tmp = gen_ld8u(addr, IS_USER(s));
3973 break;
3974 case 1:
3975 tmp = gen_ld16u(addr, IS_USER(s));
3976 break;
3977 case 2:
3978 tmp = gen_ld32(addr, IS_USER(s));
3979 break;
3980 default: /* Avoid compiler warnings. */
3981 abort();
3982 }
3983 if (size != 2) {
3984 tmp2 = neon_load_reg(rd, pass);
3985 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3986 dead_tmp(tmp2);
3987 }
3988 neon_store_reg(rd, pass, tmp);
3989 } else { /* Store */
3990 tmp = neon_load_reg(rd, pass);
3991 if (shift)
3992 tcg_gen_shri_i32(tmp, tmp, shift);
3993 switch (size) {
3994 case 0:
3995 gen_st8(tmp, addr, IS_USER(s));
3996 break;
3997 case 1:
3998 gen_st16(tmp, addr, IS_USER(s));
3999 break;
4000 case 2:
4001 gen_st32(tmp, addr, IS_USER(s));
4002 break;
4003 }
4004 }
4005 rd += stride;
4006 tcg_gen_addi_i32(addr, addr, 1 << size);
4007 }
4008 stride = nregs * (1 << size);
4009 }
4010 }
4011 dead_tmp(addr);
4012 if (rm != 15) {
4013 TCGv base;
4014
4015 base = load_reg(s, rn);
4016 if (rm == 13) {
4017 tcg_gen_addi_i32(base, base, stride);
4018 } else {
4019 TCGv index;
4020 index = load_reg(s, rm);
4021 tcg_gen_add_i32(base, base, index);
4022 dead_tmp(index);
4023 }
4024 store_reg(s, rn, base);
4025 }
4026 return 0;
4027 }
4028
4029 /* Bitwise select. dest = c ? t : f. Clobbers T and F. */
4030 static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
4031 {
4032 tcg_gen_and_i32(t, t, c);
4033 tcg_gen_andc_i32(f, f, c);
4034 tcg_gen_or_i32(dest, t, f);
4035 }
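/* gen_neon_bsl computes dest = (t & c) | (f & ~c); VBSL, VBIT and VBIF
   below differ only in which operand supplies the select mask c. */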
4036
4037 static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
4038 {
4039 switch (size) {
4040 case 0: gen_helper_neon_narrow_u8(dest, src); break;
4041 case 1: gen_helper_neon_narrow_u16(dest, src); break;
4042 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
4043 default: abort();
4044 }
4045 }
4046
4047 static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
4048 {
4049 switch (size) {
4050 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
4051 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
4052 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
4053 default: abort();
4054 }
4055 }
4056
4057 static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
4058 {
4059 switch (size) {
4060 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
4061 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
4062 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
4063 default: abort();
4064 }
4065 }
4066
4067 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4068 int q, int u)
4069 {
4070 if (q) {
4071 if (u) {
4072 switch (size) {
4073 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4074 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4075 default: abort();
4076 }
4077 } else {
4078 switch (size) {
4079 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4080 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4081 default: abort();
4082 }
4083 }
4084 } else {
4085 if (u) {
4086 switch (size) {
4087 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4088 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4089 default: abort();
4090 }
4091 } else {
4092 switch (size) {
4093 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4094 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4095 default: abort();
4096 }
4097 }
4098 }
4099 }
4100
4101 static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
4102 {
4103 if (u) {
4104 switch (size) {
4105 case 0: gen_helper_neon_widen_u8(dest, src); break;
4106 case 1: gen_helper_neon_widen_u16(dest, src); break;
4107 case 2: tcg_gen_extu_i32_i64(dest, src); break;
4108 default: abort();
4109 }
4110 } else {
4111 switch (size) {
4112 case 0: gen_helper_neon_widen_s8(dest, src); break;
4113 case 1: gen_helper_neon_widen_s16(dest, src); break;
4114 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4115 default: abort();
4116 }
4117 }
4118 dead_tmp(src);
4119 }
4120
4121 static inline void gen_neon_addl(int size)
4122 {
4123 switch (size) {
4124 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4125 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4126 case 2: tcg_gen_add_i64(CPU_V001); break;
4127 default: abort();
4128 }
4129 }
4130
4131 static inline void gen_neon_subl(int size)
4132 {
4133 switch (size) {
4134 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4135 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4136 case 2: tcg_gen_sub_i64(CPU_V001); break;
4137 default: abort();
4138 }
4139 }
4140
4141 static inline void gen_neon_negl(TCGv_i64 var, int size)
4142 {
4143 switch (size) {
4144 case 0: gen_helper_neon_negl_u16(var, var); break;
4145 case 1: gen_helper_neon_negl_u32(var, var); break;
4146 case 2: gen_helper_neon_negl_u64(var, var); break;
4147 default: abort();
4148 }
4149 }
4150
4151 static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
4152 {
4153 switch (size) {
4154 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4155 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4156 default: abort();
4157 }
4158 }
4159
4160 static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
4161 {
4162 TCGv_i64 tmp;
4163
4164 switch ((size << 1) | u) {
4165 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4166 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4167 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4168 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4169 case 4:
4170 tmp = gen_muls_i64_i32(a, b);
4171 tcg_gen_mov_i64(dest, tmp);
4172 break;
4173 case 5:
4174 tmp = gen_mulu_i64_i32(a, b);
4175 tcg_gen_mov_i64(dest, tmp);
4176 break;
4177 default: abort();
4178 }
4179 }
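/* gen_neon_mull dispatches on (size << 1) | u; e.g. size == 2, u == 1 is
   an unsigned 32x32->64 multiply via gen_mulu_i64_i32. */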
4180
4181 /* Translate a NEON data processing instruction. Return nonzero if the
4182 instruction is invalid.
4183 We process data in a mixture of 32-bit and 64-bit chunks.
4184 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
4185
4186 static int disas_neon_data_insn(CPUState *env, DisasContext *s, uint32_t insn)
4187 {
4188 int op;
4189 int q;
4190 int rd, rn, rm;
4191 int size;
4192 int shift;
4193 int pass;
4194 int count;
4195 int pairwise;
4196 int u;
4197 int n;
4198 uint32_t imm, mask;
4199 TCGv tmp, tmp2, tmp3, tmp4, tmp5;
4200 TCGv_i64 tmp64;
4201
4202 if (!vfp_enabled(env))
4203 return 1;
4204 q = (insn & (1 << 6)) != 0;
4205 u = (insn >> 24) & 1;
4206 VFP_DREG_D(rd, insn);
4207 VFP_DREG_N(rn, insn);
4208 VFP_DREG_M(rm, insn);
4209 size = (insn >> 20) & 3;
4210 if ((insn & (1 << 23)) == 0) {
4211 /* Three register same length. */
4212 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
4213 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4214 || op == 10 || op == 11 || op == 16)) {
4215 /* 64-bit element instructions. */
4216 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4217 neon_load_reg64(cpu_V0, rn + pass);
4218 neon_load_reg64(cpu_V1, rm + pass);
4219 switch (op) {
4220 case 1: /* VQADD */
4221 if (u) {
4222 gen_helper_neon_add_saturate_u64(CPU_V001);
4223 } else {
4224 gen_helper_neon_add_saturate_s64(CPU_V001);
4225 }
4226 break;
4227 case 5: /* VQSUB */
4228 if (u) {
4229 gen_helper_neon_sub_saturate_u64(CPU_V001);
4230 } else {
4231 gen_helper_neon_sub_saturate_s64(CPU_V001);
4232 }
4233 break;
4234 case 8: /* VSHL */
4235 if (u) {
4236 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4237 } else {
4238 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4239 }
4240 break;
4241 case 9: /* VQSHL */
4242 if (u) {
4243 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4244 cpu_V1, cpu_V0);
4245 } else {
4246 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
4247 cpu_V1, cpu_V0);
4248 }
4249 break;
4250 case 10: /* VRSHL */
4251 if (u) {
4252 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
4253 } else {
4254 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4255 }
4256 break;
4257 case 11: /* VQRSHL */
4258 if (u) {
4259 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4260 cpu_V1, cpu_V0);
4261 } else {
4262 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4263 cpu_V1, cpu_V0);
4264 }
4265 break;
4266 case 16:
4267 if (u) {
4268 tcg_gen_sub_i64(CPU_V001);
4269 } else {
4270 tcg_gen_add_i64(CPU_V001);
4271 }
4272 break;
4273 default:
4274 abort();
4275 }
4276 neon_store_reg64(cpu_V0, rd + pass);
4277 }
4278 return 0;
4279 }
4280 switch (op) {
4281 case 8: /* VSHL */
4282 case 9: /* VQSHL */
4283 case 10: /* VRSHL */
4284 case 11: /* VQRSHL */
4285 {
4286 int rtmp;
4287 /* Shift instruction operands are reversed. */
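/* That is, the encoding carries the shift count in the Vn field
 and the value to be shifted in Vm (the reverse of the other
 three-register ops), while the helpers expect (value, shift),
 hence the swap of rn and rm here. */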
4288 rtmp = rn;
4289 rn = rm;
4290 rm = rtmp;
4291 pairwise = 0;
4292 }
4293 break;
4294 case 20: /* VPMAX */
4295 case 21: /* VPMIN */
4296 case 23: /* VPADD */
4297 pairwise = 1;
4298 break;
4299 case 26: /* VPADD (float) */
4300 pairwise = (u && size < 2);
4301 break;
4302 case 30: /* VPMIN/VPMAX (float) */
4303 pairwise = u;
4304 break;
4305 default:
4306 pairwise = 0;
4307 break;
4308 }
4309
4310 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4311
4312 if (pairwise) {
4313 /* Pairwise. */
4314 if (q)
4315 n = (pass & 1) * 2;
4316 else
4317 n = 0;
4318 if (pass < q + 1) {
4319 tmp = neon_load_reg(rn, n);
4320 tmp2 = neon_load_reg(rn, n + 1);
4321 } else {
4322 tmp = neon_load_reg(rm, n);
4323 tmp2 = neon_load_reg(rm, n + 1);
4324 }
4325 } else {
4326 /* Elementwise. */
4327 tmp = neon_load_reg(rn, pass);
4328 tmp2 = neon_load_reg(rm, pass);
4329 }
4330 switch (op) {
4331 case 0: /* VHADD */
4332 GEN_NEON_INTEGER_OP(hadd);
4333 break;
4334 case 1: /* VQADD */
4335 GEN_NEON_INTEGER_OP_ENV(qadd);
4336 break;
4337 case 2: /* VRHADD */
4338 GEN_NEON_INTEGER_OP(rhadd);
4339 break;
4340 case 3: /* Logic ops. */
4341 switch ((u << 2) | size) {
4342 case 0: /* VAND */
4343 tcg_gen_and_i32(tmp, tmp, tmp2);
4344 break;
4345 case 1: /* BIC */
4346 tcg_gen_andc_i32(tmp, tmp, tmp2);
4347 break;
4348 case 2: /* VORR */
4349 tcg_gen_or_i32(tmp, tmp, tmp2);
4350 break;
4351 case 3: /* VORN */
4352 tcg_gen_orc_i32(tmp, tmp, tmp2);
4353 break;
4354 case 4: /* VEOR */
4355 tcg_gen_xor_i32(tmp, tmp, tmp2);
4356 break;
4357 case 5: /* VBSL */
4358 tmp3 = neon_load_reg(rd, pass);
4359 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
4360 dead_tmp(tmp3);
4361 break;
4362 case 6: /* VBIT */
4363 tmp3 = neon_load_reg(rd, pass);
4364 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
4365 dead_tmp(tmp3);
4366 break;
4367 case 7: /* VBIF */
4368 tmp3 = neon_load_reg(rd, pass);
4369 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
4370 dead_tmp(tmp3);
4371 break;
4372 }
4373 break;
4374 case 4: /* VHSUB */
4375 GEN_NEON_INTEGER_OP(hsub);
4376 break;
4377 case 5: /* VQSUB */
4378 GEN_NEON_INTEGER_OP_ENV(qsub);
4379 break;
4380 case 6: /* VCGT */
4381 GEN_NEON_INTEGER_OP(cgt);
4382 break;
4383 case 7: /* VCGE */
4384 GEN_NEON_INTEGER_OP(cge);
4385 break;
4386 case 8: /* VSHL */
4387 GEN_NEON_INTEGER_OP(shl);
4388 break;
4389 case 9: /* VQSHL */
4390 GEN_NEON_INTEGER_OP_ENV(qshl);
4391 break;
4392 case 10: /* VRSHL */
4393 GEN_NEON_INTEGER_OP(rshl);
4394 break;
4395 case 11: /* VQRSHL */
4396 GEN_NEON_INTEGER_OP_ENV(qrshl);
4397 break;
4398 case 12: /* VMAX */
4399 GEN_NEON_INTEGER_OP(max);
4400 break;
4401 case 13: /* VMIN */
4402 GEN_NEON_INTEGER_OP(min);
4403 break;
4404 case 14: /* VABD */
4405 GEN_NEON_INTEGER_OP(abd);
4406 break;
4407 case 15: /* VABA */
4408 GEN_NEON_INTEGER_OP(abd);
4409 dead_tmp(tmp2);
4410 tmp2 = neon_load_reg(rd, pass);
4411 gen_neon_add(size, tmp, tmp2);
4412 break;
4413 case 16:
4414 if (!u) { /* VADD */
4415 if (gen_neon_add(size, tmp, tmp2))
4416 return 1;
4417 } else { /* VSUB */
4418 switch (size) {
4419 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
4420 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
4421 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
4422 default: return 1;
4423 }
4424 }
4425 break;
4426 case 17:
4427 if (!u) { /* VTST */
4428 switch (size) {
4429 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
4430 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
4431 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
4432 default: return 1;
4433 }
4434 } else { /* VCEQ */
4435 switch (size) {
4436 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
4437 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
4438 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
4439 default: return 1;
4440 }
4441 }
4442 break;
4443 case 18: /* Multiply. */
4444 switch (size) {
4445 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4446 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4447 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4448 default: return 1;
4449 }
4450 dead_tmp(tmp2);
4451 tmp2 = neon_load_reg(rd, pass);
4452 if (u) { /* VMLS */
4453 gen_neon_rsb(size, tmp, tmp2);
4454 } else { /* VMLA */
4455 gen_neon_add(size, tmp, tmp2);
4456 }
4457 break;
4458 case 19: /* VMUL */
4459 if (u) { /* polynomial */
4460 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
4461 } else { /* Integer */
4462 switch (size) {
4463 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
4464 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
4465 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
4466 default: return 1;
4467 }
4468 }
4469 break;
4470 case 20: /* VPMAX */
4471 GEN_NEON_INTEGER_OP(pmax);
4472 break;
4473 case 21: /* VPMIN */
4474 GEN_NEON_INTEGER_OP(pmin);
4475 break;
4476 case 22: /* Multiply high. */
4477 if (!u) { /* VQDMULH */
4478 switch (size) {
4479 case 1: gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4480 case 2: gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4481 default: return 1;
4482 }
4483 } else { /* VQRDMULH */
4484 switch (size) {
4485 case 1: gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2); break;
4486 case 2: gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2); break;
4487 default: return 1;
4488 }
4489 }
4490 break;
4491 case 23: /* VPADD */
4492 if (u)
4493 return 1;
4494 switch (size) {
4495 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
4496 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
4497 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
4498 default: return 1;
4499 }
4500 break;
4501 case 26: /* Floating point arithmetic. */
4502 switch ((u << 2) | size) {
4503 case 0: /* VADD */
4504 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4505 break;
4506 case 2: /* VSUB */
4507 gen_helper_neon_sub_f32(tmp, tmp, tmp2);
4508 break;
4509 case 4: /* VPADD */
4510 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4511 break;
4512 case 6: /* VABD */
4513 gen_helper_neon_abd_f32(tmp, tmp, tmp2);
4514 break;
4515 default:
4516 return 1;
4517 }
4518 break;
4519 case 27: /* Float multiply. */
4520 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
4521 if (!u) {
4522 dead_tmp(tmp2);
4523 tmp2 = neon_load_reg(rd, pass);
4524 if (size == 0) {
4525 gen_helper_neon_add_f32(tmp, tmp, tmp2);
4526 } else {
4527 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
4528 }
4529 }
4530 break;
4531 case 28: /* Float compare. */
4532 if (!u) {
4533 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
4534 } else {
4535 if (size == 0)
4536 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
4537 else
4538 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
4539 }
4540 break;
4541 case 29: /* Float compare absolute. */
4542 if (!u)
4543 return 1;
4544 if (size == 0)
4545 gen_helper_neon_acge_f32(tmp, tmp, tmp2);
4546 else
4547 gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
4548 break;
4549 case 30: /* Float min/max. */
4550 if (size == 0)
4551 gen_helper_neon_max_f32(tmp, tmp, tmp2);
4552 else
4553 gen_helper_neon_min_f32(tmp, tmp, tmp2);
4554 break;
4555 case 31:
4556 if (size == 0)
4557 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
4558 else
4559 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
4560 break;
4561 default:
4562 abort();
4563 }
4564 dead_tmp(tmp2);
4565
4566 /* Save the result. For elementwise operations we can put it
4567 straight into the destination register. For pairwise operations
4568 we have to be careful to avoid clobbering the source operands. */
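/* e.g. VPADD Dd, Dn, Dm with Dd == Dm: the first pass would
 overwrite an input the second pass still needs, so results are
 staged in scratch space whenever rd == rm. */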
4569 if (pairwise && rd == rm) {
4570 neon_store_scratch(pass, tmp);
4571 } else {
4572 neon_store_reg(rd, pass, tmp);
4573 }
4574
4575 } /* for pass */
4576 if (pairwise && rd == rm) {
4577 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4578 tmp = neon_load_scratch(pass);
4579 neon_store_reg(rd, pass, tmp);
4580 }
4581 }
4582 /* End of 3 register same size operations. */
4583 } else if (insn & (1 << 4)) {
4584 if ((insn & 0x00380080) != 0) {
4585 /* Two registers and shift. */
4586 op = (insn >> 8) & 0xf;
4587 if (insn & (1 << 7)) {
4588 /* 64-bit shift. */
4589 size = 3;
4590 } else {
4591 size = 2;
4592 while ((insn & (1 << (size + 19))) == 0)
4593 size--;
4594 }
4595 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4596 /* To avoid excessive duplication of ops we implement shift
4597 by immediate using the variable shift operations. */
4598 if (op < 8) {
4599 /* Shift by immediate:
4600 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4601 /* Right shifts are encoded as N - shift, where N is the
4602 element size in bits. */
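/* e.g. VSHR.U8 #3 encodes a shift field of 8 - 3 = 5; the
 adjustment below yields 5 - 8 = -3, and the variable-shift
 helpers treat a negative count as a right shift by 3. */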
4603 if (op <= 4)
4604 shift = shift - (1 << (size + 3));
4605 if (size == 3) {
4606 count = q + 1;
4607 } else {
4608 count = q ? 4: 2;
4609 }
4610 switch (size) {
4611 case 0:
4612 imm = (uint8_t) shift;
4613 imm |= imm << 8;
4614 imm |= imm << 16;
4615 break;
4616 case 1:
4617 imm = (uint16_t) shift;
4618 imm |= imm << 16;
4619 break;
4620 case 2:
4621 case 3:
4622 imm = shift;
4623 break;
4624 default:
4625 abort();
4626 }
4627
4628 for (pass = 0; pass < count; pass++) {
4629 if (size == 3) {
4630 neon_load_reg64(cpu_V0, rm + pass);
4631 tcg_gen_movi_i64(cpu_V1, imm);
4632 switch (op) {
4633 case 0: /* VSHR */
4634 case 1: /* VSRA */
4635 if (u)
4636 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4637 else
4638 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
4639 break;
4640 case 2: /* VRSHR */
4641 case 3: /* VRSRA */
4642 if (u)
4643 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
4644 else
4645 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
4646 break;
4647 case 4: /* VSRI */
4648 if (!u)
4649 return 1;
4650 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4651 break;
4652 case 5: /* VSHL, VSLI */
4653 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4654 break;
4655 case 6: /* VQSHL */
4656 if (u)
4657 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4658 else
4659 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4660 break;
4661 case 7: /* VQSHLU */
4662 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4663 break;
4664 }
4665 if (op == 1 || op == 3) {
4666 /* Accumulate. */
4667 neon_load_reg64(cpu_V0, rd + pass);
4668 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4669 } else if (op == 4 || (op == 5 && u)) {
4670 /* Insert */
4671 cpu_abort(env, "VS[LR]I.64 not implemented");
4672 }
4673 neon_store_reg64(cpu_V0, rd + pass);
4674 } else { /* size < 3 */
4675 /* Operands in tmp (value) and tmp2 (shift count). */
4676 tmp = neon_load_reg(rm, pass);
4677 tmp2 = new_tmp();
4678 tcg_gen_movi_i32(tmp2, imm);
4679 switch (op) {
4680 case 0: /* VSHR */
4681 case 1: /* VSRA */
4682 GEN_NEON_INTEGER_OP(shl);
4683 break;
4684 case 2: /* VRSHR */
4685 case 3: /* VRSRA */
4686 GEN_NEON_INTEGER_OP(rshl);
4687 break;
4688 case 4: /* VSRI */
4689 if (!u)
4690 return 1;
4691 GEN_NEON_INTEGER_OP(shl);
4692 break;
4693 case 5: /* VSHL, VSLI */
4694 switch (size) {
4695 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
4696 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
4697 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
4698 default: return 1;
4699 }
4700 break;
4701 case 6: /* VQSHL */
4702 GEN_NEON_INTEGER_OP_ENV(qshl);
4703 break;
4704 case 7: /* VQSHLU */
4705 switch (size) {
4706 case 0: gen_helper_neon_qshl_u8(tmp, cpu_env, tmp, tmp2); break;
4707 case 1: gen_helper_neon_qshl_u16(tmp, cpu_env, tmp, tmp2); break;
4708 case 2: gen_helper_neon_qshl_u32(tmp, cpu_env, tmp, tmp2); break;
4709 default: return 1;
4710 }
4711 break;
4712 }
4713 dead_tmp(tmp2);
4714
4715 if (op == 1 || op == 3) {
4716 /* Accumulate. */
4717 tmp2 = neon_load_reg(rd, pass);
4718 gen_neon_add(size, tmp2, tmp);
4719 dead_tmp(tmp2);
4720 } else if (op == 4 || (op == 5 && u)) {
4721 /* Insert */
4722 switch (size) {
4723 case 0:
4724 if (op == 4)
4725 mask = 0xff >> -shift;
4726 else
4727 mask = (uint8_t)(0xff << shift);
4728 mask |= mask << 8;
4729 mask |= mask << 16;
4730 break;
4731 case 1:
4732 if (op == 4)
4733 mask = 0xffff >> -shift;
4734 else
4735 mask = (uint16_t)(0xffff << shift);
4736 mask |= mask << 16;
4737 break;
4738 case 2:
4739 if (shift < -31 || shift > 31) {
4740 mask = 0;
4741 } else {
4742 if (op == 4)
4743 mask = 0xffffffffu >> -shift;
4744 else
4745 mask = 0xffffffffu << shift;
4746 }
4747 break;
4748 default:
4749 abort();
4750 }
4751 tmp2 = neon_load_reg(rd, pass);
4752 tcg_gen_andi_i32(tmp, tmp, mask);
4753 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
4754 tcg_gen_or_i32(tmp, tmp, tmp2);
4755 dead_tmp(tmp2);
4756 }
4757 neon_store_reg(rd, pass, tmp);
4758 }
4759 } /* for pass */
4760 } else if (op < 10) {
4761 /* Shift by immediate and narrow:
4762 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4763 shift = shift - (1 << (size + 3));
4764 size++;
4765 switch (size) {
4766 case 1:
4767 imm = (uint16_t)shift;
4768 imm |= imm << 16;
4769 tmp2 = tcg_const_i32(imm);
4770 TCGV_UNUSED_I64(tmp64);
4771 break;
4772 case 2:
4773 imm = (uint32_t)shift;
4774 tmp2 = tcg_const_i32(imm);
4775 TCGV_UNUSED_I64(tmp64);
4776 break;
4777 case 3:
4778 tmp64 = tcg_const_i64(shift);
4779 TCGV_UNUSED(tmp2);
4780 break;
4781 default:
4782 abort();
4783 }
4784
4785 for (pass = 0; pass < 2; pass++) {
4786 if (size == 3) {
4787 neon_load_reg64(cpu_V0, rm + pass);
4788 if (q) {
4789 if (u)
4790 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
4791 else
4792 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
4793 } else {
4794 if (u)
4795 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
4796 else
4797 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
4798 }
4799 } else {
4800 tmp = neon_load_reg(rm + pass, 0);
4801 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4802 tmp3 = neon_load_reg(rm + pass, 1);
4803 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4804 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
4805 dead_tmp(tmp);
4806 dead_tmp(tmp3);
4807 }
4808 tmp = new_tmp();
4809 if (op == 8 && !u) {
4810 gen_neon_narrow(size - 1, tmp, cpu_V0);
4811 } else {
4812 if (op == 8)
4813 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
4814 else
4815 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4816 }
4817 neon_store_reg(rd, pass, tmp);
4818 } /* for pass */
4819 if (size == 3) {
4820 tcg_temp_free_i64(tmp64);
4821 } else {
4822 dead_tmp(tmp2);
4823 }
4824 } else if (op == 10) {
4825 /* VSHLL */
4826 if (q || size == 3)
4827 return 1;
4828 tmp = neon_load_reg(rm, 0);
4829 tmp2 = neon_load_reg(rm, 1);
4830 for (pass = 0; pass < 2; pass++) {
4831 if (pass == 1)
4832 tmp = tmp2;
4833
4834 gen_neon_widen(cpu_V0, tmp, size, u);
4835
4836 if (shift != 0) {
4837 /* The shift is less than the width of the source
4838 type, so we can just shift the whole register. */
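/* e.g. VSHLL.S8 Qd, Dm, #4: each byte is sign-extended to 16
 bits before the single 64-bit shift, so the low 4 bits of each
 lane pick up sign bits spilled from the lane below; the mask
 below clears them. */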
4839 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4840 if (size < 2 || !u) {
4841 uint64_t imm64;
4842 if (size == 0) {
4843 imm = (0xffu >> (8 - shift));
4844 imm |= imm << 16;
4845 } else {
4846 imm = 0xffff >> (16 - shift);
4847 }
4848 imm64 = imm | (((uint64_t)imm) << 32);
4849 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
4850 }
4851 }
4852 neon_store_reg64(cpu_V0, rd + pass);
4853 }
4854 } else if (op >= 14) {
4855 /* VCVT fixed-point. */
4856 /* We have already masked out the must-be-1 top bit of imm6,
4857 * hence this 32-shift where the ARM ARM has 64-imm6.
4858 */
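/* e.g. imm6 = 0b101000 (40) means 64 - 40 = 24 fraction bits;
 with the top bit masked off the shift field reads 8, and
 32 - 8 gives the same 24. */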
4859 shift = 32 - shift;
4860 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4861 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
4862 if (!(op & 1)) {
4863 if (u)
4864 gen_vfp_ulto(0, shift);
4865 else
4866 gen_vfp_slto(0, shift);
4867 } else {
4868 if (u)
4869 gen_vfp_toul(0, shift);
4870 else
4871 gen_vfp_tosl(0, shift);
4872 }
4873 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
4874 }
4875 } else {
4876 return 1;
4877 }
4878 } else { /* (insn & 0x00380080) == 0 */
4879 int invert;
4880
4881 op = (insn >> 8) & 0xf;
4882 /* One register and immediate. */
4883 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4884 invert = (insn & (1 << 5)) != 0;
4885 switch (op) {
4886 case 0: case 1:
4887 /* no-op */
4888 break;
4889 case 2: case 3:
4890 imm <<= 8;
4891 break;
4892 case 4: case 5:
4893 imm <<= 16;
4894 break;
4895 case 6: case 7:
4896 imm <<= 24;
4897 break;
4898 case 8: case 9:
4899 imm |= imm << 16;
4900 break;
4901 case 10: case 11:
4902 imm = (imm << 8) | (imm << 24);
4903 break;
4904 case 12:
4905 imm = (imm << 8) | 0xff;
4906 break;
4907 case 13:
4908 imm = (imm << 16) | 0xffff;
4909 break;
4910 case 14:
4911 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4912 if (invert)
4913 imm = ~imm;
4914 break;
4915 case 15:
4916 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4917 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4918 break;
4919 }
4920 if (invert)
4921 imm = ~imm;
4922
4923 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4924 if (op & 1 && op < 12) {
4925 tmp = neon_load_reg(rd, pass);
4926 if (invert) {
4927 /* The immediate value has already been inverted, so
4928 BIC becomes AND. */
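/* e.g. VBIC.I32 Dd, #0xff00 arrives here with imm already
 inverted to 0xffff00ff and is applied as an AND. */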
4929 tcg_gen_andi_i32(tmp, tmp, imm);
4930 } else {
4931 tcg_gen_ori_i32(tmp, tmp, imm);
4932 }
4933 } else {
4934 /* VMOV, VMVN. */
4935 tmp = new_tmp();
4936 if (op == 14 && invert) {
4937 uint32_t val;
4938 val = 0;
4939 for (n = 0; n < 4; n++) {
4940 if (imm & (1 << (n + (pass & 1) * 4)))
4941 val |= 0xff << (n * 8);
4942 }
4943 tcg_gen_movi_i32(tmp, val);
4944 } else {
4945 tcg_gen_movi_i32(tmp, imm);
4946 }
4947 }
4948 neon_store_reg(rd, pass, tmp);
4949 }
4950 }
4951 } else { /* (insn & 0x00800010) == 0x00800000 */
4952 if (size != 3) {
4953 op = (insn >> 8) & 0xf;
4954 if ((insn & (1 << 6)) == 0) {
4955 /* Three registers of different lengths. */
4956 int src1_wide;
4957 int src2_wide;
4958 int prewiden;
4959 /* prewiden, src1_wide, src2_wide */
4960 static const int neon_3reg_wide[16][3] = {
4961 {1, 0, 0}, /* VADDL */
4962 {1, 1, 0}, /* VADDW */
4963 {1, 0, 0}, /* VSUBL */
4964 {1, 1, 0}, /* VSUBW */
4965 {0, 1, 1}, /* VADDHN */
4966 {0, 0, 0}, /* VABAL */
4967 {0, 1, 1}, /* VSUBHN */
4968 {0, 0, 0}, /* VABDL */
4969 {0, 0, 0}, /* VMLAL */
4970 {0, 0, 0}, /* VQDMLAL */
4971 {0, 0, 0}, /* VMLSL */
4972 {0, 0, 0}, /* VQDMLSL */
4973 {0, 0, 0}, /* Integer VMULL */
4974 {0, 0, 0}, /* VQDMULL */
4975 {0, 0, 0} /* Polynomial VMULL */
4976 };
4977
4978 prewiden = neon_3reg_wide[op][0];
4979 src1_wide = neon_3reg_wide[op][1];
4980 src2_wide = neon_3reg_wide[op][2];
4981
4982 if (size == 0 && (op == 9 || op == 11 || op == 13))
4983 return 1;
4984
4985 /* Avoid overlapping operands. Wide source operands are
4986 always aligned so will never overlap with wide
4987 destinations in problematic ways. */
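/* e.g. VMULL Qd, Dn, Dm where Dm is the low half of Qd: pass 0
 writes that doubleword, so Dm's high element is copied to a
 scratch register first and reloaded for pass 1. */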
4988 if (rd == rm && !src2_wide) {
4989 tmp = neon_load_reg(rm, 1);
4990 neon_store_scratch(2, tmp);
4991 } else if (rd == rn && !src1_wide) {
4992 tmp = neon_load_reg(rn, 1);
4993 neon_store_scratch(2, tmp);
4994 }
4995 TCGV_UNUSED(tmp3);
4996 for (pass = 0; pass < 2; pass++) {
4997 if (src1_wide) {
4998 neon_load_reg64(cpu_V0, rn + pass);
4999 TCGV_UNUSED(tmp);
5000 } else {
5001 if (pass == 1 && rd == rn) {
5002 tmp = neon_load_scratch(2);
5003 } else {
5004 tmp = neon_load_reg(rn, pass);
5005 }
5006 if (prewiden) {
5007 gen_neon_widen(cpu_V0, tmp, size, u);
5008 }
5009 }
5010 if (src2_wide) {
5011 neon_load_reg64(cpu_V1, rm + pass);
5012 TCGV_UNUSED(tmp2);
5013 } else {
5014 if (pass == 1 && rd == rm) {
5015 tmp2 = neon_load_scratch(2);
5016 } else {
5017 tmp2 = neon_load_reg(rm, pass);
5018 }
5019 if (prewiden) {
5020 gen_neon_widen(cpu_V1, tmp2, size, u);
5021 }
5022 }
5023 switch (op) {
5024 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
5025 gen_neon_addl(size);
5026 break;
5027 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
5028 gen_neon_subl(size);
5029 break;
5030 case 5: case 7: /* VABAL, VABDL */
5031 switch ((size << 1) | u) {
5032 case 0:
5033 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
5034 break;
5035 case 1:
5036 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
5037 break;
5038 case 2:
5039 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
5040 break;
5041 case 3:
5042 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
5043 break;
5044 case 4:
5045 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
5046 break;
5047 case 5:
5048 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
5049 break;
5050 default: abort();
5051 }
5052 dead_tmp(tmp2);
5053 dead_tmp(tmp);
5054 break;
5055 case 8: case 9: case 10: case 11: case 12: case 13:
5056 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
5057 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5058 dead_tmp(tmp2);
5059 dead_tmp(tmp);
5060 break;
5061 case 14: /* Polynomial VMULL */
5062 cpu_abort(env, "Polynomial VMULL not implemented");
5063
5064 default: /* 15 is RESERVED. */
5065 return 1;
5066 }
5067 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
5068 /* Accumulate. */
5069 if (op == 10 || op == 11) {
5070 gen_neon_negl(cpu_V0, size);
5071 }
5072
5073 if (op != 13) {
5074 neon_load_reg64(cpu_V1, rd + pass);
5075 }
5076
5077 switch (op) {
5078 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
5079 gen_neon_addl(size);
5080 break;
5081 case 9: case 11: /* VQDMLAL, VQDMLSL */
5082 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5083 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5084 break;
5086 case 13: /* VQDMULL */
5087 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5088 break;
5089 default:
5090 abort();
5091 }
5092 neon_store_reg64(cpu_V0, rd + pass);
5093 } else if (op == 4 || op == 6) {
5094 /* Narrowing operation. */
5095 tmp = new_tmp();
5096 if (!u) {
5097 switch (size) {
5098 case 0:
5099 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
5100 break;
5101 case 1:
5102 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
5103 break;
5104 case 2:
5105 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5106 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5107 break;
5108 default: abort();
5109 }
5110 } else {
5111 switch (size) {
5112 case 0:
5113 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
5114 break;
5115 case 1:
5116 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
5117 break;
5118 case 2:
5119 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
5120 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
5121 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5122 break;
5123 default: abort();
5124 }
5125 }
5126 if (pass == 0) {
5127 tmp3 = tmp;
5128 } else {
5129 neon_store_reg(rd, 0, tmp3);
5130 neon_store_reg(rd, 1, tmp);
5131 }
5132 } else {
5133 /* Write back the result. */
5134 neon_store_reg64(cpu_V0, rd + pass);
5135 }
5136 }
5137 } else {
5138 /* Two registers and a scalar. */
5139 switch (op) {
5140 case 0: /* Integer VMLA scalar */
5141 case 1: /* Float VMLA scalar */
5142 case 4: /* Integer VMLS scalar */
5143 case 5: /* Floating point VMLS scalar */
5144 case 8: /* Integer VMUL scalar */
5145 case 9: /* Floating point VMUL scalar */
5146 case 12: /* VQDMULH scalar */
5147 case 13: /* VQRDMULH scalar */
5148 tmp = neon_get_scalar(size, rm);
5149 neon_store_scratch(0, tmp);
5150 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5151 tmp = neon_load_scratch(0);
5152 tmp2 = neon_load_reg(rn, pass);
5153 if (op == 12) {
5154 if (size == 1) {
5155 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5156 } else {
5157 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5158 }
5159 } else if (op == 13) {
5160 if (size == 1) {
5161 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5162 } else {
5163 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5164 }
5165 } else if (op & 1) {
5166 gen_helper_neon_mul_f32(tmp, tmp, tmp2);
5167 } else {
5168 switch (size) {
5169 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5170 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5171 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
5172 default: return 1;
5173 }
5174 }
5175 dead_tmp(tmp2);
5176 if (op < 8) {
5177 /* Accumulate. */
5178 tmp2 = neon_load_reg(rd, pass);
5179 switch (op) {
5180 case 0:
5181 gen_neon_add(size, tmp, tmp2);
5182 break;
5183 case 1:
5184 gen_helper_neon_add_f32(tmp, tmp, tmp2);
5185 break;
5186 case 4:
5187 gen_neon_rsb(size, tmp, tmp2);
5188 break;
5189 case 5:
5190 gen_helper_neon_sub_f32(tmp, tmp2, tmp);
5191 break;
5192 default:
5193 abort();
5194 }
5195 dead_tmp(tmp2);
5196 }
5197 neon_store_reg(rd, pass, tmp);
5198 }
5199 break;
5200 case 2: /* VMLAL scalar */
5201 case 3: /* VQDMLAL scalar */
5202 case 6: /* VMLSL scalar */
5203 case 7: /* VQDMLSL scalar */
5204 case 10: /* VMULL scalar */
5205 case 11: /* VQDMULL scalar */
5206 if (size == 0 && (op == 3 || op == 7 || op == 11))
5207 return 1;
5208
5209 tmp2 = neon_get_scalar(size, rm);
5210 tmp3 = neon_load_reg(rn, 1);
5211
5212 for (pass = 0; pass < 2; pass++) {
5213 if (pass == 0) {
5214 tmp = neon_load_reg(rn, 0);
5215 } else {
5216 tmp = tmp3;
5217 }
5218 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
5219 dead_tmp(tmp);
5220 if (op == 6 || op == 7) {
5221 gen_neon_negl(cpu_V0, size);
5222 }
5223 if (op != 11) {
5224 neon_load_reg64(cpu_V1, rd + pass);
5225 }
5226 switch (op) {
5227 case 2: case 6:
5228 gen_neon_addl(size);
5229 break;
5230 case 3: case 7:
5231 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5232 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
5233 break;
5234 case 10:
5235 /* no-op */
5236 break;
5237 case 11:
5238 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5239 break;
5240 default:
5241 abort();
5242 }
5243 neon_store_reg64(cpu_V0, rd + pass);
5244 }
5245
5246 dead_tmp(tmp2);
5247
5248 break;
5249 default: /* 14 and 15 are RESERVED */
5250 return 1;
5251 }
5252 }
5253 } else { /* size == 3 */
5254 if (!u) {
5255 /* Extract. */
5256 imm = (insn >> 8) & 0xf;
5257
5258 if (imm > 7 && !q)
5259 return 1;
5260
5261 if (imm == 0) {
5262 neon_load_reg64(cpu_V0, rn);
5263 if (q) {
5264 neon_load_reg64(cpu_V1, rn + 1);
5265 }
5266 } else if (imm == 8) {
5267 neon_load_reg64(cpu_V0, rn + 1);
5268 if (q) {
5269 neon_load_reg64(cpu_V1, rm);
5270 }
5271 } else if (q) {
5272 tmp64 = tcg_temp_new_i64();
5273 if (imm < 8) {
5274 neon_load_reg64(cpu_V0, rn);
5275 neon_load_reg64(tmp64, rn + 1);
5276 } else {
5277 neon_load_reg64(cpu_V0, rn + 1);
5278 neon_load_reg64(tmp64, rm);
5279 }
5280 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5281 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
5282 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5283 if (imm < 8) {
5284 neon_load_reg64(cpu_V1, rm);
5285 } else {
5286 neon_load_reg64(cpu_V1, rm + 1);
5287 imm -= 8;
5288 }
5289 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5290 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5291 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
5292 tcg_temp_free_i64(tmp64);
5293 } else {
5294 /* BUGFIX */
5295 neon_load_reg64(cpu_V0, rn);
5296 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
5297 neon_load_reg64(cpu_V1, rm);
5298 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5299 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5300 }
5301 neon_store_reg64(cpu_V0, rd);
5302 if (q) {
5303 neon_store_reg64(cpu_V1, rd + 1);
5304 }
5305 } else if ((insn & (1 << 11)) == 0) {
5306 /* Two register misc. */
5307 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5308 size = (insn >> 18) & 3;
5309 switch (op) {
5310 case 0: /* VREV64 */
5311 if (size == 3)
5312 return 1;
5313 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5314 tmp = neon_load_reg(rm, pass * 2);
5315 tmp2 = neon_load_reg(rm, pass * 2 + 1);
5316 switch (size) {
5317 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5318 case 1: gen_swap_half(tmp); break;
5319 case 2: /* no-op */ break;
5320 default: abort();
5321 }
5322 neon_store_reg(rd, pass * 2 + 1, tmp);
5323 if (size == 2) {
5324 neon_store_reg(rd, pass * 2, tmp2);
5325 } else {
5326 switch (size) {
5327 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
5328 case 1: gen_swap_half(tmp2); break;
5329 default: abort();
5330 }
5331 neon_store_reg(rd, pass * 2, tmp2);
5332 }
5333 }
5334 break;
5335 case 4: case 5: /* VPADDL */
5336 case 12: case 13: /* VPADAL */
5337 if (size == 3)
5338 return 1;
5339 for (pass = 0; pass < q + 1; pass++) {
5340 tmp = neon_load_reg(rm, pass * 2);
5341 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5342 tmp = neon_load_reg(rm, pass * 2 + 1);
5343 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5344 switch (size) {
5345 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5346 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5347 case 2: tcg_gen_add_i64(CPU_V001); break;
5348 default: abort();
5349 }
5350 if (op >= 12) {
5351 /* Accumulate. */
5352 neon_load_reg64(cpu_V1, rd + pass);
5353 gen_neon_addl(size);
5354 }
5355 neon_store_reg64(cpu_V0, rd + pass);
5356 }
5357 break;
5358 case 33: /* VTRN */
5359 if (size == 2) {
5360 for (n = 0; n < (q ? 4 : 2); n += 2) {
5361 tmp = neon_load_reg(rm, n);
5362 tmp2 = neon_load_reg(rd, n + 1);
5363 neon_store_reg(rm, n, tmp2);
5364 neon_store_reg(rd, n + 1, tmp);
5365 }
5366 } else {
5367 goto elementwise;
5368 }
5369 break;
5370 case 34: /* VUZP */
5371 /* Reg Before After
5372 Rd A3 A2 A1 A0 B2 B0 A2 A0
5373 Rm B3 B2 B1 B0 B3 B1 A3 A1
5374 */
5375 if (size == 3)
5376 return 1;
5377 gen_neon_unzip(rd, q, 0, size);
5378 gen_neon_unzip(rm, q, 4, size);
5379 if (q) {
5380 static int unzip_order_q[8] =
5381 {0, 2, 4, 6, 1, 3, 5, 7};
5382 for (n = 0; n < 8; n++) {
5383 int reg = (n < 4) ? rd : rm;
5384 tmp = neon_load_scratch(unzip_order_q[n]);
5385 neon_store_reg(reg, n % 4, tmp);
5386 }
5387 } else {
5388 static int unzip_order[4] =
5389 {0, 4, 1, 5};
5390 for (n = 0; n < 4; n++) {
5391 int reg = (n < 2) ? rd : rm;
5392 tmp = neon_load_scratch(unzip_order[n]);
5393 neon_store_reg(reg, n % 2, tmp);
5394 }
5395 }
5396 break;
5397 case 35: /* VZIP */
5398 /* Reg Before After
5399 Rd A3 A2 A1 A0 B1 A1 B0 A0
5400 Rm B3 B2 B1 B0 B3 A3 B2 A2
5401 */
5402 if (size == 3)
5403 return 1;
5404 count = (q ? 4 : 2);
5405 for (n = 0; n < count; n++) {
5406 tmp = neon_load_reg(rd, n);
5407 tmp2 = neon_load_reg(rm, n);
5408 switch (size) {
5409 case 0: gen_neon_zip_u8(tmp, tmp2); break;
5410 case 1: gen_neon_zip_u16(tmp, tmp2); break;
5411 case 2: /* no-op */; break;
5412 default: abort();
5413 }
5414 neon_store_scratch(n * 2, tmp);
5415 neon_store_scratch(n * 2 + 1, tmp2);
5416 }
5417 for (n = 0; n < count * 2; n++) {
5418 int reg = (n < count) ? rd : rm;
5419 tmp = neon_load_scratch(n);
5420 neon_store_reg(reg, n % count, tmp);
5421 }
5422 break;
5423 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
5424 if (size == 3)
5425 return 1;
5426 TCGV_UNUSED(tmp2);
5427 for (pass = 0; pass < 2; pass++) {
5428 neon_load_reg64(cpu_V0, rm + pass);
5429 tmp = new_tmp();
5430 if (op == 36 && q == 0) {
5431 gen_neon_narrow(size, tmp, cpu_V0);
5432 } else if (q) {
5433 gen_neon_narrow_satu(size, tmp, cpu_V0);
5434 } else {
5435 gen_neon_narrow_sats(size, tmp, cpu_V0);
5436 }
5437 if (pass == 0) {
5438 tmp2 = tmp;
5439 } else {
5440 neon_store_reg(rd, 0, tmp2);
5441 neon_store_reg(rd, 1, tmp);
5442 }
5443 }
5444 break;
5445 case 38: /* VSHLL */
5446 if (q || size == 3)
5447 return 1;
5448 tmp = neon_load_reg(rm, 0);
5449 tmp2 = neon_load_reg(rm, 1);
5450 for (pass = 0; pass < 2; pass++) {
5451 if (pass == 1)
5452 tmp = tmp2;
5453 gen_neon_widen(cpu_V0, tmp, size, 1);
5454 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
5455 neon_store_reg64(cpu_V0, rd + pass);
5456 }
5457 break;
5458 case 44: /* VCVT.F16.F32 */
5459 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5460 return 1;
5461 tmp = new_tmp();
5462 tmp2 = new_tmp();
5463 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
5464 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5465 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
5466 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5467 tcg_gen_shli_i32(tmp2, tmp2, 16);
5468 tcg_gen_or_i32(tmp2, tmp2, tmp);
5469 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
5470 gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
5471 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
5472 neon_store_reg(rd, 0, tmp2);
5473 tmp2 = new_tmp();
5474 gen_helper_vfp_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
5475 tcg_gen_shli_i32(tmp2, tmp2, 16);
5476 tcg_gen_or_i32(tmp2, tmp2, tmp);
5477 neon_store_reg(rd, 1, tmp2);
5478 dead_tmp(tmp);
5479 break;
5480 case 46: /* VCVT.F32.F16 */
5481 if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
5482 return 1;
5483 tmp3 = new_tmp();
5484 tmp = neon_load_reg(rm, 0);
5485 tmp2 = neon_load_reg(rm, 1);
5486 tcg_gen_ext16u_i32(tmp3, tmp);
5487 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5488 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
5489 tcg_gen_shri_i32(tmp3, tmp, 16);
5490 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5491 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
5492 dead_tmp(tmp);
5493 tcg_gen_ext16u_i32(tmp3, tmp2);
5494 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5495 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
5496 tcg_gen_shri_i32(tmp3, tmp2, 16);
5497 gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
5498 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
5499 dead_tmp(tmp2);
5500 dead_tmp(tmp3);
5501 break;
5502 default:
5503 elementwise:
5504 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5505 if (op == 30 || op == 31 || op >= 58) {
5506 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5507 neon_reg_offset(rm, pass));
5508 TCGV_UNUSED(tmp);
5509 } else {
5510 tmp = neon_load_reg(rm, pass);
5511 }
5512 switch (op) {
5513 case 1: /* VREV32 */
5514 switch (size) {
5515 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
5516 case 1: gen_swap_half(tmp); break;
5517 default: return 1;
5518 }
5519 break;
5520 case 2: /* VREV16 */
5521 if (size != 0)
5522 return 1;
5523 gen_rev16(tmp);
5524 break;
5525 case 8: /* CLS */
5526 switch (size) {
5527 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
5528 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
5529 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
5530 default: return 1;
5531 }
5532 break;
5533 case 9: /* CLZ */
5534 switch (size) {
5535 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
5536 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
5537 case 2: gen_helper_clz(tmp, tmp); break;
5538 default: return 1;
5539 }
5540 break;
5541 case 10: /* CNT */
5542 if (size != 0)
5543 return 1;
5544 gen_helper_neon_cnt_u8(tmp, tmp);
5545 break;
5546 case 11: /* VNOT */
5547 if (size != 0)
5548 return 1;
5549 tcg_gen_not_i32(tmp, tmp);
5550 break;
5551 case 14: /* VQABS */
5552 switch (size) {
5553 case 0: gen_helper_neon_qabs_s8(tmp, cpu_env, tmp); break;
5554 case 1: gen_helper_neon_qabs_s16(tmp, cpu_env, tmp); break;
5555 case 2: gen_helper_neon_qabs_s32(tmp, cpu_env, tmp); break;
5556 default: return 1;
5557 }
5558 break;
5559 case 15: /* VQNEG */
5560 switch (size) {
5561 case 0: gen_helper_neon_qneg_s8(tmp, cpu_env, tmp); break;
5562 case 1: gen_helper_neon_qneg_s16(tmp, cpu_env, tmp); break;
5563 case 2: gen_helper_neon_qneg_s32(tmp, cpu_env, tmp); break;
5564 default: return 1;
5565 }
5566 break;
5567 case 16: case 19: /* VCGT #0, VCLE #0 */
5568 tmp2 = tcg_const_i32(0);
5569 switch(size) {
5570 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
5571 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
5572 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
5573 default: return 1;
5574 }
5575 tcg_temp_free(tmp2);
5576 if (op == 19)
5577 tcg_gen_not_i32(tmp, tmp);
5578 break;
5579 case 17: case 20: /* VCGE #0, VCLT #0 */
5580 tmp2 = tcg_const_i32(0);
5581 switch(size) {
5582 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
5583 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
5584 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
5585 default: return 1;
5586 }
5587 tcg_temp_free(tmp2);
5588 if (op == 20)
5589 tcg_gen_not_i32(tmp, tmp);
5590 break;
5591 case 18: /* VCEQ #0 */
5592 tmp2 = tcg_const_i32(0);
5593 switch(size) {
5594 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5595 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5596 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
5597 default: return 1;
5598 }
5599 tcg_temp_free(tmp2);
5600 break;
5601 case 22: /* VABS */
5602 switch(size) {
5603 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
5604 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
5605 case 2: tcg_gen_abs_i32(tmp, tmp); break;
5606 default: return 1;
5607 }
5608 break;
5609 case 23: /* VNEG */
5610 if (size == 3)
5611 return 1;
5612 tmp2 = tcg_const_i32(0);
5613 gen_neon_rsb(size, tmp, tmp2);
5614 tcg_temp_free(tmp2);
5615 break;
5616 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5617 tmp2 = tcg_const_i32(0);
5618 gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
5619 tcg_temp_free(tmp2);
5620 if (op == 27)
5621 tcg_gen_not_i32(tmp, tmp);
5622 break;
5623 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5624 tmp2 = tcg_const_i32(0);
5625 gen_helper_neon_cge_f32(tmp, tmp, tmp2);
5626 tcg_temp_free(tmp2);
5627 if (op == 28)
5628 tcg_gen_not_i32(tmp, tmp);
5629 break;
5630 case 26: /* Float VCEQ #0 */
5631 tmp2 = tcg_const_i32(0);
5632 gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
5633 tcg_temp_free(tmp2);
5634 break;
5635 case 30: /* Float VABS */
5636 gen_vfp_abs(0);
5637 break;
5638 case 31: /* Float VNEG */
5639 gen_vfp_neg(0);
5640 break;
5641 case 32: /* VSWP */
5642 tmp2 = neon_load_reg(rd, pass);
5643 neon_store_reg(rm, pass, tmp2);
5644 break;
5645 case 33: /* VTRN */
5646 tmp2 = neon_load_reg(rd, pass);
5647 switch (size) {
5648 case 0: gen_neon_trn_u8(tmp, tmp2); break;
5649 case 1: gen_neon_trn_u16(tmp, tmp2); break;
5650 case 2: abort();
5651 default: return 1;
5652 }
5653 neon_store_reg(rm, pass, tmp2);
5654 break;
5655 case 56: /* Integer VRECPE */
5656 gen_helper_recpe_u32(tmp, tmp, cpu_env);
5657 break;
5658 case 57: /* Integer VRSQRTE */
5659 gen_helper_rsqrte_u32(tmp, tmp, cpu_env);
5660 break;
5661 case 58: /* Float VRECPE */
5662 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
5663 break;
5664 case 59: /* Float VRSQRTE */
5665 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
5666 break;
5667 case 60: /* VCVT.F32.S32 */
5668 gen_vfp_sito(0);
5669 break;
5670 case 61: /* VCVT.F32.U32 */
5671 gen_vfp_uito(0);
5672 break;
5673 case 62: /* VCVT.S32.F32 */
5674 gen_vfp_tosiz(0);
5675 break;
5676 case 63: /* VCVT.U32.F32 */
5677 gen_vfp_touiz(0);
5678 break;
5679 default:
5680 /* Reserved: 3, 6, 7, 21, 29, 39-43, 45, 47-55 */
5681 return 1;
5682 }
5683 if (op == 30 || op == 31 || op >= 58) {
5684 tcg_gen_st_f32(cpu_F0s, cpu_env,
5685 neon_reg_offset(rd, pass));
5686 } else {
5687 neon_store_reg(rd, pass, tmp);
5688 }
5689 }
5690 break;
5691 }
5692 } else if ((insn & (1 << 10)) == 0) {
5693 /* VTBL, VTBX. */
5694 n = ((insn >> 5) & 0x18) + 8;
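/* n = table size in bytes: the len field selects 1-4
 consecutive D registers, i.e. 8, 16, 24 or 32 bytes. */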
5695 if (insn & (1 << 6)) {
5696 tmp = neon_load_reg(rd, 0);
5697 } else {
5698 tmp = new_tmp();
5699 tcg_gen_movi_i32(tmp, 0);
5700 }
5701 tmp2 = neon_load_reg(rm, 0);
5702 tmp4 = tcg_const_i32(rn);
5703 tmp5 = tcg_const_i32(n);
5704 gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
5705 dead_tmp(tmp);
5706 if (insn & (1 << 6)) {
5707 tmp = neon_load_reg(rd, 1);
5708 } else {
5709 tmp = new_tmp();
5710 tcg_gen_movi_i32(tmp, 0);
5711 }
5712 tmp3 = neon_load_reg(rm, 1);
5713 gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
5714 tcg_temp_free_i32(tmp5);
5715 tcg_temp_free_i32(tmp4);
5716 neon_store_reg(rd, 0, tmp2);
5717 neon_store_reg(rd, 1, tmp3);
5718 dead_tmp(tmp);
5719 } else if ((insn & 0x380) == 0) {
5720 /* VDUP */
5721 if (insn & (1 << 19)) {
5722 tmp = neon_load_reg(rm, 1);
5723 } else {
5724 tmp = neon_load_reg(rm, 0);
5725 }
5726 if (insn & (1 << 16)) {
5727 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
5728 } else if (insn & (1 << 17)) {
5729 if ((insn >> 18) & 1)
5730 gen_neon_dup_high16(tmp);
5731 else
5732 gen_neon_dup_low16(tmp);
5733 }
5734 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5735 tmp2 = new_tmp();
5736 tcg_gen_mov_i32(tmp2, tmp);
5737 neon_store_reg(rd, pass, tmp2);
5738 }
5739 dead_tmp(tmp);
5740 } else {
5741 return 1;
5742 }
5743 }
5744 }
5745 return 0;
5746 }
5747
5748 static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
5749 {
5750 int crn = (insn >> 16) & 0xf;
5751 int crm = insn & 0xf;
5752 int op1 = (insn >> 21) & 7;
5753 int op2 = (insn >> 5) & 7;
5754 int rt = (insn >> 12) & 0xf;
5755 TCGv tmp;
5756
5757 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5758 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5759 /* TEECR */
5760 if (IS_USER(s))
5761 return 1;
5762 tmp = load_cpu_field(teecr);
5763 store_reg(s, rt, tmp);
5764 return 0;
5765 }
5766 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5767 /* TEEHBR */
5768 if (IS_USER(s) && (env->teecr & 1))
5769 return 1;
5770 tmp = load_cpu_field(teehbr);
5771 store_reg(s, rt, tmp);
5772 return 0;
5773 }
5774 }
5775 fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
5776 op1, crn, crm, op2);
5777 return 1;
5778 }
5779
5780 static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
5781 {
5782 int crn = (insn >> 16) & 0xf;
5783 int crm = insn & 0xf;
5784 int op1 = (insn >> 21) & 7;
5785 int op2 = (insn >> 5) & 7;
5786 int rt = (insn >> 12) & 0xf;
5787 TCGv tmp;
5788
5789 if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
5790 if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
5791 /* TEECR */
5792 if (IS_USER(s))
5793 return 1;
5794 tmp = load_reg(s, rt);
5795 gen_helper_set_teecr(cpu_env, tmp);
5796 dead_tmp(tmp);
5797 return 0;
5798 }
5799 if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
5800 /* TEEHBR */
5801 if (IS_USER(s) && (env->teecr & 1))
5802 return 1;
5803 tmp = load_reg(s, rt);
5804 store_cpu_field(tmp, teehbr);
5805 return 0;
5806 }
5807 }
5808 fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
5809 op1, crn, crm, op2);
5810 return 1;
5811 }
5812
5813 static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5814 {
5815 int cpnum;
5816
5817 cpnum = (insn >> 8) & 0xf;
5818 if (arm_feature(env, ARM_FEATURE_XSCALE)
5819 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5820 return 1;
5821
5822 switch (cpnum) {
5823 case 0:
5824 case 1:
5825 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5826 return disas_iwmmxt_insn(env, s, insn);
5827 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5828 return disas_dsp_insn(env, s, insn);
5829 }
5830 return 1;
5831 case 10:
5832 case 11:
5833 return disas_vfp_insn (env, s, insn);
5834 case 14:
5835 /* Coprocessors 7-15 are architecturally reserved by ARM.
5836 Unfortunately Intel decided to ignore this. */
5837 if (arm_feature(env, ARM_FEATURE_XSCALE))
5838 goto board;
5839 if (insn & (1 << 20))
5840 return disas_cp14_read(env, s, insn);
5841 else
5842 return disas_cp14_write(env, s, insn);
5843 case 15:
5844 return disas_cp15_insn (env, s, insn);
5845 default:
5846 board:
5847 /* Unknown coprocessor. See if the board has hooked it. */
5848 return disas_cp_insn (env, s, insn);
5849 }
5850 }
5851
5852
5853 /* Store a 64-bit value to a register pair. Clobbers val. */
5854 static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5855 {
5856 TCGv tmp;
5857 tmp = new_tmp();
5858 tcg_gen_trunc_i64_i32(tmp, val);
5859 store_reg(s, rlow, tmp);
5860 tmp = new_tmp();
5861 tcg_gen_shri_i64(val, val, 32);
5862 tcg_gen_trunc_i64_i32(tmp, val);
5863 store_reg(s, rhigh, tmp);
5864 }
5865
5866 /* load a 32-bit value from a register and perform a 64-bit accumulate. */
5867 static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5868 {
5869 TCGv_i64 tmp;
5870 TCGv tmp2;
5871
5872 /* Load value and extend to 64 bits. */
5873 tmp = tcg_temp_new_i64();
5874 tmp2 = load_reg(s, rlow);
5875 tcg_gen_extu_i32_i64(tmp, tmp2);
5876 dead_tmp(tmp2);
5877 tcg_gen_add_i64(val, val, tmp);
5878 tcg_temp_free_i64(tmp);
5879 }
5880
5881 /* load and add a 64-bit value from a register pair. */
5882 static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5883 {
5884 TCGv_i64 tmp;
5885 TCGv tmpl;
5886 TCGv tmph;
5887
5888 /* Load 64-bit value rd:rn. */
5889 tmpl = load_reg(s, rlow);
5890 tmph = load_reg(s, rhigh);
5891 tmp = tcg_temp_new_i64();
5892 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5893 dead_tmp(tmpl);
5894 dead_tmp(tmph);
5895 tcg_gen_add_i64(val, val, tmp);
5896 tcg_temp_free_i64(tmp);
5897 }
5898
5899 /* Set N and Z flags from a 64-bit value. */
5900 static void gen_logicq_cc(TCGv_i64 val)
5901 {
5902 TCGv tmp = new_tmp();
5903 gen_helper_logicq_cc(tmp, val);
5904 gen_logic_CC(tmp);
5905 dead_tmp(tmp);
5906 }
5907
5908 /* Load/Store exclusive instructions are implemented by remembering
5909 the value/address loaded, and seeing if these are the same
5910 when the store is performed. This should be sufficient to implement
5911 the architecturally mandated semantics, and avoids having to monitor
5912 regular stores.
5913
5914 In system emulation mode only one CPU will be running at once, so
5915 this sequence is effectively atomic. In user emulation mode we
5916 throw an exception and handle the atomic operation elsewhere. */
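/* A typical guest sequence these two helpers implement:
 *     1: ldrex   r0, [r2]        @ load, mark [r2] exclusive
 *        add     r0, r0, #1
 *        strex   r1, r0, [r2]    @ succeeds only if still exclusive
 *        cmp     r1, #0
 *        bne     1b
 */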
5917 static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
5918 TCGv addr, int size)
5919 {
5920 TCGv tmp;
5921
5922 switch (size) {
5923 case 0:
5924 tmp = gen_ld8u(addr, IS_USER(s));
5925 break;
5926 case 1:
5927 tmp = gen_ld16u(addr, IS_USER(s));
5928 break;
5929 case 2:
5930 case 3:
5931 tmp = gen_ld32(addr, IS_USER(s));
5932 break;
5933 default:
5934 abort();
5935 }
5936 tcg_gen_mov_i32(cpu_exclusive_val, tmp);
5937 store_reg(s, rt, tmp);
5938 if (size == 3) {
5939 TCGv tmp2 = new_tmp();
5940 tcg_gen_addi_i32(tmp2, addr, 4);
5941 tmp = gen_ld32(tmp2, IS_USER(s));
5942 dead_tmp(tmp2);
5943 tcg_gen_mov_i32(cpu_exclusive_high, tmp);
5944 store_reg(s, rt2, tmp);
5945 }
5946 tcg_gen_mov_i32(cpu_exclusive_addr, addr);
5947 }
5948
5949 static void gen_clrex(DisasContext *s)
5950 {
5951 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
5952 }
5953
5954 #ifdef CONFIG_USER_ONLY
5955 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5956 TCGv addr, int size)
5957 {
5958 tcg_gen_mov_i32(cpu_exclusive_test, addr);
5959 tcg_gen_movi_i32(cpu_exclusive_info,
5960 size | (rd << 4) | (rt << 8) | (rt2 << 12));
5961 gen_set_condexec(s);
5962 gen_set_pc_im(s->pc - 4);
5963 gen_exception(EXCP_STREX);
5964 s->is_jmp = DISAS_JUMP;
5965 }
5966 #else
5967 static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
5968 TCGv addr, int size)
5969 {
5970 TCGv tmp;
5971 int done_label;
5972 int fail_label;
5973
5974 /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
5975 [addr] = {Rt};
5976 {Rd} = 0;
5977 } else {
5978 {Rd} = 1;
5979 } */
5980 fail_label = gen_new_label();
5981 done_label = gen_new_label();
5982 tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
5983 switch (size) {
5984 case 0:
5985 tmp = gen_ld8u(addr, IS_USER(s));
5986 break;
5987 case 1:
5988 tmp = gen_ld16u(addr, IS_USER(s));
5989 break;
5990 case 2:
5991 case 3:
5992 tmp = gen_ld32(addr, IS_USER(s));
5993 break;
5994 default:
5995 abort();
5996 }
5997 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
5998 dead_tmp(tmp);
5999 if (size == 3) {
6000 TCGv tmp2 = new_tmp();
6001 tcg_gen_addi_i32(tmp2, addr, 4);
6002 tmp = gen_ld32(tmp2, IS_USER(s));
6003 dead_tmp(tmp2);
6004 tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
6005 dead_tmp(tmp);
6006 }
6007 tmp = load_reg(s, rt);
6008 switch (size) {
6009 case 0:
6010 gen_st8(tmp, addr, IS_USER(s));
6011 break;
6012 case 1:
6013 gen_st16(tmp, addr, IS_USER(s));
6014 break;
6015 case 2:
6016 case 3:
6017 gen_st32(tmp, addr, IS_USER(s));
6018 break;
6019 default:
6020 abort();
6021 }
6022 if (size == 3) {
6023 tcg_gen_addi_i32(addr, addr, 4);
6024 tmp = load_reg(s, rt2);
6025 gen_st32(tmp, addr, IS_USER(s));
6026 }
6027 tcg_gen_movi_i32(cpu_R[rd], 0);
6028 tcg_gen_br(done_label);
6029 gen_set_label(fail_label);
6030 tcg_gen_movi_i32(cpu_R[rd], 1);
6031 gen_set_label(done_label);
6032 tcg_gen_movi_i32(cpu_exclusive_addr, -1);
6033 }
6034 #endif
6035
6036 static void disas_arm_insn(CPUState * env, DisasContext *s)
6037 {
6038 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6039 TCGv tmp;
6040 TCGv tmp2;
6041 TCGv tmp3;
6042 TCGv addr;
6043 TCGv_i64 tmp64;
6044
6045 insn = ldl_code(s->pc);
6046 s->pc += 4;
6047
6048 /* M variants do not implement ARM mode. */
6049 if (IS_M(env))
6050 goto illegal_op;
6051 cond = insn >> 28;
6052 if (cond == 0xf){
6053 /* Unconditional instructions. */
6054 if (((insn >> 25) & 7) == 1) {
6055 /* NEON Data processing. */
6056 if (!arm_feature(env, ARM_FEATURE_NEON))
6057 goto illegal_op;
6058
6059 if (disas_neon_data_insn(env, s, insn))
6060 goto illegal_op;
6061 return;
6062 }
6063 if ((insn & 0x0f100000) == 0x04000000) {
6064 /* NEON load/store. */
6065 if (!arm_feature(env, ARM_FEATURE_NEON))
6066 goto illegal_op;
6067
6068 if (disas_neon_ls_insn(env, s, insn))
6069 goto illegal_op;
6070 return;
6071 }
6072 if ((insn & 0x0d70f000) == 0x0550f000)
6073 return; /* PLD */
6074 else if ((insn & 0x0ffffdff) == 0x01010000) {
6075 ARCH(6);
6076 /* setend */
6077 if (insn & (1 << 9)) {
6078 /* BE8 mode not implemented. */
6079 goto illegal_op;
6080 }
6081 return;
6082 } else if ((insn & 0x0fffff00) == 0x057ff000) {
6083 switch ((insn >> 4) & 0xf) {
6084 case 1: /* clrex */
6085 ARCH(6K);
6086 gen_clrex(s);
6087 return;
6088 case 4: /* dsb */
6089 case 5: /* dmb */
6090 case 6: /* isb */
6091 ARCH(7);
6092 /* We don't emulate caches so these are a no-op. */
6093 return;
6094 default:
6095 goto illegal_op;
6096 }
6097 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
6098 /* srs */
6099 int32_t offset;
6100 if (IS_USER(s))
6101 goto illegal_op;
6102 ARCH(6);
6103 op1 = (insn & 0x1f);
6104 if (op1 == (env->uncached_cpsr & CPSR_M)) {
6105 addr = load_reg(s, 13);
6106 } else {
6107 addr = new_tmp();
6108 tmp = tcg_const_i32(op1);
6109 gen_helper_get_r13_banked(addr, cpu_env, tmp);
6110 tcg_temp_free_i32(tmp);
6111 }
6112 i = (insn >> 23) & 3;
6113 switch (i) {
6114 case 0: offset = -4; break; /* DA */
6115 case 1: offset = 0; break; /* IA */
6116 case 2: offset = -8; break; /* DB */
6117 case 3: offset = 4; break; /* IB */
6118 default: abort();
6119 }
6120 if (offset)
6121 tcg_gen_addi_i32(addr, addr, offset);
6122 tmp = load_reg(s, 14);
6123 gen_st32(tmp, addr, 0);
6124 tmp = load_cpu_field(spsr);
6125 tcg_gen_addi_i32(addr, addr, 4);
6126 gen_st32(tmp, addr, 0);
6127 if (insn & (1 << 21)) {
6128 /* Base writeback. */
6129 switch (i) {
6130 case 0: offset = -8; break;
6131 case 1: offset = 4; break;
6132 case 2: offset = -4; break;
6133 case 3: offset = 0; break;
6134 default: abort();
6135 }
6136 if (offset)
6137 tcg_gen_addi_i32(addr, addr, offset);
6138 if (op1 == (env->uncached_cpsr & CPSR_M)) {
6139 store_reg(s, 13, addr);
6140 } else {
6141 tmp = tcg_const_i32(op1);
6142 gen_helper_set_r13_banked(cpu_env, tmp, addr);
6143 tcg_temp_free_i32(tmp);
6144 dead_tmp(addr);
6145 }
6146 } else {
6147 dead_tmp(addr);
6148 }
6149 return;
6150 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
6151 /* rfe */
6152 int32_t offset;
6153 if (IS_USER(s))
6154 goto illegal_op;
6155 ARCH(6);
6156 rn = (insn >> 16) & 0xf;
6157 addr = load_reg(s, rn);
6158 i = (insn >> 23) & 3;
6159 switch (i) {
6160 case 0: offset = -4; break; /* DA */
6161 case 1: offset = 0; break; /* IA */
6162 case 2: offset = -8; break; /* DB */
6163 case 3: offset = 4; break; /* IB */
6164 default: abort();
6165 }
6166 if (offset)
6167 tcg_gen_addi_i32(addr, addr, offset);
6168 /* Load PC into tmp and CPSR into tmp2. */
6169 tmp = gen_ld32(addr, 0);
6170 tcg_gen_addi_i32(addr, addr, 4);
6171 tmp2 = gen_ld32(addr, 0);
6172 if (insn & (1 << 21)) {
6173 /* Base writeback. */
6174 switch (i) {
6175 case 0: offset = -8; break;
6176 case 1: offset = 4; break;
6177 case 2: offset = -4; break;
6178 case 3: offset = 0; break;
6179 default: abort();
6180 }
6181 if (offset)
6182 tcg_gen_addi_i32(addr, addr, offset);
6183 store_reg(s, rn, addr);
6184 } else {
6185 dead_tmp(addr);
6186 }
6187 gen_rfe(s, tmp, tmp2);
6188 return;
6189 } else if ((insn & 0x0e000000) == 0x0a000000) {
6190 /* branch link and change to thumb (blx <offset>) */
6191 int32_t offset;
6192
6193 val = (uint32_t)s->pc;
6194 tmp = new_tmp();
6195 tcg_gen_movi_i32(tmp, val);
6196 store_reg(s, 14, tmp);
6197 /* Sign-extend the 24-bit offset */
6198 offset = (((int32_t)insn) << 8) >> 8;
6199 /* offset * 4 + bit24 * 2 + (thumb bit) */
6200 val += (offset << 2) | ((insn >> 23) & 2) | 1;
6201 /* pipeline offset */
6202 val += 4;
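/* e.g. BLX <offset> at address A with imm24 == 0 and the H bit
 set: s->pc is already A + 4, so val becomes A + 8 + 2 with the
 Thumb bit set, i.e. a Thumb-mode target at A + 10. */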
6203 gen_bx_im(s, val);
6204 return;
6205 } else if ((insn & 0x0e000f00) == 0x0c000100) {
6206 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
6207 /* iWMMXt register transfer. */
6208 if (env->cp15.c15_cpar & (1 << 1))
6209 if (!disas_iwmmxt_insn(env, s, insn))
6210 return;
6211 }
6212 } else if ((insn & 0x0fe00000) == 0x0c400000) {
6213 /* Coprocessor double register transfer. */
6214 } else if ((insn & 0x0f000010) == 0x0e000010) {
6215 /* Additional coprocessor register transfer. */
6216 } else if ((insn & 0x0ff10020) == 0x01000000) {
6217 uint32_t mask;
6218 uint32_t val;
6219 /* cps (privileged) */
6220 if (IS_USER(s))
6221 return;
6222 mask = val = 0;
6223 if (insn & (1 << 19)) {
6224 if (insn & (1 << 8))
6225 mask |= CPSR_A;
6226 if (insn & (1 << 7))
6227 mask |= CPSR_I;
6228 if (insn & (1 << 6))
6229 mask |= CPSR_F;
6230 if (insn & (1 << 18))
6231 val |= mask;
6232 }
6233 if (insn & (1 << 17)) {
6234 mask |= CPSR_M;
6235 val |= (insn & 0x1f);
6236 }
6237 if (mask) {
6238 gen_set_psr_im(s, mask, 0, val);
6239 }
6240 return;
6241 }
6242 goto illegal_op;
6243 }
6244 if (cond != 0xe) {
6245 /* if the condition is not 'always', generate a conditional jump to
6246 the next instruction */
6247 s->condlabel = gen_new_label();
6248 gen_test_cc(cond ^ 1, s->condlabel);
6249 s->condjmp = 1;
6250 }
6251 if ((insn & 0x0f900000) == 0x03000000) {
6252 if ((insn & (1 << 21)) == 0) {
6253 ARCH(6T2);
6254 rd = (insn >> 12) & 0xf;
6255 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
6256 if ((insn & (1 << 22)) == 0) {
6257 /* MOVW */
6258 tmp = new_tmp();
6259 tcg_gen_movi_i32(tmp, val);
6260 } else {
6261 /* MOVT */
6262 tmp = load_reg(s, rd);
6263 tcg_gen_ext16u_i32(tmp, tmp);
6264 tcg_gen_ori_i32(tmp, tmp, val << 16);
6265 }
6266 store_reg(s, rd, tmp);
6267 } else {
6268 if (((insn >> 12) & 0xf) != 0xf)
6269 goto illegal_op;
6270 if (((insn >> 16) & 0xf) == 0) {
6271 gen_nop_hint(s, insn & 0xff);
6272 } else {
6273 /* CPSR = immediate */
6274 val = insn & 0xff;
6275 shift = ((insn >> 8) & 0xf) * 2;
6276 if (shift)
6277 val = (val >> shift) | (val << (32 - shift));
6278 i = ((insn & (1 << 22)) != 0);
6279 if (gen_set_psr_im(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, val))
6280 goto illegal_op;
6281 }
6282 }
6283 } else if ((insn & 0x0f900000) == 0x01000000
6284 && (insn & 0x00000090) != 0x00000090) {
6285 /* miscellaneous instructions */
6286 op1 = (insn >> 21) & 3;
6287 sh = (insn >> 4) & 0xf;
6288 rm = insn & 0xf;
6289 switch (sh) {
6290 case 0x0: /* move program status register */
6291 if (op1 & 1) {
6292 /* PSR = reg */
6293 tmp = load_reg(s, rm);
6294 i = ((op1 & 2) != 0);
6295 if (gen_set_psr(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i, tmp))
6296 goto illegal_op;
6297 } else {
6298 /* reg = PSR */
6299 rd = (insn >> 12) & 0xf;
6300 if (op1 & 2) {
6301 if (IS_USER(s))
6302 goto illegal_op;
6303 tmp = load_cpu_field(spsr);
6304 } else {
6305 tmp = new_tmp();
6306 gen_helper_cpsr_read(tmp);
6307 }
6308 store_reg(s, rd, tmp);
6309 }
6310 break;
6311 case 0x1:
6312 if (op1 == 1) {
6313 /* branch/exchange thumb (bx). */
6314 tmp = load_reg(s, rm);
6315 gen_bx(s, tmp);
6316 } else if (op1 == 3) {
6317 /* clz */
6318 rd = (insn >> 12) & 0xf;
6319 tmp = load_reg(s, rm);
6320 gen_helper_clz(tmp, tmp);
6321 store_reg(s, rd, tmp);
6322 } else {
6323 goto illegal_op;
6324 }
6325 break;
6326 case 0x2:
6327 if (op1 == 1) {
6328 ARCH(5J); /* bxj */
6329 /* Trivial implementation equivalent to bx. */
6330 tmp = load_reg(s, rm);
6331 gen_bx(s, tmp);
6332 } else {
6333 goto illegal_op;
6334 }
6335 break;
6336 case 0x3:
6337 if (op1 != 1)
6338 goto illegal_op;
6339
6340 /* branch link/exchange thumb (blx) */
6341 tmp = load_reg(s, rm);
6342 tmp2 = new_tmp();
6343 tcg_gen_movi_i32(tmp2, s->pc);
6344 store_reg(s, 14, tmp2);
6345 gen_bx(s, tmp);
6346 break;
6347 case 0x5: /* saturating add/subtract */
6348 rd = (insn >> 12) & 0xf;
6349 rn = (insn >> 16) & 0xf;
6350 tmp = load_reg(s, rm);
6351 tmp2 = load_reg(s, rn);
6352 if (op1 & 2)
6353 gen_helper_double_saturate(tmp2, tmp2);
6354 if (op1 & 1)
6355 gen_helper_sub_saturate(tmp, tmp, tmp2);
6356 else
6357 gen_helper_add_saturate(tmp, tmp, tmp2);
6358 dead_tmp(tmp2);
6359 store_reg(s, rd, tmp);
6360 break;
6361 case 7:
6362 /* SMC instruction (op1 == 3)
6363 and undefined instructions (op1 == 0 || op1 == 2)
6364 will trap */
6365 if (op1 != 1) {
6366 goto illegal_op;
6367 }
6368 /* bkpt */
6369 gen_set_condexec(s);
6370 gen_set_pc_im(s->pc - 4);
6371 gen_exception(EXCP_BKPT);
6372 s->is_jmp = DISAS_JUMP;
6373 break;
6374 case 0x8: /* signed multiply */
6375 case 0xa:
6376 case 0xc:
6377 case 0xe:
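/* Halfword multiplies: op1 0 = SMLA<x><y>, 1 = SMULW<y>/SMLAW<y>,
   2 = SMLAL<x><y>, 3 = SMUL<x><y>; sh bits 1 and 2 select the
   bottom or top operand halfwords (<x>, <y>). */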
6378 rs = (insn >> 8) & 0xf;
6379 rn = (insn >> 12) & 0xf;
6380 rd = (insn >> 16) & 0xf;
6381 if (op1 == 1) {
6382 /* (32 * 16) >> 16 */
6383 tmp = load_reg(s, rm);
6384 tmp2 = load_reg(s, rs);
6385 if (sh & 4)
6386 tcg_gen_sari_i32(tmp2, tmp2, 16);
6387 else
6388 gen_sxth(tmp2);
6389 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6390 tcg_gen_shri_i64(tmp64, tmp64, 16);
6391 tmp = new_tmp();
6392 tcg_gen_trunc_i64_i32(tmp, tmp64);
6393 tcg_temp_free_i64(tmp64);
6394 if ((sh & 2) == 0) {
6395 tmp2 = load_reg(s, rn);
6396 gen_helper_add_setq(tmp, tmp, tmp2);
6397 dead_tmp(tmp2);
6398 }
6399 store_reg(s, rd, tmp);
6400 } else {
6401 /* 16 * 16 */
6402 tmp = load_reg(s, rm);
6403 tmp2 = load_reg(s, rs);
6404 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6405 dead_tmp(tmp2);
6406 if (op1 == 2) {
6407 tmp64 = tcg_temp_new_i64();
6408 tcg_gen_ext_i32_i64(tmp64, tmp);
6409 dead_tmp(tmp);
6410 gen_addq(s, tmp64, rn, rd);
6411 gen_storeq_reg(s, rn, rd, tmp64);
6412 tcg_temp_free_i64(tmp64);
6413 } else {
6414 if (op1 == 0) {
6415 tmp2 = load_reg(s, rn);
6416 gen_helper_add_setq(tmp, tmp, tmp2);
6417 dead_tmp(tmp2);
6418 }
6419 store_reg(s, rd, tmp);
6420 }
6421 }
6422 break;
6423 default:
6424 goto illegal_op;
6425 }
6426 } else if (((insn & 0x0e000000) == 0 &&
6427 (insn & 0x00000090) != 0x90) ||
6428 ((insn & 0x0e000000) == (1 << 25))) {
6429 int set_cc, logic_cc, shiftop;
6430
6431 op1 = (insn >> 21) & 0xf;
6432 set_cc = (insn >> 20) & 1;
6433 logic_cc = table_logic_cc[op1] & set_cc;
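/* For logical ops C comes from the shifter carry-out, so the
   shift generators set it directly; arithmetic ops compute
   NZCV in their _cc helpers instead. */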
6434
6435 /* data processing instruction */
6436 if (insn & (1 << 25)) {
6437 /* immediate operand */
6438 val = insn & 0xff;
6439 shift = ((insn >> 8) & 0xf) * 2;
6440 if (shift) {
6441 val = (val >> shift) | (val << (32 - shift));
6442 }
6443 tmp2 = new_tmp();
6444 tcg_gen_movi_i32(tmp2, val);
6445 if (logic_cc && shift) {
6446 gen_set_CF_bit31(tmp2);
6447 }
6448 } else {
6449 /* register */
6450 rm = (insn) & 0xf;
6451 tmp2 = load_reg(s, rm);
6452 shiftop = (insn >> 5) & 3;
6453 if (!(insn & (1 << 4))) {
6454 shift = (insn >> 7) & 0x1f;
6455 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
6456 } else {
6457 rs = (insn >> 8) & 0xf;
6458 tmp = load_reg(s, rs);
6459 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
6460 }
6461 }
6462 if (op1 != 0x0f && op1 != 0x0d) {
6463 rn = (insn >> 16) & 0xf;
6464 tmp = load_reg(s, rn);
6465 } else {
6466 TCGV_UNUSED(tmp);
6467 }
6468 rd = (insn >> 12) & 0xf;
6469 switch(op1) {
6470 case 0x00:
6471 tcg_gen_and_i32(tmp, tmp, tmp2);
6472 if (logic_cc) {
6473 gen_logic_CC(tmp);
6474 }
6475 store_reg_bx(env, s, rd, tmp);
6476 break;
6477 case 0x01:
6478 tcg_gen_xor_i32(tmp, tmp, tmp2);
6479 if (logic_cc) {
6480 gen_logic_CC(tmp);
6481 }
6482 store_reg_bx(env, s, rd, tmp);
6483 break;
6484 case 0x02:
6485 if (set_cc && rd == 15) {
6486 /* SUBS r15, ... is used for exception return. */
6487 if (IS_USER(s)) {
6488 goto illegal_op;
6489 }
6490 gen_helper_sub_cc(tmp, tmp, tmp2);
6491 gen_exception_return(s, tmp);
6492 } else {
6493 if (set_cc) {
6494 gen_helper_sub_cc(tmp, tmp, tmp2);
6495 } else {
6496 tcg_gen_sub_i32(tmp, tmp, tmp2);
6497 }
6498 store_reg_bx(env, s, rd, tmp);
6499 }
6500 break;
6501 case 0x03:
6502 if (set_cc) {
6503 gen_helper_sub_cc(tmp, tmp2, tmp);
6504 } else {
6505 tcg_gen_sub_i32(tmp, tmp2, tmp);
6506 }
6507 store_reg_bx(env, s, rd, tmp);
6508 break;
6509 case 0x04:
6510 if (set_cc) {
6511 gen_helper_add_cc(tmp, tmp, tmp2);
6512 } else {
6513 tcg_gen_add_i32(tmp, tmp, tmp2);
6514 }
6515 store_reg_bx(env, s, rd, tmp);
6516 break;
6517 case 0x05:
6518 if (set_cc) {
6519 gen_helper_adc_cc(tmp, tmp, tmp2);
6520 } else {
6521 gen_add_carry(tmp, tmp, tmp2);
6522 }
6523 store_reg_bx(env, s, rd, tmp);
6524 break;
6525 case 0x06:
6526 if (set_cc) {
6527 gen_helper_sbc_cc(tmp, tmp, tmp2);
6528 } else {
6529 gen_sub_carry(tmp, tmp, tmp2);
6530 }
6531 store_reg_bx(env, s, rd, tmp);
6532 break;
6533 case 0x07:
6534 if (set_cc) {
6535 gen_helper_sbc_cc(tmp, tmp2, tmp);
6536 } else {
6537 gen_sub_carry(tmp, tmp2, tmp);
6538 }
6539 store_reg_bx(env, s, rd, tmp);
6540 break;
6541 case 0x08:
6542 if (set_cc) {
6543 tcg_gen_and_i32(tmp, tmp, tmp2);
6544 gen_logic_CC(tmp);
6545 }
6546 dead_tmp(tmp);
6547 break;
6548 case 0x09:
6549 if (set_cc) {
6550 tcg_gen_xor_i32(tmp, tmp, tmp2);
6551 gen_logic_CC(tmp);
6552 }
6553 dead_tmp(tmp);
6554 break;
6555 case 0x0a:
6556 if (set_cc) {
6557 gen_helper_sub_cc(tmp, tmp, tmp2);
6558 }
6559 dead_tmp(tmp);
6560 break;
6561 case 0x0b:
6562 if (set_cc) {
6563 gen_helper_add_cc(tmp, tmp, tmp2);
6564 }
6565 dead_tmp(tmp);
6566 break;
6567 case 0x0c:
6568 tcg_gen_or_i32(tmp, tmp, tmp2);
6569 if (logic_cc) {
6570 gen_logic_CC(tmp);
6571 }
6572 store_reg_bx(env, s, rd, tmp);
6573 break;
6574 case 0x0d:
6575 if (logic_cc && rd == 15) {
6576 /* MOVS r15, ... is used for exception return. */
6577 if (IS_USER(s)) {
6578 goto illegal_op;
6579 }
6580 gen_exception_return(s, tmp2);
6581 } else {
6582 if (logic_cc) {
6583 gen_logic_CC(tmp2);
6584 }
6585 store_reg_bx(env, s, rd, tmp2);
6586 }
6587 break;
6588 case 0x0e:
6589 tcg_gen_andc_i32(tmp, tmp, tmp2);
6590 if (logic_cc) {
6591 gen_logic_CC(tmp);
6592 }
6593 store_reg_bx(env, s, rd, tmp);
6594 break;
6595 default:
6596 case 0x0f:
6597 tcg_gen_not_i32(tmp2, tmp2);
6598 if (logic_cc) {
6599 gen_logic_CC(tmp2);
6600 }
6601 store_reg_bx(env, s, rd, tmp2);
6602 break;
6603 }
6604 if (op1 != 0x0f && op1 != 0x0d) {
6605 dead_tmp(tmp2);
6606 }
6607 } else {
6608 /* other instructions */
6609 op1 = (insn >> 24) & 0xf;
6610 switch(op1) {
6611 case 0x0:
6612 case 0x1:
6613 /* multiplies, extra load/stores */
6614 sh = (insn >> 5) & 3;
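/* Bits 7 and 4 are both set here, so sh == 0 selects the
   multiply, SWP and exclusive forms; nonzero sh selects the
   halfword/doubleword transfers handled below. */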
6615 if (sh == 0) {
6616 if (op1 == 0x0) {
6617 rd = (insn >> 16) & 0xf;
6618 rn = (insn >> 12) & 0xf;
6619 rs = (insn >> 8) & 0xf;
6620 rm = (insn) & 0xf;
6621 op1 = (insn >> 20) & 0xf;
6622 switch (op1) {
6623 case 0: case 1: case 2: case 3: case 6:
6624 /* 32 bit mul */
6625 tmp = load_reg(s, rs);
6626 tmp2 = load_reg(s, rm);
6627 tcg_gen_mul_i32(tmp, tmp, tmp2);
6628 dead_tmp(tmp2);
6629 if (insn & (1 << 22)) {
6630 /* Subtract (mls) */
6631 ARCH(6T2);
6632 tmp2 = load_reg(s, rn);
6633 tcg_gen_sub_i32(tmp, tmp2, tmp);
6634 dead_tmp(tmp2);
6635 } else if (insn & (1 << 21)) {
6636 /* Add */
6637 tmp2 = load_reg(s, rn);
6638 tcg_gen_add_i32(tmp, tmp, tmp2);
6639 dead_tmp(tmp2);
6640 }
6641 if (insn & (1 << 20))
6642 gen_logic_CC(tmp);
6643 store_reg(s, rd, tmp);
6644 break;
6645 case 4:
6646 /* 64 bit mul double accumulate (UMAAL) */
6647 ARCH(6);
6648 tmp = load_reg(s, rs);
6649 tmp2 = load_reg(s, rm);
6650 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6651 gen_addq_lo(s, tmp64, rn);
6652 gen_addq_lo(s, tmp64, rd);
6653 gen_storeq_reg(s, rn, rd, tmp64);
6654 tcg_temp_free_i64(tmp64);
6655 break;
6656 case 8: case 9: case 10: case 11:
6657 case 12: case 13: case 14: case 15:
6658 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
6659 tmp = load_reg(s, rs);
6660 tmp2 = load_reg(s, rm);
6661 if (insn & (1 << 22)) {
6662 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6663 } else {
6664 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
6665 }
6666 if (insn & (1 << 21)) { /* mult accumulate */
6667 gen_addq(s, tmp64, rn, rd);
6668 }
6669 if (insn & (1 << 20)) {
6670 gen_logicq_cc(tmp64);
6671 }
6672 gen_storeq_reg(s, rn, rd, tmp64);
6673 tcg_temp_free_i64(tmp64);
6674 break;
6675 default:
6676 goto illegal_op;
6677 }
6678 } else {
6679 rn = (insn >> 16) & 0xf;
6680 rd = (insn >> 12) & 0xf;
6681 if (insn & (1 << 23)) {
6682 /* load/store exclusive */
6683 op1 = (insn >> 21) & 0x3;
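/* op1 gives the size: 0 = word (ARMv6), 1 = doubleword,
   2 = byte, 3 = halfword (the latter three are ARMv6K). */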
6684 if (op1)
6685 ARCH(6K);
6686 else
6687 ARCH(6);
6688 addr = tcg_temp_local_new_i32();
6689 load_reg_var(s, addr, rn);
6690 if (insn & (1 << 20)) {
6691 switch (op1) {
6692 case 0: /* ldrex */
6693 gen_load_exclusive(s, rd, 15, addr, 2);
6694 break;
6695 case 1: /* ldrexd */
6696 gen_load_exclusive(s, rd, rd + 1, addr, 3);
6697 break;
6698 case 2: /* ldrexb */
6699 gen_load_exclusive(s, rd, 15, addr, 0);
6700 break;
6701 case 3: /* ldrexh */
6702 gen_load_exclusive(s, rd, 15, addr, 1);
6703 break;
6704 default:
6705 abort();
6706 }
6707 } else {
6708 rm = insn & 0xf;
6709 switch (op1) {
6710 case 0: /* strex */
6711 gen_store_exclusive(s, rd, rm, 15, addr, 2);
6712 break;
6713 case 1: /* strexd */
6714 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
6715 break;
6716 case 2: /* strexb */
6717 gen_store_exclusive(s, rd, rm, 15, addr, 0);
6718 break;
6719 case 3: /* strexh */
6720 gen_store_exclusive(s, rd, rm, 15, addr, 1);
6721 break;
6722 default:
6723 abort();
6724 }
6725 }
6726 tcg_temp_free(addr);
6727 } else {
6728 /* SWP instruction */
6729 rm = (insn) & 0xf;
6730
6731 /* ??? This is not really atomic. However, we know
6732 we never have multiple CPUs running in parallel,
6733 so it is good enough. */
6734 addr = load_reg(s, rn);
6735 tmp = load_reg(s, rm);
6736 if (insn & (1 << 22)) {
6737 tmp2 = gen_ld8u(addr, IS_USER(s));
6738 gen_st8(tmp, addr, IS_USER(s));
6739 } else {
6740 tmp2 = gen_ld32(addr, IS_USER(s));
6741 gen_st32(tmp, addr, IS_USER(s));
6742 }
6743 dead_tmp(addr);
6744 store_reg(s, rd, tmp2);
6745 }
6746 }
6747 } else {
6748 int address_offset;
6749 int load;
6750 /* Misc load/store */
6751 rn = (insn >> 16) & 0xf;
6752 rd = (insn >> 12) & 0xf;
6753 addr = load_reg(s, rn);
6754 if (insn & (1 << 24))
6755 gen_add_datah_offset(s, insn, 0, addr);
6756 address_offset = 0;
6757 if (insn & (1 << 20)) {
6758 /* load */
6759 switch(sh) {
6760 case 1:
6761 tmp = gen_ld16u(addr, IS_USER(s));
6762 break;
6763 case 2:
6764 tmp = gen_ld8s(addr, IS_USER(s));
6765 break;
6766 default:
6767 case 3:
6768 tmp = gen_ld16s(addr, IS_USER(s));
6769 break;
6770 }
6771 load = 1;
6772 } else if (sh & 2) {
6773 /* doubleword */
6774 if (sh & 1) {
6775 /* store */
6776 tmp = load_reg(s, rd);
6777 gen_st32(tmp, addr, IS_USER(s));
6778 tcg_gen_addi_i32(addr, addr, 4);
6779 tmp = load_reg(s, rd + 1);
6780 gen_st32(tmp, addr, IS_USER(s));
6781 load = 0;
6782 } else {
6783 /* load */
6784 tmp = gen_ld32(addr, IS_USER(s));
6785 store_reg(s, rd, tmp);
6786 tcg_gen_addi_i32(addr, addr, 4);
6787 tmp = gen_ld32(addr, IS_USER(s));
6788 rd++;
6789 load = 1;
6790 }
6791 address_offset = -4;
6792 } else {
6793 /* store */
6794 tmp = load_reg(s, rd);
6795 gen_st16(tmp, addr, IS_USER(s));
6796 load = 0;
6797 }
6798 /* Perform base writeback before storing the loaded value, to
6799 ensure correct behavior with overlapping index registers.
6800 ldrd with base writeback is undefined if the
6801 destination and index registers overlap. */
6802 if (!(insn & (1 << 24))) {
6803 gen_add_datah_offset(s, insn, address_offset, addr);
6804 store_reg(s, rn, addr);
6805 } else if (insn & (1 << 21)) {
6806 if (address_offset)
6807 tcg_gen_addi_i32(addr, addr, address_offset);
6808 store_reg(s, rn, addr);
6809 } else {
6810 dead_tmp(addr);
6811 }
6812 if (load) {
6813 /* Complete the load. */
6814 store_reg(s, rd, tmp);
6815 }
6816 }
6817 break;
6818 case 0x4:
6819 case 0x5:
6820 goto do_ldst;
6821 case 0x6:
6822 case 0x7:
6823 if (insn & (1 << 4)) {
6824 ARCH(6);
6825 /* ARMv6 Media instructions. */
6826 rm = insn & 0xf;
6827 rn = (insn >> 16) & 0xf;
6828 rd = (insn >> 12) & 0xf;
6829 rs = (insn >> 8) & 0xf;
6830 switch ((insn >> 23) & 3) {
6831 case 0: /* Parallel add/subtract. */
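/* op1 (bits 22-20) is the prefix: 1 = S, 2 = Q, 3 = SH,
   5 = U, 6 = UQ, 7 = UH; sh (bits 7-5) picks ADD16/ASX/SAX/
   SUB16/ADD8/SUB8, with 5 and 6 undefined. */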
6832 op1 = (insn >> 20) & 7;
6833 tmp = load_reg(s, rn);
6834 tmp2 = load_reg(s, rm);
6835 sh = (insn >> 5) & 7;
6836 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6837 goto illegal_op;
6838 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6839 dead_tmp(tmp2);
6840 store_reg(s, rd, tmp);
6841 break;
6842 case 1:
6843 if ((insn & 0x00700020) == 0) {
6844 /* Halfword pack. */
6845 tmp = load_reg(s, rn);
6846 tmp2 = load_reg(s, rm);
6847 shift = (insn >> 7) & 0x1f;
6848 if (insn & (1 << 6)) {
6849 /* pkhtb */
6850 if (shift == 0)
6851 shift = 31;
6852 tcg_gen_sari_i32(tmp2, tmp2, shift);
6853 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6854 tcg_gen_ext16u_i32(tmp2, tmp2);
6855 } else {
6856 /* pkhbt */
6857 if (shift)
6858 tcg_gen_shli_i32(tmp2, tmp2, shift);
6859 tcg_gen_ext16u_i32(tmp, tmp);
6860 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6861 }
6862 tcg_gen_or_i32(tmp, tmp, tmp2);
6863 dead_tmp(tmp2);
6864 store_reg(s, rd, tmp);
6865 } else if ((insn & 0x00200020) == 0x00200000) {
6866 /* [us]sat */
6867 tmp = load_reg(s, rm);
6868 shift = (insn >> 7) & 0x1f;
6869 if (insn & (1 << 6)) {
6870 if (shift == 0)
6871 shift = 31;
6872 tcg_gen_sari_i32(tmp, tmp, shift);
6873 } else {
6874 tcg_gen_shli_i32(tmp, tmp, shift);
6875 }
6876 sh = (insn >> 16) & 0x1f;
6877 if (sh != 0) {
6878 tmp2 = tcg_const_i32(sh);
6879 if (insn & (1 << 22))
6880 gen_helper_usat(tmp, tmp, tmp2);
6881 else
6882 gen_helper_ssat(tmp, tmp, tmp2);
6883 tcg_temp_free_i32(tmp2);
6884 }
6885 store_reg(s, rd, tmp);
6886 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6887 /* [us]sat16 */
6888 tmp = load_reg(s, rm);
6889 sh = (insn >> 16) & 0x1f;
6890 if (sh != 0) {
6891 tmp2 = tcg_const_i32(sh);
6892 if (insn & (1 << 22))
6893 gen_helper_usat16(tmp, tmp, tmp2);
6894 else
6895 gen_helper_ssat16(tmp, tmp, tmp2);
6896 tcg_temp_free_i32(tmp2);
6897 }
6898 store_reg(s, rd, tmp);
6899 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6900 /* Select bytes. */
6901 tmp = load_reg(s, rn);
6902 tmp2 = load_reg(s, rm);
6903 tmp3 = new_tmp();
6904 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6905 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6906 dead_tmp(tmp3);
6907 dead_tmp(tmp2);
6908 store_reg(s, rd, tmp);
6909 } else if ((insn & 0x000003e0) == 0x00000060) {
6910 tmp = load_reg(s, rm);
6911 shift = (insn >> 10) & 3;
6912 /* ??? In many cases it's not necessary to do a
6913 rotate; a shift is sufficient. */
6914 if (shift != 0)
6915 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
6916 op1 = (insn >> 20) & 7;
6917 switch (op1) {
6918 case 0: gen_sxtb16(tmp); break;
6919 case 2: gen_sxtb(tmp); break;
6920 case 3: gen_sxth(tmp); break;
6921 case 4: gen_uxtb16(tmp); break;
6922 case 6: gen_uxtb(tmp); break;
6923 case 7: gen_uxth(tmp); break;
6924 default: goto illegal_op;
6925 }
6926 if (rn != 15) {
6927 tmp2 = load_reg(s, rn);
6928 if ((op1 & 3) == 0) {
6929 gen_add16(tmp, tmp2);
6930 } else {
6931 tcg_gen_add_i32(tmp, tmp, tmp2);
6932 dead_tmp(tmp2);
6933 }
6934 }
6935 store_reg(s, rd, tmp);
6936 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6937 /* rev */
6938 tmp = load_reg(s, rm);
6939 if (insn & (1 << 22)) {
6940 if (insn & (1 << 7)) {
6941 gen_revsh(tmp);
6942 } else {
6943 ARCH(6T2);
6944 gen_helper_rbit(tmp, tmp);
6945 }
6946 } else {
6947 if (insn & (1 << 7))
6948 gen_rev16(tmp);
6949 else
6950 tcg_gen_bswap32_i32(tmp, tmp);
6951 }
6952 store_reg(s, rd, tmp);
6953 } else {
6954 goto illegal_op;
6955 }
6956 break;
6957 case 2: /* Multiplies (Type 3). */
6958 tmp = load_reg(s, rm);
6959 tmp2 = load_reg(s, rs);
6960 if (insn & (1 << 20)) {
6961 /* Signed multiply most significant [accumulate].
6962 (SMMUL, SMMLA, SMMLS) */
6963 tmp64 = gen_muls_i64_i32(tmp, tmp2);
6964
6965 if (rd != 15) {
6966 tmp = load_reg(s, rd);
6967 if (insn & (1 << 6)) {
6968 tmp64 = gen_subq_msw(tmp64, tmp);
6969 } else {
6970 tmp64 = gen_addq_msw(tmp64, tmp);
6971 }
6972 }
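/* Bit 5 is the round bit: bias the product by 0x80000000
   before taking the high word (SMMULR/SMMLAR/SMMLSR). */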
6973 if (insn & (1 << 5)) {
6974 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6975 }
6976 tcg_gen_shri_i64(tmp64, tmp64, 32);
6977 tmp = new_tmp();
6978 tcg_gen_trunc_i64_i32(tmp, tmp64);
6979 tcg_temp_free_i64(tmp64);
6980 store_reg(s, rn, tmp);
6981 } else {
6982 if (insn & (1 << 5))
6983 gen_swap_half(tmp2);
6984 gen_smul_dual(tmp, tmp2);
6985 /* This addition cannot overflow. */
6986 if (insn & (1 << 6)) {
6987 tcg_gen_sub_i32(tmp, tmp, tmp2);
6988 } else {
6989 tcg_gen_add_i32(tmp, tmp, tmp2);
6990 }
6991 dead_tmp(tmp2);
6992 if (insn & (1 << 22)) {
6993 /* smlald, smlsld */
6994 tmp64 = tcg_temp_new_i64();
6995 tcg_gen_ext_i32_i64(tmp64, tmp);
6996 dead_tmp(tmp);
6997 gen_addq(s, tmp64, rd, rn);
6998 gen_storeq_reg(s, rd, rn, tmp64);
6999 tcg_temp_free_i64(tmp64);
7000 } else {
7001 /* smuad, smusd, smlad, smlsd */
7002 if (rd != 15)
7003 {
7004 tmp2 = load_reg(s, rd);
7005 gen_helper_add_setq(tmp, tmp, tmp2);
7006 dead_tmp(tmp2);
7007 }
7008 store_reg(s, rn, tmp);
7009 }
7010 }
7011 break;
7012 case 3:
7013 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
7014 switch (op1) {
7015 case 0: /* Unsigned sum of absolute differences. */
7016 ARCH(6);
7017 tmp = load_reg(s, rm);
7018 tmp2 = load_reg(s, rs);
7019 gen_helper_usad8(tmp, tmp, tmp2);
7020 dead_tmp(tmp2);
7021 if (rd != 15) {
7022 tmp2 = load_reg(s, rd);
7023 tcg_gen_add_i32(tmp, tmp, tmp2);
7024 dead_tmp(tmp2);
7025 }
7026 store_reg(s, rn, tmp);
7027 break;
7028 case 0x20: case 0x24: case 0x28: case 0x2c:
7029 /* Bitfield insert/clear. */
7030 ARCH(6T2);
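/* shift = lsb and i becomes msb + 1 - lsb, the field width;
   rm == 15 encodes BFC (insert zeroes). */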
7031 shift = (insn >> 7) & 0x1f;
7032 i = (insn >> 16) & 0x1f;
7033 i = i + 1 - shift;
7034 if (rm == 15) {
7035 tmp = new_tmp();
7036 tcg_gen_movi_i32(tmp, 0);
7037 } else {
7038 tmp = load_reg(s, rm);
7039 }
7040 if (i != 32) {
7041 tmp2 = load_reg(s, rd);
7042 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7043 dead_tmp(tmp2);
7044 }
7045 store_reg(s, rd, tmp);
7046 break;
7047 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
7048 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
7049 ARCH(6T2);
7050 tmp = load_reg(s, rm);
7051 shift = (insn >> 7) & 0x1f;
7052 i = ((insn >> 16) & 0x1f) + 1;
7053 if (shift + i > 32)
7054 goto illegal_op;
7055 if (i < 32) {
7056 if (op1 & 0x20) {
7057 gen_ubfx(tmp, shift, (1u << i) - 1);
7058 } else {
7059 gen_sbfx(tmp, shift, i);
7060 }
7061 }
7062 store_reg(s, rd, tmp);
7063 break;
7064 default:
7065 goto illegal_op;
7066 }
7067 break;
7068 }
7069 break;
7070 }
7071 do_ldst:
7072 /* Check for undefined extension instructions
7073 * per the ARM Bible, i.e.:
7074 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
7075 */
7076 sh = (0xf << 20) | (0xf << 4);
7077 if (op1 == 0x7 && ((insn & sh) == sh))
7078 {
7079 goto illegal_op;
7080 }
7081 /* load/store byte/word */
7082 rn = (insn >> 16) & 0xf;
7083 rd = (insn >> 12) & 0xf;
7084 tmp2 = load_reg(s, rn);
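/* P == 0 with W == 1 selects the user-mode ("translated")
   forms LDRT/LDRBT/STRT/STRBT. */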
7085 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
7086 if (insn & (1 << 24))
7087 gen_add_data_offset(s, insn, tmp2);
7088 if (insn & (1 << 20)) {
7089 /* load */
7090 if (insn & (1 << 22)) {
7091 tmp = gen_ld8u(tmp2, i);
7092 } else {
7093 tmp = gen_ld32(tmp2, i);
7094 }
7095 } else {
7096 /* store */
7097 tmp = load_reg(s, rd);
7098 if (insn & (1 << 22))
7099 gen_st8(tmp, tmp2, i);
7100 else
7101 gen_st32(tmp, tmp2, i);
7102 }
7103 if (!(insn & (1 << 24))) {
7104 gen_add_data_offset(s, insn, tmp2);
7105 store_reg(s, rn, tmp2);
7106 } else if (insn & (1 << 21)) {
7107 store_reg(s, rn, tmp2);
7108 } else {
7109 dead_tmp(tmp2);
7110 }
7111 if (insn & (1 << 20)) {
7112 /* Complete the load. */
7113 if (rd == 15)
7114 gen_bx(s, tmp);
7115 else
7116 store_reg(s, rd, tmp);
7117 }
7118 break;
7119 case 0x08:
7120 case 0x09:
7121 {
7122 int j, n, user, loaded_base;
7123 TCGv loaded_var;
7124 /* load/store multiple words */
7125 /* XXX: store correct base if write back */
7126 user = 0;
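/* S bit (22): an LDM whose register list includes the PC also
   restores CPSR from SPSR; otherwise the user-bank registers
   are transferred. */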
7127 if (insn & (1 << 22)) {
7128 if (IS_USER(s))
7129 goto illegal_op; /* only usable in supervisor mode */
7130
7131 if ((insn & (1 << 15)) == 0)
7132 user = 1;
7133 }
7134 rn = (insn >> 16) & 0xf;
7135 addr = load_reg(s, rn);
7136
7137 /* compute total size */
7138 loaded_base = 0;
7139 TCGV_UNUSED(loaded_var);
7140 n = 0;
7141 for (i = 0; i < 16; i++) {
7142 if (insn & (1 << i))
7143 n++;
7144 }
7145 /* XXX: test invalid n == 0 case ? */
7146 if (insn & (1 << 23)) {
7147 if (insn & (1 << 24)) {
7148 /* pre increment */
7149 tcg_gen_addi_i32(addr, addr, 4);
7150 } else {
7151 /* post increment */
7152 }
7153 } else {
7154 if (insn & (1 << 24)) {
7155 /* pre decrement */
7156 tcg_gen_addi_i32(addr, addr, -(n * 4));
7157 } else {
7158 /* post decrement */
7159 if (n != 1)
7160 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7161 }
7162 }
7163 j = 0;
7164 for (i = 0; i < 16; i++) {
7165 if (insn & (1 << i)) {
7166 if (insn & (1 << 20)) {
7167 /* load */
7168 tmp = gen_ld32(addr, IS_USER(s));
7169 if (i == 15) {
7170 gen_bx(s, tmp);
7171 } else if (user) {
7172 tmp2 = tcg_const_i32(i);
7173 gen_helper_set_user_reg(tmp2, tmp);
7174 tcg_temp_free_i32(tmp2);
7175 dead_tmp(tmp);
7176 } else if (i == rn) {
7177 loaded_var = tmp;
7178 loaded_base = 1;
7179 } else {
7180 store_reg(s, i, tmp);
7181 }
7182 } else {
7183 /* store */
7184 if (i == 15) {
7185 /* special case: r15 = PC + 8 */
7186 val = (long)s->pc + 4;
7187 tmp = new_tmp();
7188 tcg_gen_movi_i32(tmp, val);
7189 } else if (user) {
7190 tmp = new_tmp();
7191 tmp2 = tcg_const_i32(i);
7192 gen_helper_get_user_reg(tmp, tmp2);
7193 tcg_temp_free_i32(tmp2);
7194 } else {
7195 tmp = load_reg(s, i);
7196 }
7197 gen_st32(tmp, addr, IS_USER(s));
7198 }
7199 j++;
7200 /* no need to add after the last transfer */
7201 if (j != n)
7202 tcg_gen_addi_i32(addr, addr, 4);
7203 }
7204 }
7205 if (insn & (1 << 21)) {
7206 /* write back */
7207 if (insn & (1 << 23)) {
7208 if (insn & (1 << 24)) {
7209 /* pre increment */
7210 } else {
7211 /* post increment */
7212 tcg_gen_addi_i32(addr, addr, 4);
7213 }
7214 } else {
7215 if (insn & (1 << 24)) {
7216 /* pre decrement */
7217 if (n != 1)
7218 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
7219 } else {
7220 /* post decrement */
7221 tcg_gen_addi_i32(addr, addr, -(n * 4));
7222 }
7223 }
7224 store_reg(s, rn, addr);
7225 } else {
7226 dead_tmp(addr);
7227 }
7228 if (loaded_base) {
7229 store_reg(s, rn, loaded_var);
7230 }
7231 if ((insn & (1 << 22)) && !user) {
7232 /* Restore CPSR from SPSR. */
7233 tmp = load_cpu_field(spsr);
7234 gen_set_cpsr(tmp, 0xffffffff);
7235 dead_tmp(tmp);
7236 s->is_jmp = DISAS_UPDATE;
7237 }
7238 }
7239 break;
7240 case 0xa:
7241 case 0xb:
7242 {
7243 int32_t offset;
7244
7245 /* branch (and link) */
7246 val = (int32_t)s->pc;
7247 if (insn & (1 << 24)) {
7248 tmp = new_tmp();
7249 tcg_gen_movi_i32(tmp, val);
7250 store_reg(s, 14, tmp);
7251 }
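/* Sign-extend the 24-bit immediate; s->pc is already insn + 4,
   so adding 4 more gives the architectural PC + 8 base. */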
7252 offset = (((int32_t)insn << 8) >> 8);
7253 val += (offset << 2) + 4;
7254 gen_jmp(s, val);
7255 }
7256 break;
7257 case 0xc:
7258 case 0xd:
7259 case 0xe:
7260 /* Coprocessor. */
7261 if (disas_coproc_insn(env, s, insn))
7262 goto illegal_op;
7263 break;
7264 case 0xf:
7265 /* swi */
7266 gen_set_pc_im(s->pc);
7267 s->is_jmp = DISAS_SWI;
7268 break;
7269 default:
7270 illegal_op:
7271 gen_set_condexec(s);
7272 gen_set_pc_im(s->pc - 4);
7273 gen_exception(EXCP_UDEF);
7274 s->is_jmp = DISAS_JUMP;
7275 break;
7276 }
7277 }
7278 }
7279
7280 /* Return true if this is a Thumb-2 logical op. */
7281 static int
7282 thumb2_logic_op(int op)
7283 {
7284 return (op < 8);
7285 }
7286
7287 /* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
7288 then set condition code flags based on the result of the operation.
7289 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
7290 to the high bit of T1.
7291 Returns zero if the opcode is valid. */
7292
7293 static int
7294 gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
7295 {
7296 int logic_cc;
7297
7298 logic_cc = 0;
7299 switch (op) {
7300 case 0: /* and */
7301 tcg_gen_and_i32(t0, t0, t1);
7302 logic_cc = conds;
7303 break;
7304 case 1: /* bic */
7305 tcg_gen_andc_i32(t0, t0, t1);
7306 logic_cc = conds;
7307 break;
7308 case 2: /* orr */
7309 tcg_gen_or_i32(t0, t0, t1);
7310 logic_cc = conds;
7311 break;
7312 case 3: /* orn */
7313 tcg_gen_not_i32(t1, t1);
7314 tcg_gen_or_i32(t0, t0, t1);
7315 logic_cc = conds;
7316 break;
7317 case 4: /* eor */
7318 tcg_gen_xor_i32(t0, t0, t1);
7319 logic_cc = conds;
7320 break;
7321 case 8: /* add */
7322 if (conds)
7323 gen_helper_add_cc(t0, t0, t1);
7324 else
7325 tcg_gen_add_i32(t0, t0, t1);
7326 break;
7327 case 10: /* adc */
7328 if (conds)
7329 gen_helper_adc_cc(t0, t0, t1);
7330 else
7331 gen_adc(t0, t1);
7332 break;
7333 case 11: /* sbc */
7334 if (conds)
7335 gen_helper_sbc_cc(t0, t0, t1);
7336 else
7337 gen_sub_carry(t0, t0, t1);
7338 break;
7339 case 13: /* sub */
7340 if (conds)
7341 gen_helper_sub_cc(t0, t0, t1);
7342 else
7343 tcg_gen_sub_i32(t0, t0, t1);
7344 break;
7345 case 14: /* rsb */
7346 if (conds)
7347 gen_helper_sub_cc(t0, t1, t0);
7348 else
7349 tcg_gen_sub_i32(t0, t1, t0);
7350 break;
7351 default: /* 5, 6, 7, 9, 12, 15. */
7352 return 1;
7353 }
7354 if (logic_cc) {
7355 gen_logic_CC(t0);
7356 if (shifter_out)
7357 gen_set_CF_bit31(t1);
7358 }
7359 return 0;
7360 }
7361
7362 /* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
7363 is not legal. */
7364 static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
7365 {
7366 uint32_t insn, imm, shift, offset;
7367 uint32_t rd, rn, rm, rs;
7368 TCGv tmp;
7369 TCGv tmp2;
7370 TCGv tmp3;
7371 TCGv addr;
7372 TCGv_i64 tmp64;
7373 int op;
7374 int shiftop;
7375 int conds;
7376 int logic_cc;
7377
7378 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
7379 || arm_feature (env, ARM_FEATURE_M))) {
7380 /* Thumb-1 cores may need to treat bl and blx as a pair of
7381 16-bit instructions to get correct prefetch abort behavior. */
7382 insn = insn_hw1;
7383 if ((insn & (1 << 12)) == 0) {
7384 /* Second half of blx. */
7385 offset = ((insn & 0x7ff) << 1);
7386 tmp = load_reg(s, 14);
7387 tcg_gen_addi_i32(tmp, tmp, offset);
7388 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
7389
7390 tmp2 = new_tmp();
7391 tcg_gen_movi_i32(tmp2, s->pc | 1);
7392 store_reg(s, 14, tmp2);
7393 gen_bx(s, tmp);
7394 return 0;
7395 }
7396 if (insn & (1 << 11)) {
7397 /* Second half of bl. */
7398 offset = ((insn & 0x7ff) << 1) | 1;
7399 tmp = load_reg(s, 14);
7400 tcg_gen_addi_i32(tmp, tmp, offset);
7401
7402 tmp2 = new_tmp();
7403 tcg_gen_movi_i32(tmp2, s->pc | 1);
7404 store_reg(s, 14, tmp2);
7405 gen_bx(s, tmp);
7406 return 0;
7407 }
7408 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
7409 /* Instruction spans a page boundary. Implement it as two
7410 16-bit instructions in case the second half causes a
7411 prefetch abort. */
7412 offset = ((int32_t)insn << 21) >> 9;
7413 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
7414 return 0;
7415 }
7416 /* Fall through to 32-bit decode. */
7417 }
7418
7419 insn = lduw_code(s->pc);
7420 s->pc += 2;
7421 insn |= (uint32_t)insn_hw1 << 16;
7422
7423 if ((insn & 0xf800e800) != 0xf000e800) {
7424 ARCH(6T2);
7425 }
7426
7427 rn = (insn >> 16) & 0xf;
7428 rs = (insn >> 12) & 0xf;
7429 rd = (insn >> 8) & 0xf;
7430 rm = insn & 0xf;
7431 switch ((insn >> 25) & 0xf) {
7432 case 0: case 1: case 2: case 3:
7433 /* 16-bit instructions. Should never happen. */
7434 abort();
7435 case 4:
7436 if (insn & (1 << 22)) {
7437 /* Other load/store, table branch. */
7438 if (insn & 0x01200000) {
7439 /* Load/store doubleword. */
7440 if (rn == 15) {
7441 addr = new_tmp();
7442 tcg_gen_movi_i32(addr, s->pc & ~3);
7443 } else {
7444 addr = load_reg(s, rn);
7445 }
7446 offset = (insn & 0xff) * 4;
7447 if ((insn & (1 << 23)) == 0)
7448 offset = -offset;
7449 if (insn & (1 << 24)) {
7450 tcg_gen_addi_i32(addr, addr, offset);
7451 offset = 0;
7452 }
7453 if (insn & (1 << 20)) {
7454 /* ldrd */
7455 tmp = gen_ld32(addr, IS_USER(s));
7456 store_reg(s, rs, tmp);
7457 tcg_gen_addi_i32(addr, addr, 4);
7458 tmp = gen_ld32(addr, IS_USER(s));
7459 store_reg(s, rd, tmp);
7460 } else {
7461 /* strd */
7462 tmp = load_reg(s, rs);
7463 gen_st32(tmp, addr, IS_USER(s));
7464 tcg_gen_addi_i32(addr, addr, 4);
7465 tmp = load_reg(s, rd);
7466 gen_st32(tmp, addr, IS_USER(s));
7467 }
7468 if (insn & (1 << 21)) {
7469 /* Base writeback. */
7470 if (rn == 15)
7471 goto illegal_op;
7472 tcg_gen_addi_i32(addr, addr, offset - 4);
7473 store_reg(s, rn, addr);
7474 } else {
7475 dead_tmp(addr);
7476 }
7477 } else if ((insn & (1 << 23)) == 0) {
7478 /* Load/store exclusive word. */
7479 addr = tcg_temp_local_new();
7480 load_reg_var(s, addr, rn);
7481 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
7482 if (insn & (1 << 20)) {
7483 gen_load_exclusive(s, rs, 15, addr, 2);
7484 } else {
7485 gen_store_exclusive(s, rd, rs, 15, addr, 2);
7486 }
7487 tcg_temp_free(addr);
7488 } else if ((insn & (1 << 6)) == 0) {
7489 /* Table Branch. */
7490 if (rn == 15) {
7491 addr = new_tmp();
7492 tcg_gen_movi_i32(addr, s->pc);
7493 } else {
7494 addr = load_reg(s, rn);
7495 }
7496 tmp = load_reg(s, rm);
7497 tcg_gen_add_i32(addr, addr, tmp);
7498 if (insn & (1 << 4)) {
7499 /* tbh */
7500 tcg_gen_add_i32(addr, addr, tmp);
7501 dead_tmp(tmp);
7502 tmp = gen_ld16u(addr, IS_USER(s));
7503 } else { /* tbb */
7504 dead_tmp(tmp);
7505 tmp = gen_ld8u(addr, IS_USER(s));
7506 }
7507 dead_tmp(addr);
7508 tcg_gen_shli_i32(tmp, tmp, 1);
7509 tcg_gen_addi_i32(tmp, tmp, s->pc);
7510 store_reg(s, 15, tmp);
7511 } else {
7512 /* Load/store exclusive byte/halfword/doubleword. */
7513 ARCH(7);
7514 op = (insn >> 4) & 0x3;
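/* op is the size: 0 = byte, 1 = halfword, 3 = doubleword;
   word-sized exclusives use the encoding handled above. */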
7515 if (op == 2) {
7516 goto illegal_op;
7517 }
7518 addr = tcg_temp_local_new();
7519 load_reg_var(s, addr, rn);
7520 if (insn & (1 << 20)) {
7521 gen_load_exclusive(s, rs, rd, addr, op);
7522 } else {
7523 gen_store_exclusive(s, rm, rs, rd, addr, op);
7524 }
7525 tcg_temp_free(addr);
7526 }
7527 } else {
7528 /* Load/store multiple, RFE, SRS. */
7529 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7530 /* Not available in user mode. */
7531 if (IS_USER(s))
7532 goto illegal_op;
7533 if (insn & (1 << 20)) {
7534 /* rfe */
7535 addr = load_reg(s, rn);
7536 if ((insn & (1 << 24)) == 0)
7537 tcg_gen_addi_i32(addr, addr, -8);
7538 /* Load PC into tmp and CPSR into tmp2. */
7539 tmp = gen_ld32(addr, 0);
7540 tcg_gen_addi_i32(addr, addr, 4);
7541 tmp2 = gen_ld32(addr, 0);
7542 if (insn & (1 << 21)) {
7543 /* Base writeback. */
7544 if (insn & (1 << 24)) {
7545 tcg_gen_addi_i32(addr, addr, 4);
7546 } else {
7547 tcg_gen_addi_i32(addr, addr, -4);
7548 }
7549 store_reg(s, rn, addr);
7550 } else {
7551 dead_tmp(addr);
7552 }
7553 gen_rfe(s, tmp, tmp2);
7554 } else {
7555 /* srs */
7556 op = (insn & 0x1f);
7557 if (op == (env->uncached_cpsr & CPSR_M)) {
7558 addr = load_reg(s, 13);
7559 } else {
7560 addr = new_tmp();
7561 tmp = tcg_const_i32(op);
7562 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7563 tcg_temp_free_i32(tmp);
7564 }
7565 if ((insn & (1 << 24)) == 0) {
7566 tcg_gen_addi_i32(addr, addr, -8);
7567 }
7568 tmp = load_reg(s, 14);
7569 gen_st32(tmp, addr, 0);
7570 tcg_gen_addi_i32(addr, addr, 4);
7571 tmp = new_tmp();
7572 gen_helper_cpsr_read(tmp);
7573 gen_st32(tmp, addr, 0);
7574 if (insn & (1 << 21)) {
7575 if ((insn & (1 << 24)) == 0) {
7576 tcg_gen_addi_i32(addr, addr, -4);
7577 } else {
7578 tcg_gen_addi_i32(addr, addr, 4);
7579 }
7580 if (op == (env->uncached_cpsr & CPSR_M)) {
7581 store_reg(s, 13, addr);
7582 } else {
7583 tmp = tcg_const_i32(op);
7584 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7585 tcg_temp_free_i32(tmp);
7586 }
7587 } else {
7588 dead_tmp(addr);
7589 }
7590 }
7591 } else {
7592 int i;
7593 /* Load/store multiple. */
7594 addr = load_reg(s, rn);
7595 offset = 0;
7596 for (i = 0; i < 16; i++) {
7597 if (insn & (1 << i))
7598 offset += 4;
7599 }
7600 if (insn & (1 << 24)) {
7601 tcg_gen_addi_i32(addr, addr, -offset);
7602 }
7603
7604 for (i = 0; i < 16; i++) {
7605 if ((insn & (1 << i)) == 0)
7606 continue;
7607 if (insn & (1 << 20)) {
7608 /* Load. */
7609 tmp = gen_ld32(addr, IS_USER(s));
7610 if (i == 15) {
7611 gen_bx(s, tmp);
7612 } else {
7613 store_reg(s, i, tmp);
7614 }
7615 } else {
7616 /* Store. */
7617 tmp = load_reg(s, i);
7618 gen_st32(tmp, addr, IS_USER(s));
7619 }
7620 tcg_gen_addi_i32(addr, addr, 4);
7621 }
7622 if (insn & (1 << 21)) {
7623 /* Base register writeback. */
7624 if (insn & (1 << 24)) {
7625 tcg_gen_addi_i32(addr, addr, -offset);
7626 }
7627 /* Fault if writeback register is in register list. */
7628 if (insn & (1 << rn))
7629 goto illegal_op;
7630 store_reg(s, rn, addr);
7631 } else {
7632 dead_tmp(addr);
7633 }
7634 }
7635 }
7636 break;
7637 case 5:
7638
7639 op = (insn >> 21) & 0xf;
7640 if (op == 6) {
7641 /* Halfword pack. */
7642 tmp = load_reg(s, rn);
7643 tmp2 = load_reg(s, rm);
7644 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
7645 if (insn & (1 << 5)) {
7646 /* pkhtb */
7647 if (shift == 0)
7648 shift = 31;
7649 tcg_gen_sari_i32(tmp2, tmp2, shift);
7650 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
7651 tcg_gen_ext16u_i32(tmp2, tmp2);
7652 } else {
7653 /* pkhbt */
7654 if (shift)
7655 tcg_gen_shli_i32(tmp2, tmp2, shift);
7656 tcg_gen_ext16u_i32(tmp, tmp);
7657 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
7658 }
7659 tcg_gen_or_i32(tmp, tmp, tmp2);
7660 dead_tmp(tmp2);
7661 store_reg(s, rd, tmp);
7662 } else {
7663 /* Data processing register constant shift. */
7664 if (rn == 15) {
7665 tmp = new_tmp();
7666 tcg_gen_movi_i32(tmp, 0);
7667 } else {
7668 tmp = load_reg(s, rn);
7669 }
7670 tmp2 = load_reg(s, rm);
7671
7672 shiftop = (insn >> 4) & 3;
7673 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7674 conds = (insn & (1 << 20)) != 0;
7675 logic_cc = (conds && thumb2_logic_op(op));
7676 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7677 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
7678 goto illegal_op;
7679 dead_tmp(tmp2);
7680 if (rd != 15) {
7681 store_reg(s, rd, tmp);
7682 } else {
7683 dead_tmp(tmp);
7684 }
7685 }
7686 break;
7687 case 13: /* Misc data processing. */
7688 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7689 if (op < 4 && (insn & 0xf000) != 0xf000)
7690 goto illegal_op;
7691 switch (op) {
7692 case 0: /* Register controlled shift. */
7693 tmp = load_reg(s, rn);
7694 tmp2 = load_reg(s, rm);
7695 if ((insn & 0x70) != 0)
7696 goto illegal_op;
7697 op = (insn >> 21) & 3;
7698 logic_cc = (insn & (1 << 20)) != 0;
7699 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7700 if (logic_cc)
7701 gen_logic_CC(tmp);
7702 store_reg_bx(env, s, rd, tmp);
7703 break;
7704 case 1: /* Sign/zero extend. */
7705 tmp = load_reg(s, rm);
7706 shift = (insn >> 4) & 3;
7707 /* ??? In many cases it's not necessary to do a
7708 rotate; a shift is sufficient. */
7709 if (shift != 0)
7710 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7711 op = (insn >> 20) & 7;
7712 switch (op) {
7713 case 0: gen_sxth(tmp); break;
7714 case 1: gen_uxth(tmp); break;
7715 case 2: gen_sxtb16(tmp); break;
7716 case 3: gen_uxtb16(tmp); break;
7717 case 4: gen_sxtb(tmp); break;
7718 case 5: gen_uxtb(tmp); break;
7719 default: goto illegal_op;
7720 }
7721 if (rn != 15) {
7722 tmp2 = load_reg(s, rn);
7723 if ((op >> 1) == 1) {
7724 gen_add16(tmp, tmp2);
7725 } else {
7726 tcg_gen_add_i32(tmp, tmp, tmp2);
7727 dead_tmp(tmp2);
7728 }
7729 }
7730 store_reg(s, rd, tmp);
7731 break;
7732 case 2: /* SIMD add/subtract. */
7733 op = (insn >> 20) & 7;
7734 shift = (insn >> 4) & 7;
7735 if ((op & 3) == 3 || (shift & 3) == 3)
7736 goto illegal_op;
7737 tmp = load_reg(s, rn);
7738 tmp2 = load_reg(s, rm);
7739 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7740 dead_tmp(tmp2);
7741 store_reg(s, rd, tmp);
7742 break;
7743 case 3: /* Other data processing. */
7744 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7745 if (op < 4) {
7746 /* Saturating add/subtract. */
7747 tmp = load_reg(s, rn);
7748 tmp2 = load_reg(s, rm);
7749 if (op & 1)
7750 gen_helper_double_saturate(tmp, tmp);
7751 if (op & 2)
7752 gen_helper_sub_saturate(tmp, tmp2, tmp);
7753 else
7754 gen_helper_add_saturate(tmp, tmp, tmp2);
7755 dead_tmp(tmp2);
7756 } else {
7757 tmp = load_reg(s, rn);
7758 switch (op) {
7759 case 0x0a: /* rbit */
7760 gen_helper_rbit(tmp, tmp);
7761 break;
7762 case 0x08: /* rev */
7763 tcg_gen_bswap32_i32(tmp, tmp);
7764 break;
7765 case 0x09: /* rev16 */
7766 gen_rev16(tmp);
7767 break;
7768 case 0x0b: /* revsh */
7769 gen_revsh(tmp);
7770 break;
7771 case 0x10: /* sel */
7772 tmp2 = load_reg(s, rm);
7773 tmp3 = new_tmp();
7774 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
7775 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7776 dead_tmp(tmp3);
7777 dead_tmp(tmp2);
7778 break;
7779 case 0x18: /* clz */
7780 gen_helper_clz(tmp, tmp);
7781 break;
7782 default:
7783 goto illegal_op;
7784 }
7785 }
7786 store_reg(s, rd, tmp);
7787 break;
7788 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7789 op = (insn >> 4) & 0xf;
7790 tmp = load_reg(s, rn);
7791 tmp2 = load_reg(s, rm);
7792 switch ((insn >> 20) & 7) {
7793 case 0: /* 32 x 32 -> 32 */
7794 tcg_gen_mul_i32(tmp, tmp, tmp2);
7795 dead_tmp(tmp2);
7796 if (rs != 15) {
7797 tmp2 = load_reg(s, rs);
7798 if (op)
7799 tcg_gen_sub_i32(tmp, tmp2, tmp);
7800 else
7801 tcg_gen_add_i32(tmp, tmp, tmp2);
7802 dead_tmp(tmp2);
7803 }
7804 break;
7805 case 1: /* 16 x 16 -> 32 */
7806 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7807 dead_tmp(tmp2);
7808 if (rs != 15) {
7809 tmp2 = load_reg(s, rs);
7810 gen_helper_add_setq(tmp, tmp, tmp2);
7811 dead_tmp(tmp2);
7812 }
7813 break;
7814 case 2: /* Dual multiply add. */
7815 case 4: /* Dual multiply subtract. */
7816 if (op)
7817 gen_swap_half(tmp2);
7818 gen_smul_dual(tmp, tmp2);
7819 /* This addition cannot overflow. */
7820 if (insn & (1 << 22)) {
7821 tcg_gen_sub_i32(tmp, tmp, tmp2);
7822 } else {
7823 tcg_gen_add_i32(tmp, tmp, tmp2);
7824 }
7825 dead_tmp(tmp2);
7826 if (rs != 15)
7827 {
7828 tmp2 = load_reg(s, rs);
7829 gen_helper_add_setq(tmp, tmp, tmp2);
7830 dead_tmp(tmp2);
7831 }
7832 break;
7833 case 3: /* 32 * 16 -> 32msb */
7834 if (op)
7835 tcg_gen_sari_i32(tmp2, tmp2, 16);
7836 else
7837 gen_sxth(tmp2);
7838 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7839 tcg_gen_shri_i64(tmp64, tmp64, 16);
7840 tmp = new_tmp();
7841 tcg_gen_trunc_i64_i32(tmp, tmp64);
7842 tcg_temp_free_i64(tmp64);
7843 if (rs != 15)
7844 {
7845 tmp2 = load_reg(s, rs);
7846 gen_helper_add_setq(tmp, tmp, tmp2);
7847 dead_tmp(tmp2);
7848 }
7849 break;
7850 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
7851 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7852 if (rs != 15) {
7853 tmp = load_reg(s, rs);
7854 if (insn & (1 << 20)) {
7855 tmp64 = gen_addq_msw(tmp64, tmp);
7856 } else {
7857 tmp64 = gen_subq_msw(tmp64, tmp);
7858 }
7859 }
7860 if (insn & (1 << 4)) {
7861 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
7862 }
7863 tcg_gen_shri_i64(tmp64, tmp64, 32);
7864 tmp = new_tmp();
7865 tcg_gen_trunc_i64_i32(tmp, tmp64);
7866 tcg_temp_free_i64(tmp64);
7867 break;
7868 case 7: /* Unsigned sum of absolute differences. */
7869 gen_helper_usad8(tmp, tmp, tmp2);
7870 dead_tmp(tmp2);
7871 if (rs != 15) {
7872 tmp2 = load_reg(s, rs);
7873 tcg_gen_add_i32(tmp, tmp, tmp2);
7874 dead_tmp(tmp2);
7875 }
7876 break;
7877 }
7878 store_reg(s, rd, tmp);
7879 break;
7880 case 6: case 7: /* 64-bit multiply, Divide. */
7881 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
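/* op packs bits 7-4 with bits 22-20 so a single field
   distinguishes the divide, dual multiply and long multiply
   forms below. */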
7882 tmp = load_reg(s, rn);
7883 tmp2 = load_reg(s, rm);
7884 if ((op & 0x50) == 0x10) {
7885 /* sdiv, udiv */
7886 if (!arm_feature(env, ARM_FEATURE_DIV))
7887 goto illegal_op;
7888 if (op & 0x20)
7889 gen_helper_udiv(tmp, tmp, tmp2);
7890 else
7891 gen_helper_sdiv(tmp, tmp, tmp2);
7892 dead_tmp(tmp2);
7893 store_reg(s, rd, tmp);
7894 } else if ((op & 0xe) == 0xc) {
7895 /* Dual multiply accumulate long. */
7896 if (op & 1)
7897 gen_swap_half(tmp2);
7898 gen_smul_dual(tmp, tmp2);
7899 if (op & 0x10) {
7900 tcg_gen_sub_i32(tmp, tmp, tmp2);
7901 } else {
7902 tcg_gen_add_i32(tmp, tmp, tmp2);
7903 }
7904 dead_tmp(tmp2);
7905 /* BUGFIX */
7906 tmp64 = tcg_temp_new_i64();
7907 tcg_gen_ext_i32_i64(tmp64, tmp);
7908 dead_tmp(tmp);
7909 gen_addq(s, tmp64, rs, rd);
7910 gen_storeq_reg(s, rs, rd, tmp64);
7911 tcg_temp_free_i64(tmp64);
7912 } else {
7913 if (op & 0x20) {
7914 /* Unsigned 64-bit multiply */
7915 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
7916 } else {
7917 if (op & 8) {
7918 /* smlalxy */
7919 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7920 dead_tmp(tmp2);
7921 tmp64 = tcg_temp_new_i64();
7922 tcg_gen_ext_i32_i64(tmp64, tmp);
7923 dead_tmp(tmp);
7924 } else {
7925 /* Signed 64-bit multiply */
7926 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7927 }
7928 }
7929 if (op & 4) {
7930 /* umaal */
7931 gen_addq_lo(s, tmp64, rs);
7932 gen_addq_lo(s, tmp64, rd);
7933 } else if (op & 0x40) {
7934 /* 64-bit accumulate. */
7935 gen_addq(s, tmp64, rs, rd);
7936 }
7937 gen_storeq_reg(s, rs, rd, tmp64);
7938 tcg_temp_free_i64(tmp64);
7939 }
7940 break;
7941 }
7942 break;
7943 case 6: case 7: case 14: case 15:
7944 /* Coprocessor. */
7945 if (((insn >> 24) & 3) == 3) {
7946 /* Translate into the equivalent ARM encoding. */
7947 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7948 if (disas_neon_data_insn(env, s, insn))
7949 goto illegal_op;
7950 } else {
7951 if (insn & (1 << 28))
7952 goto illegal_op;
7953 if (disas_coproc_insn (env, s, insn))
7954 goto illegal_op;
7955 }
7956 break;
7957 case 8: case 9: case 10: case 11:
7958 if (insn & (1 << 15)) {
7959 /* Branches, misc control. */
7960 if (insn & 0x5000) {
7961 /* Unconditional branch. */
7962 /* signextend(hw1[10:0]) -> offset[31:12]. */
7963 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7964 /* hw1[10:0] -> offset[11:1]. */
7965 offset |= (insn & 0x7ff) << 1;
7966 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7967 offset[24:22] already have the same value because of the
7968 sign extension above. */
7969 offset ^= ((~insn) & (1 << 13)) << 10;
7970 offset ^= ((~insn) & (1 << 11)) << 11;
7971
7972 if (insn & (1 << 14)) {
7973 /* Branch and link. */
7974 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
7975 }
7976
7977 offset += s->pc;
7978 if (insn & (1 << 12)) {
7979 /* b/bl */
7980 gen_jmp(s, offset);
7981 } else {
7982 /* blx */
7983 offset &= ~(uint32_t)2;
7984 gen_bx_im(s, offset);
7985 }
7986 } else if (((insn >> 23) & 7) == 7) {
7987 /* Misc control */
7988 if (insn & (1 << 13))
7989 goto illegal_op;
7990
7991 if (insn & (1 << 26)) {
7992 /* Secure monitor call (v6Z) */
7993 goto illegal_op; /* not implemented. */
7994 } else {
7995 op = (insn >> 20) & 7;
7996 switch (op) {
7997 case 0: /* msr cpsr. */
7998 if (IS_M(env)) {
7999 tmp = load_reg(s, rn);
8000 addr = tcg_const_i32(insn & 0xff);
8001 gen_helper_v7m_msr(cpu_env, addr, tmp);
8002 tcg_temp_free_i32(addr);
8003 dead_tmp(tmp);
8004 gen_lookup_tb(s);
8005 break;
8006 }
8007 /* fall through */
8008 case 1: /* msr spsr. */
8009 if (IS_M(env))
8010 goto illegal_op;
8011 tmp = load_reg(s, rn);
8012 if (gen_set_psr(s,
8013 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
8014 op == 1, tmp))
8015 goto illegal_op;
8016 break;
8017 case 2: /* cps, nop-hint. */
8018 if (((insn >> 8) & 7) == 0) {
8019 gen_nop_hint(s, insn & 0xff);
8020 }
8021 /* Implemented as NOP in user mode. */
8022 if (IS_USER(s))
8023 break;
8024 offset = 0;
8025 imm = 0;
8026 if (insn & (1 << 10)) {
8027 if (insn & (1 << 7))
8028 offset |= CPSR_A;
8029 if (insn & (1 << 6))
8030 offset |= CPSR_I;
8031 if (insn & (1 << 5))
8032 offset |= CPSR_F;
8033 if (insn & (1 << 9))
8034 imm = CPSR_A | CPSR_I | CPSR_F;
8035 }
8036 if (insn & (1 << 8)) {
8037 offset |= 0x1f;
8038 imm |= (insn & 0x1f);
8039 }
8040 if (offset) {
8041 gen_set_psr_im(s, offset, 0, imm);
8042 }
8043 break;
8044 case 3: /* Special control operations. */
8045 ARCH(7);
8046 op = (insn >> 4) & 0xf;
8047 switch (op) {
8048 case 2: /* clrex */
8049 gen_clrex(s);
8050 break;
8051 case 4: /* dsb */
8052 case 5: /* dmb */
8053 case 6: /* isb */
8054 /* These execute as NOPs. */
8055 break;
8056 default:
8057 goto illegal_op;
8058 }
8059 break;
8060 case 4: /* bxj */
8061 /* Trivial implementation equivalent to bx. */
8062 tmp = load_reg(s, rn);
8063 gen_bx(s, tmp);
8064 break;
8065 case 5: /* Exception return. */
8066 if (IS_USER(s)) {
8067 goto illegal_op;
8068 }
8069 if (rn != 14 || rd != 15) {
8070 goto illegal_op;
8071 }
8072 tmp = load_reg(s, rn);
8073 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
8074 gen_exception_return(s, tmp);
8075 break;
8076 case 6: /* mrs cpsr. */
8077 tmp = new_tmp();
8078 if (IS_M(env)) {
8079 addr = tcg_const_i32(insn & 0xff);
8080 gen_helper_v7m_mrs(tmp, cpu_env, addr);
8081 tcg_temp_free_i32(addr);
8082 } else {
8083 gen_helper_cpsr_read(tmp);
8084 }
8085 store_reg(s, rd, tmp);
8086 break;
8087 case 7: /* mrs spsr. */
8088 /* Not accessible in user mode. */
8089 if (IS_USER(s) || IS_M(env))
8090 goto illegal_op;
8091 tmp = load_cpu_field(spsr);
8092 store_reg(s, rd, tmp);
8093 break;
8094 }
8095 }
8096 } else {
8097 /* Conditional branch. */
8098 op = (insn >> 22) & 0xf;
8099 /* Generate a conditional jump to next instruction. */
8100 s->condlabel = gen_new_label();
8101 gen_test_cc(op ^ 1, s->condlabel);
8102 s->condjmp = 1;
8103
8104 /* offset[11:1] = insn[10:0] */
8105 offset = (insn & 0x7ff) << 1;
8106 /* offset[17:12] = insn[21:16]. */
8107 offset |= (insn & 0x003f0000) >> 4;
8108 /* offset[31:20] = insn[26]. */
8109 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
8110 /* offset[18] = insn[13]. */
8111 offset |= (insn & (1 << 13)) << 5;
8112 /* offset[19] = insn[11]. */
8113 offset |= (insn & (1 << 11)) << 8;
8114
8115 /* jump to the offset */
8116 gen_jmp(s, s->pc + offset);
8117 }
8118 } else {
8119 /* Data processing immediate. */
8120 if (insn & (1 << 25)) {
8121 if (insn & (1 << 24)) {
8122 if (insn & (1 << 20))
8123 goto illegal_op;
8124 /* Bitfield/Saturate. */
8125 op = (insn >> 21) & 7;
8126 imm = insn & 0x1f;
8127 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8128 if (rn == 15) {
8129 tmp = new_tmp();
8130 tcg_gen_movi_i32(tmp, 0);
8131 } else {
8132 tmp = load_reg(s, rn);
8133 }
8134 switch (op) {
8135 case 2: /* Signed bitfield extract. */
8136 imm++;
8137 if (shift + imm > 32)
8138 goto illegal_op;
8139 if (imm < 32)
8140 gen_sbfx(tmp, shift, imm);
8141 break;
8142 case 6: /* Unsigned bitfield extract. */
8143 imm++;
8144 if (shift + imm > 32)
8145 goto illegal_op;
8146 if (imm < 32)
8147 gen_ubfx(tmp, shift, (1u << imm) - 1);
8148 break;
8149 case 3: /* Bitfield insert/clear. */
8150 if (imm < shift)
8151 goto illegal_op;
8152 imm = imm + 1 - shift;
8153 if (imm != 32) {
8154 tmp2 = load_reg(s, rd);
8155 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8156 dead_tmp(tmp2);
8157 }
8158 break;
8159 case 7:
8160 goto illegal_op;
8161 default: /* Saturate. */
8162 if (shift) {
8163 if (op & 1)
8164 tcg_gen_sari_i32(tmp, tmp, shift);
8165 else
8166 tcg_gen_shli_i32(tmp, tmp, shift);
8167 }
8168 tmp2 = tcg_const_i32(imm);
8169 if (op & 4) {
8170 /* Unsigned. */
8171 if ((op & 1) && shift == 0)
8172 gen_helper_usat16(tmp, tmp, tmp2);
8173 else
8174 gen_helper_usat(tmp, tmp, tmp2);
8175 } else {
8176 /* Signed. */
8177 if ((op & 1) && shift == 0)
8178 gen_helper_ssat16(tmp, tmp, tmp2);
8179 else
8180 gen_helper_ssat(tmp, tmp, tmp2);
8181 }
8182 tcg_temp_free_i32(tmp2);
8183 break;
8184 }
8185 store_reg(s, rd, tmp);
8186 } else {
8187 imm = ((insn & 0x04000000) >> 15)
8188 | ((insn & 0x7000) >> 4) | (insn & 0xff);
8189 if (insn & (1 << 22)) {
8190 /* 16-bit immediate. */
8191 imm |= (insn >> 4) & 0xf000;
8192 if (insn & (1 << 23)) {
8193 /* movt */
8194 tmp = load_reg(s, rd);
8195 tcg_gen_ext16u_i32(tmp, tmp);
8196 tcg_gen_ori_i32(tmp, tmp, imm << 16);
8197 } else {
8198 /* movw */
8199 tmp = new_tmp();
8200 tcg_gen_movi_i32(tmp, imm);
8201 }
8202 } else {
8203 /* Add/sub 12-bit immediate. */
8204 if (rn == 15) {
8205 offset = s->pc & ~(uint32_t)3;
8206 if (insn & (1 << 23))
8207 offset -= imm;
8208 else
8209 offset += imm;
8210 tmp = new_tmp();
8211 tcg_gen_movi_i32(tmp, offset);
8212 } else {
8213 tmp = load_reg(s, rn);
8214 if (insn & (1 << 23))
8215 tcg_gen_subi_i32(tmp, tmp, imm);
8216 else
8217 tcg_gen_addi_i32(tmp, tmp, imm);
8218 }
8219 }
8220 store_reg(s, rd, tmp);
8221 }
8222 } else {
8223 int shifter_out = 0;
8224 /* modified 12-bit immediate. */
8225 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8226 imm = (insn & 0xff);
8227 switch (shift) {
8228 case 0: /* XY */
8229 /* Nothing to do. */
8230 break;
8231 case 1: /* 00XY00XY */
8232 imm |= imm << 16;
8233 break;
8234 case 2: /* XY00XY00 */
8235 imm |= imm << 16;
8236 imm <<= 8;
8237 break;
8238 case 3: /* XYXYXYXY */
8239 imm |= imm << 16;
8240 imm |= imm << 8;
8241 break;
8242 default: /* Rotated constant. */
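/* An 8-bit value with its top bit forced set, rotated right
   by shift; shift is at least 8 here, so the single left
   shift by 32 - shift performs the whole rotation. */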
8243 shift = (shift << 1) | (imm >> 7);
8244 imm |= 0x80;
8245 imm = imm << (32 - shift);
8246 shifter_out = 1;
8247 break;
8248 }
8249 tmp2 = new_tmp();
8250 tcg_gen_movi_i32(tmp2, imm);
8251 rn = (insn >> 16) & 0xf;
8252 if (rn == 15) {
8253 tmp = new_tmp();
8254 tcg_gen_movi_i32(tmp, 0);
8255 } else {
8256 tmp = load_reg(s, rn);
8257 }
8258 op = (insn >> 21) & 0xf;
8259 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
8260 shifter_out, tmp, tmp2))
8261 goto illegal_op;
8262 dead_tmp(tmp2);
8263 rd = (insn >> 8) & 0xf;
8264 if (rd != 15) {
8265 store_reg(s, rd, tmp);
8266 } else {
8267 dead_tmp(tmp);
8268 }
8269 }
8270 }
8271 break;
8272 case 12: /* Load/store single data item. */
8273 {
8274 int postinc = 0;
8275 int writeback = 0;
8276 int user;
8277 if ((insn & 0x01100000) == 0x01000000) {
8278 if (disas_neon_ls_insn(env, s, insn))
8279 goto illegal_op;
8280 break;
8281 }
8282 user = IS_USER(s);
8283 if (rn == 15) {
8284 addr = new_tmp();
8285 /* PC relative. */
8286 /* s->pc has already been incremented by 4. */
8287 imm = s->pc & 0xfffffffc;
8288 if (insn & (1 << 23))
8289 imm += insn & 0xfff;
8290 else
8291 imm -= insn & 0xfff;
8292 tcg_gen_movi_i32(addr, imm);
8293 } else {
8294 addr = load_reg(s, rn);
8295 if (insn & (1 << 23)) {
8296 /* Positive offset. */
8297 imm = insn & 0xfff;
8298 tcg_gen_addi_i32(addr, addr, imm);
8299 } else {
8300 op = (insn >> 8) & 7;
8301 imm = insn & 0xff;
8302 switch (op) {
8303 case 0: case 8: /* Shifted Register. */
8304 shift = (insn >> 4) & 0xf;
8305 if (shift > 3)
8306 goto illegal_op;
8307 tmp = load_reg(s, rm);
8308 if (shift)
8309 tcg_gen_shli_i32(tmp, tmp, shift);
8310 tcg_gen_add_i32(addr, addr, tmp);
8311 dead_tmp(tmp);
8312 break;
8313 case 4: /* Negative offset. */
8314 tcg_gen_addi_i32(addr, addr, -imm);
8315 break;
8316 case 6: /* User privilege. */
8317 tcg_gen_addi_i32(addr, addr, imm);
8318 user = 1;
8319 break;
8320 case 1: /* Post-decrement. */
8321 imm = -imm;
8322 /* Fall through. */
8323 case 3: /* Post-increment. */
8324 postinc = 1;
8325 writeback = 1;
8326 break;
8327 case 5: /* Pre-decrement. */
8328 imm = -imm;
8329 /* Fall through. */
8330 case 7: /* Pre-increment. */
8331 tcg_gen_addi_i32(addr, addr, imm);
8332 writeback = 1;
8333 break;
8334 default:
8335 goto illegal_op;
8336 }
8337 }
8338 }
8339 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
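/* op: low two bits are the size (0 = byte, 1 = halfword,
   2 = word); bit 2 set means sign-extend on load. */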
8340 if (insn & (1 << 20)) {
8341 /* Load. */
8342 if (rs == 15 && op != 2) {
8343 if (op & 2)
8344 goto illegal_op;
8345 /* Memory hint. Implemented as NOP. */
8346 } else {
8347 switch (op) {
8348 case 0: tmp = gen_ld8u(addr, user); break;
8349 case 4: tmp = gen_ld8s(addr, user); break;
8350 case 1: tmp = gen_ld16u(addr, user); break;
8351 case 5: tmp = gen_ld16s(addr, user); break;
8352 case 2: tmp = gen_ld32(addr, user); break;
8353 default: goto illegal_op;
8354 }
8355 if (rs == 15) {
8356 gen_bx(s, tmp);
8357 } else {
8358 store_reg(s, rs, tmp);
8359 }
8360 }
8361 } else {
8362 /* Store. */
8363 if (rs == 15)
8364 goto illegal_op;
8365 tmp = load_reg(s, rs);
8366 switch (op) {
8367 case 0: gen_st8(tmp, addr, user); break;
8368 case 1: gen_st16(tmp, addr, user); break;
8369 case 2: gen_st32(tmp, addr, user); break;
8370 default: goto illegal_op;
8371 }
8372 }
8373 if (postinc)
8374 tcg_gen_addi_i32(addr, addr, imm);
8375 if (writeback) {
8376 store_reg(s, rn, addr);
8377 } else {
8378 dead_tmp(addr);
8379 }
8380 }
8381 break;
8382 default:
8383 goto illegal_op;
8384 }
8385 return 0;
8386 illegal_op:
8387 return 1;
8388 }
8389
8390 static void disas_thumb_insn(CPUState *env, DisasContext *s)
8391 {
8392 uint32_t val, insn, op, rm, rn, rd, shift, cond;
8393 int32_t offset;
8394 int i;
8395 TCGv tmp;
8396 TCGv tmp2;
8397 TCGv addr;
8398
8399 if (s->condexec_mask) {
8400 cond = s->condexec_cond;
8401 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
8402 s->condlabel = gen_new_label();
8403 gen_test_cc(cond ^ 1, s->condlabel);
8404 s->condjmp = 1;
8405 }
8406 }
8407
8408 insn = lduw_code(s->pc);
8409 s->pc += 2;
8410
8411 switch (insn >> 12) {
8412 case 0: case 1:
8413
8414 rd = insn & 7;
8415 op = (insn >> 11) & 3;
8416 if (op == 3) {
8417 /* add/subtract */
8418 rn = (insn >> 3) & 7;
8419 tmp = load_reg(s, rn);
8420 if (insn & (1 << 10)) {
8421 /* immediate */
8422 tmp2 = new_tmp();
8423 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
8424 } else {
8425 /* reg */
8426 rm = (insn >> 6) & 7;
8427 tmp2 = load_reg(s, rm);
8428 }
8429 if (insn & (1 << 9)) {
8430 if (s->condexec_mask)
8431 tcg_gen_sub_i32(tmp, tmp, tmp2);
8432 else
8433 gen_helper_sub_cc(tmp, tmp, tmp2);
8434 } else {
8435 if (s->condexec_mask)
8436 tcg_gen_add_i32(tmp, tmp, tmp2);
8437 else
8438 gen_helper_add_cc(tmp, tmp, tmp2);
8439 }
8440 dead_tmp(tmp2);
8441 store_reg(s, rd, tmp);
8442 } else {
8443 /* shift immediate */
8444 rm = (insn >> 3) & 7;
8445 shift = (insn >> 6) & 0x1f;
8446 tmp = load_reg(s, rm);
8447 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
8448 if (!s->condexec_mask)
8449 gen_logic_CC(tmp);
8450 store_reg(s, rd, tmp);
8451 }
8452 break;
8453 case 2: case 3:
8454 /* arithmetic large immediate */
8455 op = (insn >> 11) & 3;
8456 rd = (insn >> 8) & 0x7;
8457 if (op == 0) { /* mov */
8458 tmp = new_tmp();
8459 tcg_gen_movi_i32(tmp, insn & 0xff);
8460 if (!s->condexec_mask)
8461 gen_logic_CC(tmp);
8462 store_reg(s, rd, tmp);
8463 } else {
8464 tmp = load_reg(s, rd);
8465 tmp2 = new_tmp();
8466 tcg_gen_movi_i32(tmp2, insn & 0xff);
8467 switch (op) {
8468 case 1: /* cmp */
8469 gen_helper_sub_cc(tmp, tmp, tmp2);
8470 dead_tmp(tmp);
8471 dead_tmp(tmp2);
8472 break;
8473 case 2: /* add */
8474 if (s->condexec_mask)
8475 tcg_gen_add_i32(tmp, tmp, tmp2);
8476 else
8477 gen_helper_add_cc(tmp, tmp, tmp2);
8478 dead_tmp(tmp2);
8479 store_reg(s, rd, tmp);
8480 break;
8481 case 3: /* sub */
8482 if (s->condexec_mask)
8483 tcg_gen_sub_i32(tmp, tmp, tmp2);
8484 else
8485 gen_helper_sub_cc(tmp, tmp, tmp2);
8486 dead_tmp(tmp2);
8487 store_reg(s, rd, tmp);
8488 break;
8489 }
8490 }
8491 break;
8492 case 4:
8493 if (insn & (1 << 11)) {
8494 rd = (insn >> 8) & 7;
8495 /* load pc-relative. Bit 1 of PC is ignored. */
8496 val = s->pc + 2 + ((insn & 0xff) * 4);
8497 val &= ~(uint32_t)2;
8498 addr = new_tmp();
8499 tcg_gen_movi_i32(addr, val);
8500 tmp = gen_ld32(addr, IS_USER(s));
8501 dead_tmp(addr);
8502 store_reg(s, rd, tmp);
8503 break;
8504 }
8505 if (insn & (1 << 10)) {
8506 /* data processing extended or blx */
8507 rd = (insn & 7) | ((insn >> 4) & 8);
8508 rm = (insn >> 3) & 0xf;
8509 op = (insn >> 8) & 3;
8510 switch (op) {
8511 case 0: /* add */
8512 tmp = load_reg(s, rd);
8513 tmp2 = load_reg(s, rm);
8514 tcg_gen_add_i32(tmp, tmp, tmp2);
8515 dead_tmp(tmp2);
8516 store_reg(s, rd, tmp);
8517 break;
8518 case 1: /* cmp */
8519 tmp = load_reg(s, rd);
8520 tmp2 = load_reg(s, rm);
8521 gen_helper_sub_cc(tmp, tmp, tmp2);
8522 dead_tmp(tmp2);
8523 dead_tmp(tmp);
8524 break;
8525 case 2: /* mov/cpy */
8526 tmp = load_reg(s, rm);
8527 store_reg(s, rd, tmp);
8528 break;
8529 case 3: /* branch [and link] exchange thumb register */
8530 tmp = load_reg(s, rm);
8531 if (insn & (1 << 7)) {
8532 val = (uint32_t)s->pc | 1;
8533 tmp2 = new_tmp();
8534 tcg_gen_movi_i32(tmp2, val);
8535 store_reg(s, 14, tmp2);
8536 }
8537 gen_bx(s, tmp);
8538 break;
8539 }
8540 break;
8541 }
8542
8543 /* data processing register */
8544 rd = insn & 7;
8545 rm = (insn >> 3) & 7;
8546 op = (insn >> 6) & 0xf;
8547 if (op == 2 || op == 3 || op == 4 || op == 7) {
8548 /* the shift/rotate ops want the operands backwards */
8549 val = rm;
8550 rm = rd;
8551 rd = val;
8552 val = 1;
8553 } else {
8554 val = 0;
8555 }

        if (op == 9) { /* neg */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
        } else if (op != 0xf) { /* mvn doesn't read its first operand */
            tmp = load_reg(s, rd);
        } else {
            TCGV_UNUSED(tmp);
        }

        tmp2 = load_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x1: /* eor */
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(tmp2, tmp2, tmp);
            } else {
                gen_helper_shl_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(tmp2, tmp2, tmp);
            } else {
                gen_helper_shr_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(tmp2, tmp2, tmp);
            } else {
                gen_helper_sar_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc(tmp, tmp2);
            else
                gen_helper_adc_cc(tmp, tmp, tmp2);
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sub_carry(tmp, tmp, tmp2);
            else
                gen_helper_sbc_cc(tmp, tmp, tmp2);
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                tcg_gen_andi_i32(tmp, tmp, 0x1f);
                tcg_gen_rotr_i32(tmp2, tmp2, tmp);
            } else {
                gen_helper_ror_cc(tmp2, tmp2, tmp);
                gen_logic_CC(tmp2);
            }
            break;
        case 0x8: /* tst */
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
            rd = 16;
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(tmp, tmp2);
            else
                gen_helper_sub_cc(tmp, tmp, tmp2);
            break;
        case 0xa: /* cmp */
            gen_helper_sub_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_helper_add_cc(tmp, tmp, tmp2);
            rd = 16;
            break;
        case 0xc: /* orr */
            tcg_gen_or_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xd: /* mul */
            tcg_gen_mul_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xe: /* bic */
            tcg_gen_andc_i32(tmp, tmp, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            break;
        case 0xf: /* mvn */
            tcg_gen_not_i32(tmp2, tmp2);
            if (!s->condexec_mask)
                gen_logic_CC(tmp2);
            val = 1;
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val) {
                store_reg(s, rm, tmp2);
                if (op != 0xf)
                    dead_tmp(tmp);
            } else {
                store_reg(s, rd, tmp);
                dead_tmp(tmp2);
            }
        } else {
            dead_tmp(tmp);
            dead_tmp(tmp2);
        }
        break;

    case 5:
        /* load/store register offset. */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        dead_tmp(tmp);

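        /* op encodes the access: 0-2 are stores (str/strh/strb),
           3-7 are loads of various widths and signedness, as the
           cases below show. */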
        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        dead_tmp(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
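        /* the 5-bit immediate is pre-scaled by the 4-byte access size */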
        val = (insn >> 4) & 0x7c;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored. */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend. */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
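            /* offset now holds the total transfer size in bytes,
               including the extra PC/LR slot when bit 8 is set. */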
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address. */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
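            /* tmp is only assigned on the pop-PC path below; the final
               gen_bx is likewise only reached in that case. */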
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;

        case 1: case 3: case 9: case 11: /* cbz/cbnz */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            dead_tmp(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
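            /* offset is (i:imm5) * 2, assembled from insn bits 9 and 7:3 */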
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint. */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then. */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn; just set up state. */
            break;

        case 0xe: /* bkpt */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 2);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                /* PRIMASK */
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                /* FAULTMASK */
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                    tcg_temp_free_i32(addr);
                }
                tcg_temp_free_i32(tmp);
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;
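                /* bits [2:0] (a,i,f) of the insn line up with the CPSR
                   A/I/F bits once shifted left 6; 'shift' is the mask
                   of flags to update */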
                gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback. */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            dead_tmp(addr);
        }
        break;

    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
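        /* sign-extend the 8-bit immediate and scale it to halfwords */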
        offset = ((int32_t)insn << 24) >> 24;
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
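        /* sign-extend the 11-bit immediate, scaled to halfwords below */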
        offset = ((int32_t)insn << 21) >> 21;
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
    return;
illegal_op:
undef:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 2);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
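    /* env->condexec_bits packs the IT state: bits [3:0] are the mask
       (pre-shifted above so the main loop can consume one bit per
       insn) and bits [7:4] the base condition. */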
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    cpu_V0 = cpu_F0d;
    cpu_V1 = cpu_F1d;
    /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
    cpu_M0 = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
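    /* a zero count means no icount limit was requested; use the
       largest encodable value so only the other loop conditions
       terminate translation */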

    gen_icount_start();
    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (env->condexec_bits) {
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
    do {
#ifdef CONFIG_USER_ONLY
        /* Intercept jump to the magic kernel page. */
        if (dc->pc >= 0xffff0000) {
            /* We always get here via a jump, so we know we are not in a
               conditional execution block. */
            gen_exception(EXCP_KERNEL_TRAP);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#else
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* We always get here via a jump, so we know we are not in a
               conditional execution block. */
            gen_exception(EXCP_EXCEPTION_EXIT);
            dc->is_jmp = DISAS_UPDATE;
            break;
        }
#endif

        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_condexec(dc);
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2;
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        if (env->thumb) {
            disas_thumb_insn(env, dc);
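            /* Advance the IT state: fold the next mask bit into the
               condition and shift one bit out of the mask. */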
            if (dc->condexec_mask) {
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                    | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_exception(EXCP_SWI);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_exception(EXCP_SWI);
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU. */
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
           - Exception generating instructions (bkpt, swi, undefined).
           - Page boundaries.
           - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_helper_wfi();
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, env->thumb);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int i;
#if 0
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */
    union {
        float64 f64;
        double d;
    } d0;
#endif
    uint32_t psr;

    for (i = 0; i < 16; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3)
            cpu_fprintf(f, "\n");
        else
            cpu_fprintf(f, " ");
    }
    psr = cpsr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                psr & CPSR_T ? 'T' : 'A',
                cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);

#if 0
    for (i = 0; i < 16; i++) {
        d.d = env->vfp.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
                    d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
#endif
}

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
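    /* regs[15] is the ARM PC; gen_opc_pc[] was filled in by the
       search_pc pass of gen_intermediate_code_internal above. */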
    env->regs[15] = gen_opc_pc[pc_pos];
}