/*
 * target-arm/translate.c (qemu.git via git.proxmox.com)
 * Snapshot commit: "Watchpoint support (previous commit got eaten by
 * Savannah server crash)."
 */
1 /*
2 * ARM translation
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <string.h>
25 #include <inttypes.h>
26
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "disas.h"
30
/* Architecture-extension gates: nonzero enables decoding of the
   corresponding instruction-set extension in this build.  */
#define ENABLE_ARCH_5J 0
#define ENABLE_ARCH_6 1
#define ENABLE_ARCH_6T2 1

/* Abort decoding as an undefined instruction when the required
   extension is disabled.  Only usable inside decoder functions that
   provide an illegal_op label.  */
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
36
/* internal defines */

/* Per-translation-block decoder state threaded through every disas_*
   helper below.  */
typedef struct DisasContext {
    target_ulong pc;       /* address of the next instruction to decode */
    int is_jmp;            /* DISAS_* code describing how the TB ends */
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;             /* nonzero when translating Thumb code */
    int is_mem;            /* set once a memory-access op was generated */
#if !defined(CONFIG_USER_ONLY)
    int user;              /* nonzero when translating user-mode code */
#endif
} DisasContext;
53
/* In user-only emulation everything is unprivileged, so the test folds
   to a constant; otherwise it depends on the mode being translated.  */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* Extra is_jmp value: force a TB lookup for the next instruction.  */
#define DISAS_JUMP_NEXT 4

/* With direct jumps the TB address is patched into the generated code,
   so no operand is emitted; otherwise pass the TB as an immediate.  */
#ifdef USE_DIRECT_JUMP
#define TBPARAM(x)
#else
#define TBPARAM(x) (long)(x)
#endif
67
/* XXX: move that elsewhere */
/* Output cursors for the generated micro-op stream and its operands.  */
static uint16_t *gen_opc_ptr;
static uint32_t *gen_opparam_ptr;
extern FILE *logfile;
extern int loglevel;

/* Build the micro-op index enumeration from the DEF() list in opc.h.  */
enum {
#define DEF(s, n, copy_size) INDEX_op_ ## s,
#include "opc.h"
#undef DEF
    NB_OPS,
};

#include "gen-op.h"
82
/* Conditional-branch generators, one per ARM condition code 0..13 in
   encoding order (AL and NV have no entry).  Each takes the label to
   jump to when its condition holds; the decoder indexes with cond ^ 1
   to branch when the instruction's condition fails.  */
static GenOpFunc1 *gen_test_cc[14] = {
    gen_op_test_eq,
    gen_op_test_ne,
    gen_op_test_cs,
    gen_op_test_cc,
    gen_op_test_mi,
    gen_op_test_pl,
    gen_op_test_vs,
    gen_op_test_vc,
    gen_op_test_hi,
    gen_op_test_ls,
    gen_op_test_ge,
    gen_op_test_lt,
    gen_op_test_gt,
    gen_op_test_le,
};
99
/* Nonzero for data-processing opcodes whose S-bit flag update is of
   the "logic" kind (N/Z from the result, C from the shifter) rather
   than arithmetic.  Indexed by the opcode field, insn bits [24:21].  */
const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
118
/* Shifter helper tables, all indexed by the 2-bit shift type
   (0=LSL, 1=LSR, 2=ASR, 3=ROR).  The _im variants take an immediate
   shift amount; the _0 variants handle the special shift-amount-zero
   encodings (the ROR slot is RRX, hence no LSL entry); the _T0 variants
   shift by the amount in T0; the _cc variants also update the carry.  */

static GenOpFunc1 *gen_shift_T1_im[4] = {
    gen_op_shll_T1_im,
    gen_op_shrl_T1_im,
    gen_op_sarl_T1_im,
    gen_op_rorl_T1_im,
};

static GenOpFunc *gen_shift_T1_0[4] = {
    NULL,
    gen_op_shrl_T1_0,
    gen_op_sarl_T1_0,
    gen_op_rrxl_T1,
};

static GenOpFunc1 *gen_shift_T2_im[4] = {
    gen_op_shll_T2_im,
    gen_op_shrl_T2_im,
    gen_op_sarl_T2_im,
    gen_op_rorl_T2_im,
};

static GenOpFunc *gen_shift_T2_0[4] = {
    NULL,
    gen_op_shrl_T2_0,
    gen_op_sarl_T2_0,
    gen_op_rrxl_T2,
};

static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
    gen_op_shll_T1_im_cc,
    gen_op_shrl_T1_im_cc,
    gen_op_sarl_T1_im_cc,
    gen_op_rorl_T1_im_cc,
};

static GenOpFunc *gen_shift_T1_0_cc[4] = {
    NULL,
    gen_op_shrl_T1_0_cc,
    gen_op_sarl_T1_0_cc,
    gen_op_rrxl_T1_cc,
};

static GenOpFunc *gen_shift_T1_T0[4] = {
    gen_op_shll_T1_T0,
    gen_op_shrl_T1_T0,
    gen_op_sarl_T1_T0,
    gen_op_rorl_T1_T0,
};

static GenOpFunc *gen_shift_T1_T0_cc[4] = {
    gen_op_shll_T1_T0_cc,
    gen_op_shrl_T1_T0_cc,
    gen_op_sarl_T1_T0_cc,
    gen_op_rorl_T1_T0_cc,
};
174
/* gen_op_movl_TN_reg[t][reg]: emit "Tt = CPU register reg" for the
   scratch temporaries t in {0,1,2}.  */
static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
    {
        gen_op_movl_T0_r0,
        gen_op_movl_T0_r1,
        gen_op_movl_T0_r2,
        gen_op_movl_T0_r3,
        gen_op_movl_T0_r4,
        gen_op_movl_T0_r5,
        gen_op_movl_T0_r6,
        gen_op_movl_T0_r7,
        gen_op_movl_T0_r8,
        gen_op_movl_T0_r9,
        gen_op_movl_T0_r10,
        gen_op_movl_T0_r11,
        gen_op_movl_T0_r12,
        gen_op_movl_T0_r13,
        gen_op_movl_T0_r14,
        gen_op_movl_T0_r15,
    },
    {
        gen_op_movl_T1_r0,
        gen_op_movl_T1_r1,
        gen_op_movl_T1_r2,
        gen_op_movl_T1_r3,
        gen_op_movl_T1_r4,
        gen_op_movl_T1_r5,
        gen_op_movl_T1_r6,
        gen_op_movl_T1_r7,
        gen_op_movl_T1_r8,
        gen_op_movl_T1_r9,
        gen_op_movl_T1_r10,
        gen_op_movl_T1_r11,
        gen_op_movl_T1_r12,
        gen_op_movl_T1_r13,
        gen_op_movl_T1_r14,
        gen_op_movl_T1_r15,
    },
    {
        gen_op_movl_T2_r0,
        gen_op_movl_T2_r1,
        gen_op_movl_T2_r2,
        gen_op_movl_T2_r3,
        gen_op_movl_T2_r4,
        gen_op_movl_T2_r5,
        gen_op_movl_T2_r6,
        gen_op_movl_T2_r7,
        gen_op_movl_T2_r8,
        gen_op_movl_T2_r9,
        gen_op_movl_T2_r10,
        gen_op_movl_T2_r11,
        gen_op_movl_T2_r12,
        gen_op_movl_T2_r13,
        gen_op_movl_T2_r14,
        gen_op_movl_T2_r15,
    },
};
231
/* gen_op_movl_reg_TN[t][reg]: emit "CPU register reg = Tt" for
   t in {0,1} (there is no write-back table row for T2).  */
static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
    {
        gen_op_movl_r0_T0,
        gen_op_movl_r1_T0,
        gen_op_movl_r2_T0,
        gen_op_movl_r3_T0,
        gen_op_movl_r4_T0,
        gen_op_movl_r5_T0,
        gen_op_movl_r6_T0,
        gen_op_movl_r7_T0,
        gen_op_movl_r8_T0,
        gen_op_movl_r9_T0,
        gen_op_movl_r10_T0,
        gen_op_movl_r11_T0,
        gen_op_movl_r12_T0,
        gen_op_movl_r13_T0,
        gen_op_movl_r14_T0,
        gen_op_movl_r15_T0,
    },
    {
        gen_op_movl_r0_T1,
        gen_op_movl_r1_T1,
        gen_op_movl_r2_T1,
        gen_op_movl_r3_T1,
        gen_op_movl_r4_T1,
        gen_op_movl_r5_T1,
        gen_op_movl_r6_T1,
        gen_op_movl_r7_T1,
        gen_op_movl_r8_T1,
        gen_op_movl_r9_T1,
        gen_op_movl_r10_T1,
        gen_op_movl_r11_T1,
        gen_op_movl_r12_T1,
        gen_op_movl_r13_T1,
        gen_op_movl_r14_T1,
        gen_op_movl_r15_T1,
    },
};
270
/* Load a 32-bit immediate into T0/T1/T2.  */
static GenOpFunc1 *gen_op_movl_TN_im[3] = {
    gen_op_movl_T0_im,
    gen_op_movl_T1_im,
    gen_op_movl_T2_im,
};

/* Thumb immediate shifts of T0, indexed 0=LSL, 1=LSR, 2=ASR (no ROR
   entry in this table).  */
static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
    gen_op_shll_T0_im_thumb,
    gen_op_shrl_T0_im_thumb,
    gen_op_sarl_T0_im_thumb,
};
282
/* Emit a branch-exchange to the address in T0 and end the TB
   (DISAS_UPDATE) so the possibly-changed CPU state is re-examined.  */
static inline void gen_bx(DisasContext *s)
{
    s->is_jmp = DISAS_UPDATE;
    gen_op_bx_T0();
}
288

/* Emit the load/store micro-op "name".  User-only builds have a single
   _raw variant; system builds pick the _user/_kernel variant from the
   translated privilege level and record that the TB accessed memory.
   NOTE(review): s->is_mem is presumably consumed by the watchpoint
   support this snapshot adds -- confirm against the rest of the file.  */
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
    s->is_mem = 1; \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
301
/* Copy CPU register REG into scratch Tt (t in {0,1,2}).  Reads of r15
   are synthesised as an immediate: s->pc already points past the
   current insn, so one more insn width (2 Thumb / 4 ARM) yields the
   architectural PC-ahead value.  */
static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
{
    int val;

    if (reg == 15) {
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            val = (long)s->pc + 2;
        else
            val = (long)s->pc + 4;
        gen_op_movl_TN_im[t](val);
    } else {
        gen_op_movl_TN_reg[t][reg]();
    }
}
317
/* Convenience wrappers: Tn = reg.  */
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 0);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 1);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 2);
}

/* Write Tt back to CPU register REG; a write to r15 is a jump, so mark
   the TB as ending.  */
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    gen_op_movl_reg_TN[t][reg]();
    if (reg == 15) {
        s->is_jmp = DISAS_JUMP;
    }
}

/* Convenience wrappers: reg = Tn.  */
static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}
350
/* Force a TB lookup after an instruction that changes the CPU state:
   store the next PC into r15 and end the TB with DISAS_UPDATE.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
358
359 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
360 {
361 int val, rm, shift, shiftop;
362
363 if (!(insn & (1 << 25))) {
364 /* immediate */
365 val = insn & 0xfff;
366 if (!(insn & (1 << 23)))
367 val = -val;
368 if (val != 0)
369 gen_op_addl_T1_im(val);
370 } else {
371 /* shift/register */
372 rm = (insn) & 0xf;
373 shift = (insn >> 7) & 0x1f;
374 gen_movl_T2_reg(s, rm);
375 shiftop = (insn >> 5) & 3;
376 if (shift != 0) {
377 gen_shift_T2_im[shiftop](shift);
378 } else if (shiftop != 0) {
379 gen_shift_T2_0[shiftop]();
380 }
381 if (!(insn & (1 << 23)))
382 gen_op_subl_T1_T2();
383 else
384 gen_op_addl_T1_T2();
385 }
386 }
387
388 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
389 int extra)
390 {
391 int val, rm;
392
393 if (insn & (1 << 22)) {
394 /* immediate */
395 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
396 if (!(insn & (1 << 23)))
397 val = -val;
398 val += extra;
399 if (val != 0)
400 gen_op_addl_T1_im(val);
401 } else {
402 /* register */
403 if (extra)
404 gen_op_addl_T1_im(extra);
405 rm = (insn) & 0xf;
406 gen_movl_T2_reg(s, rm);
407 if (!(insn & (1 << 23)))
408 gen_op_subl_T1_T2();
409 else
410 gen_op_addl_T1_T2();
411 }
412 }
413
/* Define gen_vfp_<name>(dp): dispatch to the double-precision (..d) or
   single-precision (..s) micro-op according to DP.  */
#define VFP_OP(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_op_vfp_##name##d(); \
    else \
        gen_op_vfp_##name##s(); \
}

VFP_OP(add)
VFP_OP(sub)
VFP_OP(mul)
VFP_OP(div)
VFP_OP(neg)
VFP_OP(abs)
VFP_OP(sqrt)
VFP_OP(cmp)
VFP_OP(cmpe)
VFP_OP(F1_ld0)
VFP_OP(uito)
VFP_OP(sito)
VFP_OP(toui)
VFP_OP(touiz)
VFP_OP(tosi)
VFP_OP(tosiz)

#undef VFP_OP
441
/* Emit a VFP load from the address in T1 into F0 (double or single).  */
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_ldd, s);
    else
        gen_ldst(vfp_lds, s);
}

/* Emit a VFP store of F0 to the address in T1 (double or single).  */
static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        gen_ldst(vfp_std, s);
    else
        gen_ldst(vfp_sts, s);
}
457
458 static inline long
459 vfp_reg_offset (int dp, int reg)
460 {
461 if (dp)
462 return offsetof(CPUARMState, vfp.regs[reg]);
463 else if (reg & 1) {
464 return offsetof(CPUARMState, vfp.regs[reg >> 1])
465 + offsetof(CPU_DoubleU, l.upper);
466 } else {
467 return offsetof(CPUARMState, vfp.regs[reg >> 1])
468 + offsetof(CPU_DoubleU, l.lower);
469 }
470 }
/* Copy VFP register REG (double if DP, else single) into scratch F0.  */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
}

/* Copy VFP register REG into scratch F1.  */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    if (dp)
        gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
}

/* Store scratch F0 into VFP register REG.  */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    if (dp)
        gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
    else
        gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
}
494
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp15_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd;

    /* ??? Some cp15 registers are accessible from userspace.  */
    if (IS_USER(s)) {
        return 1;
    }
    /* The two "wait for interrupt" encodings are special-cased: commit
       the PC and stop translation so the CPU can halt.  */
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  */
        gen_op_movl_T0_im((long)s->pc);
        gen_op_movl_reg_TN[0][15]();
        gen_op_wfi();
        s->is_jmp = DISAS_JUMP;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & (1 << 20)) {
        /* cp15 -> core register (MRC).  */
        gen_op_movl_T0_cp15(insn);
        /* If the destination register is r15 then sets condition codes.
           NOTE(review): that flag-setting behaviour is not implemented
           here; the read value is simply discarded for rd == 15.  */
        if (rd != 15)
            gen_movl_reg_T0(s, rd);
    } else {
        /* core register -> cp15 (MCR).  */
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp15_T0(insn);
    }
    /* cp15 writes may change translation state; force a TB lookup.  */
    gen_lookup_tb(s);
    return 0;
}
527
/* Disassemble a VFP instruction.  Returns nonzero if an error occured
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    /* FPEXC bit 30 (the enable bit) gates everything below except the
       fmxr/fmrx forms allowed by the architecture while disabled.  */
    if ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) == 0) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from fpexc and fpsid.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != 0 && rn != 8)
            return 1;
    }
    /* Coprocessor 11 (0xb00) is double precision, cp10 single.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            if ((insn & 0x6f) != 0x00)
                return 1;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                if (insn & 0x80)
                    return 1;
                rn = (insn >> 16) & 0xf;
                /* Get the existing value even for arm->vfp moves because
                   we only set half the register.  */
                gen_mov_F0_vreg(1, rn);
                gen_op_vfp_mrrd();
                if (insn & (1 << 20)) {
                    /* vfp->arm */
                    if (insn & (1 << 21))
                        gen_movl_reg_T1(s, rd);
                    else
                        gen_movl_reg_T0(s, rd);
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21))
                        gen_movl_T1_reg(s, rd);
                    else
                        gen_movl_T0_reg(s, rd);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(dp, rn);
                }
            } else {
                /* Single-precision register number: Fn with the low bit
                   taken from insn bit 7.  */
                rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                if (insn & (1 << 20)) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_FPEXC:
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPSCR:
                            /* rd == 15 is the fmstat form: only the
                               flag bits are wanted.  */
                            if (rd == 15)
                                gen_op_vfp_movl_T0_fpscr_flags();
                            else
                                gen_op_vfp_movl_T0_fpscr();
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        gen_op_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_op_movl_cpsr_T0(0xf0000000);
                    } else
                        gen_movl_reg_T0(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            /* FPSCR writes can change vector length /
                               stride, so force a TB lookup.  */
                            gen_op_vfp_movl_fpscr_T0();
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            gen_op_vfp_movl_xreg_T0(rn);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_xreg_T0(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_op_vfp_msr();
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            /* Decode rd/rn/rm register numbers; op 15 is the extension
               space where rn selects the operation and the operand
               precisions can differ from dp.  */
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    if (insn & (1 << 7))
                        return 1;
                    rn = (insn >> 16) & 0xf;
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
                } else {
                    if (insn & (1 << 22))
                        return 1;
                    rd = (insn >> 12) & 0xf;
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    if (insn & (1 << 5))
                        return 1;
                    rm = insn & 0xf;
                }
            } else {
                rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    if (insn & (1 << 22))
                        return 1;
                    rd = (insn >> 12) & 0xf;
                } else
                    rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
                rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
            }

            /* Extension-space ops above fsqrt never vectorize.  */
            veclen = env->vfp.vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Each iteration handles one element of a (possibly
               length-1) short vector.  */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    gen_vfp_neg(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_op_vfp_fcvtsd();
                        else
                            gen_op_vfp_fcvtds();
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    default: /* undefined */
                        /* NOTE(review): debug printf leftover on the
                           undefined-instruction path.  */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    /* NOTE(review): debug printf leftover, see above.  */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  Register numbers advance by
                   the stride but wrap within their 4/8-register bank.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        /* NOTE(review): single-precision two-register transfers never
           take this branch (the condition requires dp), so the inner
           !dp paths below are dead code.  */
        if (dp && (insn & (1 << 22))) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                if (insn & (1 << 5))
                    return 1;
                rm = insn & 0xf;
            } else
                rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);

            if (insn & (1 << 20)) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(1, rm);
                    gen_op_vfp_mrrd();
                    gen_movl_reg_T0(s, rd);
                    gen_movl_reg_T1(s, rn);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rn);
                    gen_mov_F0_vreg(0, rm + 1);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rd);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    gen_movl_T0_reg(s, rd);
                    gen_movl_T1_reg(s, rn);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(1, rm);
                } else {
                    gen_movl_T0_reg(s, rn);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm);
                    gen_movl_T0_reg(s, rd);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                rd = (insn >> 12) & 0xf;
            else
                rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
            gen_movl_T1_reg(s, rn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & (1 << 20)) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
1013
/* Emit a jump to DEST, ending the TB.  When the destination lies in the
   same guest page as the TB's start it is safe to chain: use goto_tb
   slot N and exit with T0 = tb pointer + slot index so the caller can
   patch the chain.  Otherwise just set r15 and exit with T0 = 0 to
   force a full TB lookup.  */
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        if (n == 0)
            gen_op_goto_tb0(TBPARAM(tb));
        else
            gen_op_goto_tb1(TBPARAM(tb));
        gen_op_movl_T0_im(dest);
        gen_op_movl_r15_T0();
        gen_op_movl_T0_im((long)tb + n);
        gen_op_exit_tb();
    } else {
        gen_op_movl_T0_im(dest);
        gen_op_movl_r15_T0();
        gen_op_movl_T0_0();
        gen_op_exit_tb();
    }
}
1035
/* Emit a direct jump to DEST.  Under single-stepping a chained jump
   would skip the debug exception, so fall back to an indirect bx
   (with the Thumb bit set when appropriate).  */
static inline void gen_jmp (DisasContext *s, uint32_t dest)
{
    if (__builtin_expect(s->singlestep_enabled, 0)) {
        /* An indirect jump so that we still trigger the debug exception.  */
        if (s->thumb)
            dest |= 1;
        gen_op_movl_T0_im(dest);
        gen_bx(s);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
1049
/* Signed 16x16 multiply of T0 and T1 into T0: X/Y nonzero selects the
   top halfword of the respective operand (arithmetic shift right 16),
   zero selects the sign-extended bottom halfword.  */
static inline void gen_mulxy(int x, int y)
{
    if (x)
        gen_op_sarl_T0_im(16);
    else
        gen_op_sxth_T0();
    if (y)
        gen_op_sarl_T1_im(16);
    else
        gen_op_sxth_T1();
    gen_op_mul_T0_T1();
}
1062
1063 /* Return the mask of PSR bits set by a MSR instruction. */
1064 static uint32_t msr_mask(DisasContext *s, int flags, int spsr) {
1065 uint32_t mask;
1066
1067 mask = 0;
1068 if (flags & (1 << 0))
1069 mask |= 0xff;
1070 if (flags & (1 << 1))
1071 mask |= 0xff00;
1072 if (flags & (1 << 2))
1073 mask |= 0xff0000;
1074 if (flags & (1 << 3))
1075 mask |= 0xff000000;
1076 /* Mask out undefined bits. */
1077 mask &= 0xf90f03ff;
1078 /* Mask out state bits. */
1079 if (!spsr)
1080 mask &= ~0x01000020;
1081 /* Mask out privileged bits. */
1082 if (IS_USER(s))
1083 mask &= 0xf80f0200;
1084 return mask;
1085 }
1086
/* Write T0 into the CPSR (or SPSR) under MASK.  Returns nonzero if
   access to the PSR is not permitted (SPSR access from user mode).
   Ends the TB via gen_lookup_tb since the write may change CPU state.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;
        gen_op_movl_spsr_T0(mask);
    } else {
        gen_op_movl_cpsr_T0(mask);
    }
    gen_lookup_tb(s);
    return 0;
}
1101
/* Generate an exception return: the caller has left the return address
   in T0, which is committed to r15, then the CPSR is restored in full
   from the SPSR and the TB is ended so the new state takes effect.  */
static void gen_exception_return(DisasContext *s)
{
    gen_op_movl_reg_TN[0][15]();
    gen_op_movl_T0_spsr();
    gen_op_movl_cpsr_T0(0xffffffff);
    s->is_jmp = DISAS_UPDATE;
}
1109
1110 static void disas_arm_insn(CPUState * env, DisasContext *s)
1111 {
1112 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
1113
1114 insn = ldl_code(s->pc);
1115 s->pc += 4;
1116
1117 cond = insn >> 28;
1118 if (cond == 0xf){
1119 /* Unconditional instructions. */
1120 if ((insn & 0x0d70f000) == 0x0550f000)
1121 return; /* PLD */
1122 else if ((insn & 0x0e000000) == 0x0a000000) {
1123 /* branch link and change to thumb (blx <offset>) */
1124 int32_t offset;
1125
1126 val = (uint32_t)s->pc;
1127 gen_op_movl_T0_im(val);
1128 gen_movl_reg_T0(s, 14);
1129 /* Sign-extend the 24-bit offset */
1130 offset = (((int32_t)insn) << 8) >> 8;
1131 /* offset * 4 + bit24 * 2 + (thumb bit) */
1132 val += (offset << 2) | ((insn >> 23) & 2) | 1;
1133 /* pipeline offset */
1134 val += 4;
1135 gen_op_movl_T0_im(val);
1136 gen_bx(s);
1137 return;
1138 } else if ((insn & 0x0fe00000) == 0x0c400000) {
1139 /* Coprocessor double register transfer. */
1140 } else if ((insn & 0x0f000010) == 0x0e000010) {
1141 /* Additional coprocessor register transfer. */
1142 } else if ((insn & 0x0ff10010) == 0x01000000) {
1143 /* cps (privileged) */
1144 } else if ((insn & 0x0ffffdff) == 0x01010000) {
1145 /* setend */
1146 if (insn & (1 << 9)) {
1147 /* BE8 mode not implemented. */
1148 goto illegal_op;
1149 }
1150 return;
1151 }
1152 goto illegal_op;
1153 }
1154 if (cond != 0xe) {
1155 /* if not always execute, we generate a conditional jump to
1156 next instruction */
1157 s->condlabel = gen_new_label();
1158 gen_test_cc[cond ^ 1](s->condlabel);
1159 s->condjmp = 1;
1160 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1161 //s->is_jmp = DISAS_JUMP_NEXT;
1162 }
1163 if ((insn & 0x0f900000) == 0x03000000) {
1164 if ((insn & 0x0fb0f000) != 0x0320f000)
1165 goto illegal_op;
1166 /* CPSR = immediate */
1167 val = insn & 0xff;
1168 shift = ((insn >> 8) & 0xf) * 2;
1169 if (shift)
1170 val = (val >> shift) | (val << (32 - shift));
1171 gen_op_movl_T0_im(val);
1172 i = ((insn & (1 << 22)) != 0);
1173 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1174 goto illegal_op;
1175 } else if ((insn & 0x0f900000) == 0x01000000
1176 && (insn & 0x00000090) != 0x00000090) {
1177 /* miscellaneous instructions */
1178 op1 = (insn >> 21) & 3;
1179 sh = (insn >> 4) & 0xf;
1180 rm = insn & 0xf;
1181 switch (sh) {
1182 case 0x0: /* move program status register */
1183 if (op1 & 1) {
1184 /* PSR = reg */
1185 gen_movl_T0_reg(s, rm);
1186 i = ((op1 & 2) != 0);
1187 if (gen_set_psr_T0(s, msr_mask(s, (insn >> 16) & 0xf, i), i))
1188 goto illegal_op;
1189 } else {
1190 /* reg = PSR */
1191 rd = (insn >> 12) & 0xf;
1192 if (op1 & 2) {
1193 if (IS_USER(s))
1194 goto illegal_op;
1195 gen_op_movl_T0_spsr();
1196 } else {
1197 gen_op_movl_T0_cpsr();
1198 }
1199 gen_movl_reg_T0(s, rd);
1200 }
1201 break;
1202 case 0x1:
1203 if (op1 == 1) {
1204 /* branch/exchange thumb (bx). */
1205 gen_movl_T0_reg(s, rm);
1206 gen_bx(s);
1207 } else if (op1 == 3) {
1208 /* clz */
1209 rd = (insn >> 12) & 0xf;
1210 gen_movl_T0_reg(s, rm);
1211 gen_op_clz_T0();
1212 gen_movl_reg_T0(s, rd);
1213 } else {
1214 goto illegal_op;
1215 }
1216 break;
1217 case 0x2:
1218 if (op1 == 1) {
1219 ARCH(5J); /* bxj */
1220 /* Trivial implementation equivalent to bx. */
1221 gen_movl_T0_reg(s, rm);
1222 gen_bx(s);
1223 } else {
1224 goto illegal_op;
1225 }
1226 break;
1227 case 0x3:
1228 if (op1 != 1)
1229 goto illegal_op;
1230
1231 /* branch link/exchange thumb (blx) */
1232 val = (uint32_t)s->pc;
1233 gen_op_movl_T0_im(val);
1234 gen_movl_reg_T0(s, 14);
1235 gen_movl_T0_reg(s, rm);
1236 gen_bx(s);
1237 break;
1238 case 0x5: /* saturating add/subtract */
1239 rd = (insn >> 12) & 0xf;
1240 rn = (insn >> 16) & 0xf;
1241 gen_movl_T0_reg(s, rm);
1242 gen_movl_T1_reg(s, rn);
1243 if (op1 & 2)
1244 gen_op_double_T1_saturate();
1245 if (op1 & 1)
1246 gen_op_subl_T0_T1_saturate();
1247 else
1248 gen_op_addl_T0_T1_saturate();
1249 gen_movl_reg_T0(s, rd);
1250 break;
1251 case 7: /* bkpt */
1252 gen_op_movl_T0_im((long)s->pc - 4);
1253 gen_op_movl_reg_TN[0][15]();
1254 gen_op_bkpt();
1255 s->is_jmp = DISAS_JUMP;
1256 break;
1257 case 0x8: /* signed multiply */
1258 case 0xa:
1259 case 0xc:
1260 case 0xe:
1261 rs = (insn >> 8) & 0xf;
1262 rn = (insn >> 12) & 0xf;
1263 rd = (insn >> 16) & 0xf;
1264 if (op1 == 1) {
1265 /* (32 * 16) >> 16 */
1266 gen_movl_T0_reg(s, rm);
1267 gen_movl_T1_reg(s, rs);
1268 if (sh & 4)
1269 gen_op_sarl_T1_im(16);
1270 else
1271 gen_op_sxth_T1();
1272 gen_op_imulw_T0_T1();
1273 if ((sh & 2) == 0) {
1274 gen_movl_T1_reg(s, rn);
1275 gen_op_addl_T0_T1_setq();
1276 }
1277 gen_movl_reg_T0(s, rd);
1278 } else {
1279 /* 16 * 16 */
1280 gen_movl_T0_reg(s, rm);
1281 gen_movl_T1_reg(s, rs);
1282 gen_mulxy(sh & 2, sh & 4);
1283 if (op1 == 2) {
1284 gen_op_signbit_T1_T0();
1285 gen_op_addq_T0_T1(rn, rd);
1286 gen_movl_reg_T0(s, rn);
1287 gen_movl_reg_T1(s, rd);
1288 } else {
1289 if (op1 == 0) {
1290 gen_movl_T1_reg(s, rn);
1291 gen_op_addl_T0_T1_setq();
1292 }
1293 gen_movl_reg_T0(s, rd);
1294 }
1295 }
1296 break;
1297 default:
1298 goto illegal_op;
1299 }
1300 } else if (((insn & 0x0e000000) == 0 &&
1301 (insn & 0x00000090) != 0x90) ||
1302 ((insn & 0x0e000000) == (1 << 25))) {
1303 int set_cc, logic_cc, shiftop;
1304
1305 op1 = (insn >> 21) & 0xf;
1306 set_cc = (insn >> 20) & 1;
1307 logic_cc = table_logic_cc[op1] & set_cc;
1308
1309 /* data processing instruction */
1310 if (insn & (1 << 25)) {
1311 /* immediate operand */
1312 val = insn & 0xff;
1313 shift = ((insn >> 8) & 0xf) * 2;
1314 if (shift)
1315 val = (val >> shift) | (val << (32 - shift));
1316 gen_op_movl_T1_im(val);
1317 if (logic_cc && shift)
1318 gen_op_mov_CF_T1();
1319 } else {
1320 /* register */
1321 rm = (insn) & 0xf;
1322 gen_movl_T1_reg(s, rm);
1323 shiftop = (insn >> 5) & 3;
1324 if (!(insn & (1 << 4))) {
1325 shift = (insn >> 7) & 0x1f;
1326 if (shift != 0) {
1327 if (logic_cc) {
1328 gen_shift_T1_im_cc[shiftop](shift);
1329 } else {
1330 gen_shift_T1_im[shiftop](shift);
1331 }
1332 } else if (shiftop != 0) {
1333 if (logic_cc) {
1334 gen_shift_T1_0_cc[shiftop]();
1335 } else {
1336 gen_shift_T1_0[shiftop]();
1337 }
1338 }
1339 } else {
1340 rs = (insn >> 8) & 0xf;
1341 gen_movl_T0_reg(s, rs);
1342 if (logic_cc) {
1343 gen_shift_T1_T0_cc[shiftop]();
1344 } else {
1345 gen_shift_T1_T0[shiftop]();
1346 }
1347 }
1348 }
1349 if (op1 != 0x0f && op1 != 0x0d) {
1350 rn = (insn >> 16) & 0xf;
1351 gen_movl_T0_reg(s, rn);
1352 }
1353 rd = (insn >> 12) & 0xf;
1354 switch(op1) {
1355 case 0x00:
1356 gen_op_andl_T0_T1();
1357 gen_movl_reg_T0(s, rd);
1358 if (logic_cc)
1359 gen_op_logic_T0_cc();
1360 break;
1361 case 0x01:
1362 gen_op_xorl_T0_T1();
1363 gen_movl_reg_T0(s, rd);
1364 if (logic_cc)
1365 gen_op_logic_T0_cc();
1366 break;
1367 case 0x02:
1368 if (set_cc && rd == 15) {
1369 /* SUBS r15, ... is used for exception return. */
1370 if (IS_USER(s))
1371 goto illegal_op;
1372 gen_op_subl_T0_T1_cc();
1373 gen_exception_return(s);
1374 } else {
1375 if (set_cc)
1376 gen_op_subl_T0_T1_cc();
1377 else
1378 gen_op_subl_T0_T1();
1379 gen_movl_reg_T0(s, rd);
1380 }
1381 break;
1382 case 0x03:
1383 if (set_cc)
1384 gen_op_rsbl_T0_T1_cc();
1385 else
1386 gen_op_rsbl_T0_T1();
1387 gen_movl_reg_T0(s, rd);
1388 break;
1389 case 0x04:
1390 if (set_cc)
1391 gen_op_addl_T0_T1_cc();
1392 else
1393 gen_op_addl_T0_T1();
1394 gen_movl_reg_T0(s, rd);
1395 break;
1396 case 0x05:
1397 if (set_cc)
1398 gen_op_adcl_T0_T1_cc();
1399 else
1400 gen_op_adcl_T0_T1();
1401 gen_movl_reg_T0(s, rd);
1402 break;
1403 case 0x06:
1404 if (set_cc)
1405 gen_op_sbcl_T0_T1_cc();
1406 else
1407 gen_op_sbcl_T0_T1();
1408 gen_movl_reg_T0(s, rd);
1409 break;
1410 case 0x07:
1411 if (set_cc)
1412 gen_op_rscl_T0_T1_cc();
1413 else
1414 gen_op_rscl_T0_T1();
1415 gen_movl_reg_T0(s, rd);
1416 break;
1417 case 0x08:
1418 if (set_cc) {
1419 gen_op_andl_T0_T1();
1420 gen_op_logic_T0_cc();
1421 }
1422 break;
1423 case 0x09:
1424 if (set_cc) {
1425 gen_op_xorl_T0_T1();
1426 gen_op_logic_T0_cc();
1427 }
1428 break;
1429 case 0x0a:
1430 if (set_cc) {
1431 gen_op_subl_T0_T1_cc();
1432 }
1433 break;
1434 case 0x0b:
1435 if (set_cc) {
1436 gen_op_addl_T0_T1_cc();
1437 }
1438 break;
1439 case 0x0c:
1440 gen_op_orl_T0_T1();
1441 gen_movl_reg_T0(s, rd);
1442 if (logic_cc)
1443 gen_op_logic_T0_cc();
1444 break;
1445 case 0x0d:
1446 if (logic_cc && rd == 15) {
1447 /* MOVS r15, ... is used for exception return. */
1448 if (IS_USER(s))
1449 goto illegal_op;
1450 gen_op_movl_T0_T1();
1451 gen_exception_return(s);
1452 } else {
1453 gen_movl_reg_T1(s, rd);
1454 if (logic_cc)
1455 gen_op_logic_T1_cc();
1456 }
1457 break;
1458 case 0x0e:
1459 gen_op_bicl_T0_T1();
1460 gen_movl_reg_T0(s, rd);
1461 if (logic_cc)
1462 gen_op_logic_T0_cc();
1463 break;
1464 default:
1465 case 0x0f:
1466 gen_op_notl_T1();
1467 gen_movl_reg_T1(s, rd);
1468 if (logic_cc)
1469 gen_op_logic_T1_cc();
1470 break;
1471 }
1472 } else {
1473 /* other instructions */
1474 op1 = (insn >> 24) & 0xf;
1475 switch(op1) {
1476 case 0x0:
1477 case 0x1:
1478 /* multiplies, extra load/stores */
1479 sh = (insn >> 5) & 3;
1480 if (sh == 0) {
1481 if (op1 == 0x0) {
1482 rd = (insn >> 16) & 0xf;
1483 rn = (insn >> 12) & 0xf;
1484 rs = (insn >> 8) & 0xf;
1485 rm = (insn) & 0xf;
1486 if (((insn >> 22) & 3) == 0) {
1487 /* 32 bit mul */
1488 gen_movl_T0_reg(s, rs);
1489 gen_movl_T1_reg(s, rm);
1490 gen_op_mul_T0_T1();
1491 if (insn & (1 << 21)) {
1492 gen_movl_T1_reg(s, rn);
1493 gen_op_addl_T0_T1();
1494 }
1495 if (insn & (1 << 20))
1496 gen_op_logic_T0_cc();
1497 gen_movl_reg_T0(s, rd);
1498 } else {
1499 /* 64 bit mul */
1500 gen_movl_T0_reg(s, rs);
1501 gen_movl_T1_reg(s, rm);
1502 if (insn & (1 << 22))
1503 gen_op_imull_T0_T1();
1504 else
1505 gen_op_mull_T0_T1();
1506 if (insn & (1 << 21)) /* mult accumulate */
1507 gen_op_addq_T0_T1(rn, rd);
1508 if (!(insn & (1 << 23))) { /* double accumulate */
1509 ARCH(6);
1510 gen_op_addq_lo_T0_T1(rn);
1511 gen_op_addq_lo_T0_T1(rd);
1512 }
1513 if (insn & (1 << 20))
1514 gen_op_logicq_cc();
1515 gen_movl_reg_T0(s, rn);
1516 gen_movl_reg_T1(s, rd);
1517 }
1518 } else {
1519 rn = (insn >> 16) & 0xf;
1520 rd = (insn >> 12) & 0xf;
1521 if (insn & (1 << 23)) {
1522 /* load/store exclusive */
1523 goto illegal_op;
1524 } else {
1525 /* SWP instruction */
1526 rm = (insn) & 0xf;
1527
1528 gen_movl_T0_reg(s, rm);
1529 gen_movl_T1_reg(s, rn);
1530 if (insn & (1 << 22)) {
1531 gen_ldst(swpb, s);
1532 } else {
1533 gen_ldst(swpl, s);
1534 }
1535 gen_movl_reg_T0(s, rd);
1536 }
1537 }
1538 } else {
1539 int address_offset;
1540 /* Misc load/store */
1541 rn = (insn >> 16) & 0xf;
1542 rd = (insn >> 12) & 0xf;
1543 gen_movl_T1_reg(s, rn);
1544 if (insn & (1 << 24))
1545 gen_add_datah_offset(s, insn, 0);
1546 address_offset = 0;
1547 if (insn & (1 << 20)) {
1548 /* load */
1549 switch(sh) {
1550 case 1:
1551 gen_ldst(lduw, s);
1552 break;
1553 case 2:
1554 gen_ldst(ldsb, s);
1555 break;
1556 default:
1557 case 3:
1558 gen_ldst(ldsw, s);
1559 break;
1560 }
1561 gen_movl_reg_T0(s, rd);
1562 } else if (sh & 2) {
1563 /* doubleword */
1564 if (sh & 1) {
1565 /* store */
1566 gen_movl_T0_reg(s, rd);
1567 gen_ldst(stl, s);
1568 gen_op_addl_T1_im(4);
1569 gen_movl_T0_reg(s, rd + 1);
1570 gen_ldst(stl, s);
1571 } else {
1572 /* load */
1573 gen_ldst(ldl, s);
1574 gen_movl_reg_T0(s, rd);
1575 gen_op_addl_T1_im(4);
1576 gen_ldst(ldl, s);
1577 gen_movl_reg_T0(s, rd + 1);
1578 }
1579 address_offset = -4;
1580 } else {
1581 /* store */
1582 gen_movl_T0_reg(s, rd);
1583 gen_ldst(stw, s);
1584 }
1585 if (!(insn & (1 << 24))) {
1586 gen_add_datah_offset(s, insn, address_offset);
1587 gen_movl_reg_T1(s, rn);
1588 } else if (insn & (1 << 21)) {
1589 if (address_offset)
1590 gen_op_addl_T1_im(address_offset);
1591 gen_movl_reg_T1(s, rn);
1592 }
1593 }
1594 break;
1595 case 0x4:
1596 case 0x5:
1597 case 0x6:
1598 case 0x7:
1599 /* Check for undefined extension instructions
1600 * per the ARM Bible IE:
1601 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
1602 */
1603 sh = (0xf << 20) | (0xf << 4);
1604 if (op1 == 0x7 && ((insn & sh) == sh))
1605 {
1606 goto illegal_op;
1607 }
1608 /* load/store byte/word */
1609 rn = (insn >> 16) & 0xf;
1610 rd = (insn >> 12) & 0xf;
1611 gen_movl_T1_reg(s, rn);
1612 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
1613 if (insn & (1 << 24))
1614 gen_add_data_offset(s, insn);
1615 if (insn & (1 << 20)) {
1616 /* load */
1617 s->is_mem = 1;
1618 #if defined(CONFIG_USER_ONLY)
1619 if (insn & (1 << 22))
1620 gen_op_ldub_raw();
1621 else
1622 gen_op_ldl_raw();
1623 #else
1624 if (insn & (1 << 22)) {
1625 if (i)
1626 gen_op_ldub_user();
1627 else
1628 gen_op_ldub_kernel();
1629 } else {
1630 if (i)
1631 gen_op_ldl_user();
1632 else
1633 gen_op_ldl_kernel();
1634 }
1635 #endif
1636 if (rd == 15)
1637 gen_bx(s);
1638 else
1639 gen_movl_reg_T0(s, rd);
1640 } else {
1641 /* store */
1642 gen_movl_T0_reg(s, rd);
1643 #if defined(CONFIG_USER_ONLY)
1644 if (insn & (1 << 22))
1645 gen_op_stb_raw();
1646 else
1647 gen_op_stl_raw();
1648 #else
1649 if (insn & (1 << 22)) {
1650 if (i)
1651 gen_op_stb_user();
1652 else
1653 gen_op_stb_kernel();
1654 } else {
1655 if (i)
1656 gen_op_stl_user();
1657 else
1658 gen_op_stl_kernel();
1659 }
1660 #endif
1661 }
1662 if (!(insn & (1 << 24))) {
1663 gen_add_data_offset(s, insn);
1664 gen_movl_reg_T1(s, rn);
1665 } else if (insn & (1 << 21))
1666 gen_movl_reg_T1(s, rn); {
1667 }
1668 break;
1669 case 0x08:
1670 case 0x09:
1671 {
1672 int j, n, user, loaded_base;
1673 /* load/store multiple words */
1674 /* XXX: store correct base if write back */
1675 user = 0;
1676 if (insn & (1 << 22)) {
1677 if (IS_USER(s))
1678 goto illegal_op; /* only usable in supervisor mode */
1679
1680 if ((insn & (1 << 15)) == 0)
1681 user = 1;
1682 }
1683 rn = (insn >> 16) & 0xf;
1684 gen_movl_T1_reg(s, rn);
1685
1686 /* compute total size */
1687 loaded_base = 0;
1688 n = 0;
1689 for(i=0;i<16;i++) {
1690 if (insn & (1 << i))
1691 n++;
1692 }
1693 /* XXX: test invalid n == 0 case ? */
1694 if (insn & (1 << 23)) {
1695 if (insn & (1 << 24)) {
1696 /* pre increment */
1697 gen_op_addl_T1_im(4);
1698 } else {
1699 /* post increment */
1700 }
1701 } else {
1702 if (insn & (1 << 24)) {
1703 /* pre decrement */
1704 gen_op_addl_T1_im(-(n * 4));
1705 } else {
1706 /* post decrement */
1707 if (n != 1)
1708 gen_op_addl_T1_im(-((n - 1) * 4));
1709 }
1710 }
1711 j = 0;
1712 for(i=0;i<16;i++) {
1713 if (insn & (1 << i)) {
1714 if (insn & (1 << 20)) {
1715 /* load */
1716 gen_ldst(ldl, s);
1717 if (i == 15) {
1718 gen_bx(s);
1719 } else if (user) {
1720 gen_op_movl_user_T0(i);
1721 } else if (i == rn) {
1722 gen_op_movl_T2_T0();
1723 loaded_base = 1;
1724 } else {
1725 gen_movl_reg_T0(s, i);
1726 }
1727 } else {
1728 /* store */
1729 if (i == 15) {
1730 /* special case: r15 = PC + 12 */
1731 val = (long)s->pc + 8;
1732 gen_op_movl_TN_im[0](val);
1733 } else if (user) {
1734 gen_op_movl_T0_user(i);
1735 } else {
1736 gen_movl_T0_reg(s, i);
1737 }
1738 gen_ldst(stl, s);
1739 }
1740 j++;
1741 /* no need to add after the last transfer */
1742 if (j != n)
1743 gen_op_addl_T1_im(4);
1744 }
1745 }
1746 if (insn & (1 << 21)) {
1747 /* write back */
1748 if (insn & (1 << 23)) {
1749 if (insn & (1 << 24)) {
1750 /* pre increment */
1751 } else {
1752 /* post increment */
1753 gen_op_addl_T1_im(4);
1754 }
1755 } else {
1756 if (insn & (1 << 24)) {
1757 /* pre decrement */
1758 if (n != 1)
1759 gen_op_addl_T1_im(-((n - 1) * 4));
1760 } else {
1761 /* post decrement */
1762 gen_op_addl_T1_im(-(n * 4));
1763 }
1764 }
1765 gen_movl_reg_T1(s, rn);
1766 }
1767 if (loaded_base) {
1768 gen_op_movl_T0_T2();
1769 gen_movl_reg_T0(s, rn);
1770 }
1771 if ((insn & (1 << 22)) && !user) {
1772 /* Restore CPSR from SPSR. */
1773 gen_op_movl_T0_spsr();
1774 gen_op_movl_cpsr_T0(0xffffffff);
1775 s->is_jmp = DISAS_UPDATE;
1776 }
1777 }
1778 break;
1779 case 0xa:
1780 case 0xb:
1781 {
1782 int32_t offset;
1783
1784 /* branch (and link) */
1785 val = (int32_t)s->pc;
1786 if (insn & (1 << 24)) {
1787 gen_op_movl_T0_im(val);
1788 gen_op_movl_reg_TN[0][14]();
1789 }
1790 offset = (((int32_t)insn << 8) >> 8);
1791 val += (offset << 2) + 4;
1792 gen_jmp(s, val);
1793 }
1794 break;
1795 case 0xc:
1796 case 0xd:
1797 case 0xe:
1798 /* Coprocessor. */
1799 op1 = (insn >> 8) & 0xf;
1800 switch (op1) {
1801 case 10:
1802 case 11:
1803 if (disas_vfp_insn (env, s, insn))
1804 goto illegal_op;
1805 break;
1806 case 15:
1807 if (disas_cp15_insn (s, insn))
1808 goto illegal_op;
1809 break;
1810 default:
1811 /* unknown coprocessor. */
1812 goto illegal_op;
1813 }
1814 break;
1815 case 0xf:
1816 /* swi */
1817 gen_op_movl_T0_im((long)s->pc);
1818 gen_op_movl_reg_TN[0][15]();
1819 gen_op_swi();
1820 s->is_jmp = DISAS_JUMP;
1821 break;
1822 default:
1823 illegal_op:
1824 gen_op_movl_T0_im((long)s->pc - 4);
1825 gen_op_movl_reg_TN[0][15]();
1826 gen_op_undef_insn();
1827 s->is_jmp = DISAS_JUMP;
1828 break;
1829 }
1830 }
1831 }
1832
/* Decode and translate one 16-bit Thumb instruction at s->pc, emitting
   micro-ops into the current translation buffer.  Advances s->pc past the
   halfword; the second halfword of a 32-bit BL/BLX pair is normally
   consumed here too, unless it starts a new page, in which case the pair
   is translated as two separate 16-bit instructions so a prefetch abort
   on the second half is handled correctly.  Undefined encodings jump to
   'undef', which raises the undefined-instruction exception.  */
static void disas_thumb_insn(DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;

    insn = lduw_code(s->pc);
    s->pc += 2;

    /* Dispatch on the top four opcode bits.  */
    switch (insn >> 12) {
    case 0: case 1:
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            gen_movl_T0_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                gen_op_movl_T1_im((insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                gen_movl_T1_reg(s, rm);
            }
            if (insn & (1 << 9))
                gen_op_subl_T0_T1_cc();
            else
                gen_op_addl_T0_T1_cc();
            gen_movl_reg_T0(s, rd);
        } else {
            /* shift immediate: op selects lsl/lsr/asr */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            gen_movl_T0_reg(s, rm);
            gen_shift_T0_im_thumb[op](shift);
            gen_movl_reg_T0(s, rd);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate: mov/cmp/add/sub rd, #imm8 */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) {
            gen_op_movl_T0_im(insn & 0xff);
        } else {
            gen_movl_T0_reg(s, rd);
            gen_op_movl_T1_im(insn & 0xff);
        }
        switch (op) {
        case 0: /* mov */
            gen_op_logic_T0_cc();
            break;
        case 1: /* cmp */
            gen_op_subl_T0_T1_cc();
            break;
        case 2: /* add */
            gen_op_addl_T0_T1_cc();
            break;
        case 3: /* sub */
            gen_op_subl_T0_T1_cc();
            break;
        }
        /* cmp only sets flags; the other ops write the result back.  */
        if (op != 1)
            gen_movl_reg_T0(s, rd);
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative. Bit 1 of PC is ignored.  */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            gen_op_movl_T1_im(val);
            gen_ldst(ldl, s);
            gen_movl_reg_T0(s, rd);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            /* rd may be a high register: bit 7 supplies its top bit.  */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                gen_movl_T0_reg(s, rd);
                gen_movl_T1_reg(s, rm);
                gen_op_addl_T0_T1();
                gen_movl_reg_T0(s, rd);
                break;
            case 1: /* cmp */
                gen_movl_T0_reg(s, rd);
                gen_movl_T1_reg(s, rm);
                gen_op_subl_T0_T1_cc();
                break;
            case 2: /* mov/cpy */
                gen_movl_T0_reg(s, rm);
                gen_movl_reg_T0(s, rd);
                break;
            case 3:/* branch [and link] exchange thumb register */
                if (insn & (1 << 7)) {
                    /* blx: save return address (with Thumb bit) in lr */
                    val = (uint32_t)s->pc | 1;
                    gen_op_movl_T1_im(val);
                    gen_movl_reg_T1(s, 14);
                }
                gen_movl_T0_reg(s, rm);
                gen_bx(s);
                break;
            }
            break;
        }

        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) /* neg */
            gen_op_movl_T0_im(0);
        else if (op != 0xf) /* mvn doesn't read its first operand */
            gen_movl_T0_reg(s, rd);

        gen_movl_T1_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            gen_op_andl_T0_T1();
            gen_op_logic_T0_cc();
            break;
        case 0x1: /* eor */
            gen_op_xorl_T0_T1();
            gen_op_logic_T0_cc();
            break;
        case 0x2: /* lsl */
            gen_op_shll_T1_T0_cc();
            gen_op_logic_T1_cc();
            break;
        case 0x3: /* lsr */
            gen_op_shrl_T1_T0_cc();
            gen_op_logic_T1_cc();
            break;
        case 0x4: /* asr */
            gen_op_sarl_T1_T0_cc();
            gen_op_logic_T1_cc();
            break;
        case 0x5: /* adc */
            gen_op_adcl_T0_T1_cc();
            break;
        case 0x6: /* sbc */
            gen_op_sbcl_T0_T1_cc();
            break;
        case 0x7: /* ror */
            gen_op_rorl_T1_T0_cc();
            gen_op_logic_T1_cc();
            break;
        case 0x8: /* tst */
            gen_op_andl_T0_T1();
            gen_op_logic_T0_cc();
            rd = 16;    /* sentinel: flags-only, suppress writeback */
            break;
        case 0x9: /* neg */
            gen_op_subl_T0_T1_cc();
            break;
        case 0xa: /* cmp */
            gen_op_subl_T0_T1_cc();
            rd = 16;    /* flags only */
            break;
        case 0xb: /* cmn */
            gen_op_addl_T0_T1_cc();
            rd = 16;    /* flags only */
            break;
        case 0xc: /* orr */
            gen_op_orl_T0_T1();
            gen_op_logic_T0_cc();
            break;
        case 0xd: /* mul */
            gen_op_mull_T0_T1();
            gen_op_logic_T0_cc();
            break;
        case 0xe: /* bic */
            gen_op_bicl_T0_T1();
            gen_op_logic_T0_cc();
            break;
        case 0xf: /* mvn */
            gen_op_notl_T1();
            gen_op_logic_T1_cc();
            val = 1;    /* result lives in T1 */
            rm = rd;    /* write T1 back to the destination */
            break;
        }
        /* Write the result back unless rd was set to the flags-only
           sentinel; val selects which temporary holds the result.  */
        if (rd != 16) {
            if (val)
                gen_movl_reg_T1(s, rm);
            else
                gen_movl_reg_T0(s, rd);
        }
        break;

    case 5:
        /* load/store register offset.  */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        gen_movl_T1_reg(s, rn);
        gen_movl_T2_reg(s, rm);
        gen_op_addl_T1_T2();

        if (op < 3) /* store */
            gen_movl_T0_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_ldst(stl, s);
            break;
        case 1: /* strh */
            gen_ldst(stw, s);
            break;
        case 2: /* strb */
            gen_ldst(stb, s);
            break;
        case 3: /* ldrsb */
            gen_ldst(ldsb, s);
            break;
        case 4: /* ldr */
            gen_ldst(ldl, s);
            break;
        case 5: /* ldrh */
            gen_ldst(lduw, s);
            break;
        case 6: /* ldrb */
            gen_ldst(ldub, s);
            break;
        case 7: /* ldrsh */
            gen_ldst(ldsw, s);
            break;
        }
        if (op >= 3) /* load */
            gen_movl_reg_T0(s, rd);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        gen_movl_T1_reg(s, rn);
        val = (insn >> 4) & 0x7c;   /* imm5 scaled by 4 */
        gen_op_movl_T2_im(val);
        gen_op_addl_T1_T2();

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(ldl, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stl, s);
        }
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        gen_movl_T1_reg(s, rn);
        val = (insn >> 6) & 0x1f;   /* unscaled imm5 */
        gen_op_movl_T2_im(val);
        gen_op_addl_T1_T2();

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(ldub, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stb, s);
        }
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        gen_movl_T1_reg(s, rn);
        val = (insn >> 5) & 0x3e;   /* imm5 scaled by 2 */
        gen_op_movl_T2_im(val);
        gen_op_addl_T1_T2();

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(lduw, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stw, s);
        }
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        gen_movl_T1_reg(s, 13);
        val = (insn & 0xff) * 4;
        gen_op_movl_T2_im(val);
        gen_op_addl_T1_T2();

        if (insn & (1 << 11)) {
            /* load */
            gen_ldst(ldl, s);
            gen_movl_reg_T0(s, rd);
        } else {
            /* store */
            gen_movl_T0_reg(s, rd);
            gen_ldst(stl, s);
        }
        break;

    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            gen_movl_T0_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored.  */
            gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        gen_op_movl_T1_im(val);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            gen_movl_T1_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            gen_op_movl_T2_im(val);
            gen_op_addl_T1_T2();
            gen_movl_reg_T1(s, 13);
            break;

        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            gen_movl_T1_reg(s, 13);
            /* Bit 8 adds lr (push) or pc (pop) to the register list.  */
            if (insn & (1 << 8))
                offset = 4;
            else
                offset = 0;
            /* Total bytes transferred, to pre-adjust SP for push.  */
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                gen_op_movl_T2_im(-offset);
                gen_op_addl_T1_T2();
            }
            gen_op_movl_T2_im(4);   /* per-register stride */
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        gen_ldst(ldl, s);
                        gen_movl_reg_T0(s, i);
                    } else {
                        /* push */
                        gen_movl_T0_reg(s, i);
                        gen_ldst(stl, s);
                    }
                    /* advance to the next address.  */
                    gen_op_addl_T1_T2();
                }
            }
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    gen_ldst(ldl, s);
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    gen_movl_T0_reg(s, 14);
                    gen_ldst(stl, s);
                }
                gen_op_addl_T1_T2();
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: undo the walking so T1 is the final SP */
                gen_op_movl_T2_im(-offset);
                gen_op_addl_T1_T2();
            }
            /* write back the new stack pointer */
            gen_movl_reg_T1(s, 13);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s);
            break;

        case 0xe: /* bkpt */
            gen_op_movl_T0_im((long)s->pc - 2);
            gen_op_movl_reg_TN[0][15]();
            gen_op_bkpt();
            s->is_jmp = DISAS_JUMP;
            break;

        default:
            goto undef;
        }
        break;

    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        gen_movl_T1_reg(s, rn);
        gen_op_movl_T2_im(4);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    gen_ldst(ldl, s);
                    gen_movl_reg_T0(s, i);
                } else {
                    /* store */
                    gen_movl_T0_reg(s, i);
                    gen_ldst(stl, s);
                }
                /* advance to the next address */
                gen_op_addl_T1_T2();
            }
        }
        /* Base register writeback (skipped when rn was itself loaded).  */
        if ((insn & (1 << rn)) == 0)
            gen_movl_reg_T1(s, rn);
        break;

    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_op_movl_T0_im((long)s->pc | 1);
            /* Don't set r15.  */
            /* NOTE(review): the comment above conflicts with the next
               line, which does store T0 into r15 — confirm which is
               intended.  */
            gen_op_movl_reg_TN[0][15]();
            gen_op_swi();
            s->is_jmp = DISAS_JUMP;
            break;
        }
        /* generate a conditional jump to next instruction: the inverted
           condition (cond ^ 1) skips over the branch when it fails */
        s->condlabel = gen_new_label();
        gen_test_cc[cond ^ 1](s->condlabel);
        s->condjmp = 1;
        //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
        //s->is_jmp = DISAS_JUMP_NEXT;
        gen_movl_T1_reg(s, 15);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24;   /* sign-extend imm8 */
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        /* unconditional branch */
        if (insn & (1 << 11)) {
            /* Second half of blx.  */
            offset = ((insn & 0x7ff) << 1);
            gen_movl_T0_reg(s, 14);     /* lr holds the partial target */
            gen_op_movl_T1_im(offset);
            gen_op_addl_T0_T1();
            gen_op_movl_T1_im(0xfffffffc);  /* force ARM-state alignment */
            gen_op_andl_T0_T1();

            val = (uint32_t)s->pc;
            gen_op_movl_T1_im(val | 1); /* return address, Thumb bit set */
            gen_movl_reg_T1(s, 14);
            gen_bx(s);
            break;
        }
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21;   /* sign-extend imm11 */
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        /* branch and link [and switch to arm] */
        if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
            /* Instruction spans a page boundary.  Implement it as two
               16-bit instructions in case the second half causes an
               prefetch abort.  */
            offset = ((int32_t)insn << 21) >> 9;
            val = s->pc + 2 + offset;
            gen_op_movl_T0_im(val);
            gen_movl_reg_T0(s, 14);     /* stash partial target in lr */
            break;
        }
        if (insn & (1 << 11)) {
            /* Second half of bl.  */
            offset = ((insn & 0x7ff) << 1) | 1;
            gen_movl_T0_reg(s, 14);
            gen_op_movl_T1_im(offset);
            gen_op_addl_T0_T1();

            val = (uint32_t)s->pc;
            gen_op_movl_T1_im(val | 1);
            gen_movl_reg_T1(s, 14);
            gen_bx(s);
            break;
        }
        /* Both halves available: read the second halfword directly.  */
        offset = ((int32_t)insn << 21) >> 10;
        insn = lduw_code(s->pc);
        offset |= insn & 0x7ff;

        val = (uint32_t)s->pc + 2;
        gen_op_movl_T1_im(val | 1);
        gen_movl_reg_T1(s, 14);

        val += offset << 1;
        if (insn & (1 << 12)) {
            /* bl */
            gen_jmp(s, val);
        } else {
            /* blx */
            val &= ~(uint32_t)2;
            gen_op_movl_T0_im(val);
            gen_bx(s);
        }
    }
    return;
undef:
    /* Undefined instruction: point r15 at the faulting halfword and
       raise the exception.  */
    gen_op_movl_T0_im((long)s->pc - 2);
    gen_op_movl_reg_TN[0][15]();
    gen_op_undef_insn();
    s->is_jmp = DISAS_JUMP;
}
2387
2388 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2389 basic block 'tb'. If search_pc is TRUE, also generate PC
2390 information for each intermediate instruction. */
static inline int gen_intermediate_code_internal(CPUState *env,
                                                 TranslationBlock *tb,
                                                 int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_ptr = gen_opc_buf;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    gen_opparam_ptr = gen_opparam_buf;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    dc->is_mem = 0;
#if !defined(CONFIG_USER_ONLY)
    dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
#endif
    /* Translation never crosses a page boundary (see loop condition).  */
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    nb_gen_labels = 0;
    lj = -1;
    do {
        /* Emit a debug trap instead of the instruction when a
           breakpoint is set at the current pc.  */
        if (env->nb_breakpoints > 0) {
            for(j = 0; j < env->nb_breakpoints; j++) {
                if (env->breakpoints[j] == dc->pc) {
                    gen_op_movl_T0_im((long)dc->pc);
                    gen_op_movl_reg_TN[0][15]();
                    gen_op_debug();
                    dc->is_jmp = DISAS_JUMP;
                    break;
                }
            }
        }
        /* When search_pc is set, record the guest pc for each generated
           op so exceptions can be restarted at the right instruction.  */
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
        }

        if (env->thumb)
            disas_thumb_insn(dc);
        else
            disas_arm_insn(env, dc);

        /* If the instruction was conditionally skipped, bind the label
           the condition test jumps to.  */
        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Terminate the TB on memory ops if watchpoints are present.  */
        /* FIXME: This should be replaced by the deterministic execution
         * IRQ raising bits.  */
        if (dc->is_mem && env->nb_watchpoints)
            break;

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             dc->pc < next_page_start);
    /* At this stage dc->condjmp will only be set when the skipped
     * instruction was a conditional branch, and the PC has already been
     * written.  */
    if (__builtin_expect(env->singlestep_enabled, 0)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_op_debug();
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_op_movl_T0_im((long)dc->pc);
            gen_op_movl_reg_TN[0][15]();
            dc->condjmp = 0;
        }
        gen_op_debug();
    } else {
        /* Normal TB epilogue: chain to the next TB or exit to the
           main loop depending on how translation ended.  */
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            gen_op_movl_T0_0();
            gen_op_exit_tb();
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
        /* Not-taken path of a trailing conditional branch: fall through
           to the next instruction's TB.  */
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_IN_ASM) {
        fprintf(logfile, "----------------\n");
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
        target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
        fprintf(logfile, "\n");
        if (loglevel & (CPU_LOG_TB_OP)) {
            fprintf(logfile, "OP:\n");
            dump_ops(gen_opc_buf, gen_opparam_buf);
            fprintf(logfile, "\n");
        }
    }
#endif
    if (search_pc) {
        /* Pad the op->pc map out to the last generated op; tb->size is
           left 0 because the TB was already sized on first translation.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
        tb->size = 0;
    } else {
        tb->size = dc->pc - pc_start;
    }
    return 0;
}
2530
2531 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2532 {
2533 return gen_intermediate_code_internal(env, tb, 0);
2534 }
2535
2536 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2537 {
2538 return gen_intermediate_code_internal(env, tb, 1);
2539 }
2540
/* Printable names for the ARM processor modes, indexed by the low four
   bits of the PSR mode field; "???" marks reserved encodings.  */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
2545 void cpu_dump_state(CPUState *env, FILE *f,
2546 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2547 int flags)
2548 {
2549 int i;
2550 union {
2551 uint32_t i;
2552 float s;
2553 } s0, s1;
2554 CPU_DoubleU d;
2555 /* ??? This assumes float64 and double have the same layout.
2556 Oh well, it's only debug dumps. */
2557 union {
2558 float64 f64;
2559 double d;
2560 } d0;
2561 uint32_t psr;
2562
2563 for(i=0;i<16;i++) {
2564 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2565 if ((i % 4) == 3)
2566 cpu_fprintf(f, "\n");
2567 else
2568 cpu_fprintf(f, " ");
2569 }
2570 psr = cpsr_read(env);
2571 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d %x\n",
2572 psr,
2573 psr & (1 << 31) ? 'N' : '-',
2574 psr & (1 << 30) ? 'Z' : '-',
2575 psr & (1 << 29) ? 'C' : '-',
2576 psr & (1 << 28) ? 'V' : '-',
2577 psr & CPSR_T ? 'T' : 'A',
2578 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
2579
2580 for (i = 0; i < 16; i++) {
2581 d.d = env->vfp.regs[i];
2582 s0.i = d.l.lower;
2583 s1.i = d.l.upper;
2584 d0.f64 = d.d;
2585 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
2586 i * 2, (int)s0.i, s0.s,
2587 i * 2 + 1, (int)s1.i, s1.s,
2588 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2589 d0.d);
2590 }
2591 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
2592 }
2593