/*
 *  UniCore32 translation
 *
 *  Copyright (C) 2010-2012 Guan Xuetao
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation, or (at your option) any
 *  later version. See the COPYING file in the top-level directory.
 */
/* internal defines */
typedef struct DisasContext {
    target_ulong pc;
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    int condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_SYSCALL 5

static TCGv_ptr cpu_env;
static TCGv_i32 cpu_R[32];

/* FIXME: These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
static TCGv_i64 cpu_F0d, cpu_F1d;

#include "gen-icount.h"

static const char *regnames[] = {
    "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
    "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
/* initialize TCG globals. */
void uc32_translate_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 32; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
                offsetof(CPUUniCore32State, regs[i]), regnames[i]);
    }
}
static int num_temps;

/* Allocate a temporary variable. */
static TCGv_i32 new_tmp(void)
{
    num_temps++;
    return tcg_temp_new_i32();
}

/* Release a temporary variable. */
static void dead_tmp(TCGv tmp)
{
    tcg_temp_free(tmp);
    num_temps--;
}

static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))

static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUUniCore32State, name))
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 31) {
        uint32_t addr;
        /* normally, since we updated PC */
        addr = (long)s->pc;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/* Set a CPU register. The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}

/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
#define UCOP_REG_M      (((insn) >>  0) & 0x1f)
#define UCOP_REG_N      (((insn) >> 19) & 0x1f)
#define UCOP_REG_D      (((insn) >> 14) & 0x1f)
#define UCOP_REG_S      (((insn) >>  9) & 0x1f)
#define UCOP_REG_LO     (((insn) >> 14) & 0x1f)
#define UCOP_REG_HI     (((insn) >>  9) & 0x1f)
#define UCOP_SH_OP      (((insn) >>  6) & 0x03)
#define UCOP_SH_IM      (((insn) >>  9) & 0x1f)
#define UCOP_OPCODES    (((insn) >> 25) & 0x0f)
#define UCOP_IMM_9      (((insn) >>  0) & 0x1ff)
#define UCOP_IMM10      (((insn) >>  0) & 0x3ff)
#define UCOP_IMM14      (((insn) >>  0) & 0x3fff)
#define UCOP_COND       (((insn) >> 25) & 0x0f)
#define UCOP_CMOV_COND  (((insn) >> 19) & 0x0f)
#define UCOP_CPNUM      (((insn) >> 10) & 0x0f)
#define UCOP_UCF64_FMT  (((insn) >> 24) & 0x03)
#define UCOP_UCF64_FUNC (((insn) >>  6) & 0x0f)
#define UCOP_UCF64_COND (((insn) >>  6) & 0x0f)

#define UCOP_SET(i)     ((insn) & (1 << (i)))
#define UCOP_SET_P      UCOP_SET(28)
#define UCOP_SET_U      UCOP_SET(27)
#define UCOP_SET_B      UCOP_SET(26)
#define UCOP_SET_W      UCOP_SET(25)
#define UCOP_SET_L      UCOP_SET(24)
#define UCOP_SET_S      UCOP_SET(24)
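
/*
 * These macros simply extract fixed bit fields from the 32-bit instruction
 * word `insn` that every decoder below has in scope.  For example, with the
 * (hypothetical) value insn = 0x00084001, UCOP_REG_M == 1, UCOP_REG_D == 1
 * and UCOP_REG_N == 1, while UCOP_SET(19) tests bit 19 of the word.
 */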
#define ILLEGAL cpu_abort(env, \
        "Illegal UniCore32 instruction %x at line %d!", \
        insn, __LINE__)
#ifndef CONFIG_USER_ONLY
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
    }
}
static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    TCGv tmp;

    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         *   nn: UCOP_REG_N (must be 0)
         */
        if (UCOP_REG_N == 0) {
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         *   nn: UCOP_REG_N (must be 1)
         */
        if (UCOP_REG_N == 1) {
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        }
    }
}
#endif
static inline void gen_set_asr(TCGv var, uint32_t mask)
{
    TCGv tmp_mask = tcg_const_i32(mask);
    gen_helper_asr_write(var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply. */
/* 32x32->64 multiply. Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}

static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_ext_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_temp_free_i64(tmp2);
    return tmp1;
}
#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))

/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 31);
    gen_set_CF(tmp);
    dead_tmp(tmp);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
}
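
/* The flags are kept lazily: NF and ZF hold the last result value (N is the
 * sign bit of NF, Z means ZF == 0), CF holds 0 or 1, and V is the sign bit
 * of VF.  gen_test_cc() below evaluates conditions directly from these. */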
/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_add_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    dead_tmp(tmp);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
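
/* Set the carry flag to bit `shift` of var, i.e. the last bit shifted out by
 * the immediate shift generated in gen_uc32_shift_im() below. */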
static void shifter_out_im(TCGv var, int shift)
{
    TCGv tmp = new_tmp();
    if (shift == 0) {
        tcg_gen_andi_i32(tmp, var, 1);
    } else {
        tcg_gen_shri_i32(tmp, var, shift);
        if (shift != 31) {
            tcg_gen_andi_i32(tmp, tmp, 1);
        }
    }
    gen_set_CF(tmp);
    dead_tmp(tmp);
}
/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
        int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, 32 - shift);
            }
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0) {
            shift = 32;
        }
        if (flags) {
            shifter_out_im(var, shift - 1);
        }
        if (shift == 32) {
            shift = 31;
        }
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags) {
                shifter_out_im(var, shift - 1);
            }
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv tmp = load_cpu_field(CF);
            if (flags) {
                shifter_out_im(var, 0);
            }
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
}
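
/* Shift by register.  When `flags` is nonzero the *_cc helpers are used so
 * the carry flag is updated as a side effect; the shift temporary is freed
 * here. */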
static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
        TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0:
            gen_helper_shl_cc(var, var, shift);
            break;
        case 1:
            gen_helper_shr_cc(var, var, shift);
            break;
        case 2:
            gen_helper_sar_cc(var, var, shift);
            break;
        case 3:
            gen_helper_ror_cc(var, var, shift);
            break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_helper_shl(var, var, shift);
            break;
        case 1:
            gen_helper_shr(var, var, shift);
            break;
        case 2:
            gen_helper_sar(var, var, shift);
            break;
        case 3:
            tcg_gen_andi_i32(shift, shift, 0x1f);
            tcg_gen_rotr_i32(var, var, shift);
            break;
        }
    }
    dead_tmp(shift);
}
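
/* Generate a branch to `label` if condition code `cc` holds for the current
 * NZCV flags; falls through when the condition is false. */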
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
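
/* Per-opcode flag behaviour: 1 means the opcode sets N and Z from the logical
 * result via gen_logic_CC(), 0 means the flags come from the add/sub carry
 * helpers instead (see do_datap). */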
static const uint8_t table_logic_cc[16] = {
    1, /* and */  1, /* xor */  0, /* sub */  0, /* rsb */
    0, /* add */  0, /* adc */  0, /* sbc */  0, /* rsc */
    1, /* andl */ 1, /* xorl */ 0, /* cmp */  0, /* cmn */
    1, /* orr */  1, /* mov */  1, /* bic */  1, /* mvn */
};
/* Set PC state from an immediate address. */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_movi_i32(cpu_R[31], addr & ~3);
}

/* Set PC state from var. var is marked as dead. */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    s->is_jmp = DISAS_UPDATE;
    tcg_gen_andi_i32(cpu_R[31], var, ~3);
    dead_tmp(var);
}

static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}

static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}

static inline TCGv_i64 gen_ld64(TCGv addr, int index)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(tmp, addr, index);
    return tmp;
}

static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}

static inline void gen_st64(TCGv_i64 val, TCGv addr, int index)
{
    tcg_gen_qemu_st64(val, addr, index);
    tcg_temp_free_i64(val);
}

static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}
/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
    s->is_jmp = DISAS_UPDATE;
}
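
/* Add the addressing-mode offset for a word/byte load or store to the base
 * address in `var`: either an immediate or a shifted index register, added
 * or subtracted according to the U bit. */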
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate offset */
        val = UCOP_IMM14;
        if (!UCOP_SET_U) {
            val = -val;
        }
        tcg_gen_addi_i32(var, var, val);
    } else {
        /* shifted register offset */
        offset = load_reg(s, UCOP_REG_M);
        gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
        TCGv var)
{
    int val;
    TCGv offset;

    if (UCOP_SET(29)) {
        /* immediate offset, split around the shift/register fields */
        val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
        if (!UCOP_SET_U) {
            val = -val;
        }
        tcg_gen_addi_i32(var, var, val);
    } else {
        /* register offset */
        offset = load_reg(s, UCOP_REG_M);
        if (!UCOP_SET_U) {
            tcg_gen_sub_i32(var, var, offset);
        } else {
            tcg_gen_add_i32(var, var, offset);
        }
        dead_tmp(offset);
    }
}
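
/* Byte offset of UCF64 single-precision register `reg` within
 * CPUUniCore32State: each 64-bit ucf64.regs[] slot holds two 32-bit
 * registers, with the odd-numbered one in the upper half. */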
static inline long ucf64_reg_offset(int reg)
{
    if (reg & 1) {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}

#define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
#define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
/* UniCore-F64 single load/store I_offset */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    int offset;
    TCGv tmp;
    TCGv addr;

    addr = load_reg(s, UCOP_REG_N);
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }

    if (UCOP_SET_P) { /* pre index */
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }

    if (UCOP_SET_W) {
        if (!UCOP_SET_P) { /* post index */
            offset = UCOP_IMM10 << 2;
            if (!UCOP_SET_U) {
                offset = -offset;
            }
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 load/store multiple words */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;

    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        ILLEGAL;
    }
    if ((insn << 24) == 0) {
        ILLEGAL;
    }

    addr = load_reg(s, UCOP_REG_N);

    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */

    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }
        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }
        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) { /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else { /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 mrc/mcr */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;

    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* read FPSCR into a general register */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* write a general register to FPSCR */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* general register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* compare with condition */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            ILLEGAL;
        }

        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}
/* UniCore-F64 convert instructions */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}
/* UniCore-F64 compare instructions */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    if (UCOP_SET(24)) { /* double precision */
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else { /* single precision */
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}
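
/* Templates for one-operand and two-operand UCF64 data-processing ops: they
 * load the source register(s), invoke gen_helper_ucf64_<name>s or
 * gen_helper_ucf64_<name>d according to UCOP_UCF64_FMT, and store the result
 * back to UCOP_REG_D. */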
#define gen_helper_ucf64_movs(x, y) do { } while (0)
#define gen_helper_ucf64_movd(x, y) do { } while (0)

#define UCF64_OP1(name) do { \
        if (UCOP_REG_N != 0) { \
            ILLEGAL; \
        } \
        switch (UCOP_UCF64_FMT) { \
        case 0 /* s */: \
            tcg_gen_ld_i32(cpu_F0s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break; \
        case 1 /* d */: \
            tcg_gen_ld_i64(cpu_F0d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break; \
        default: \
            ILLEGAL; \
            break; \
        } \
    } while (0)

#define UCF64_OP2(name) do { \
        switch (UCOP_UCF64_FMT) { \
        case 0 /* s */: \
            tcg_gen_ld_i32(cpu_F0s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, \
                           cpu_F0s, cpu_F1s, cpu_env); \
            tcg_gen_st_i32(cpu_F0s, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break; \
        case 1 /* d */: \
            tcg_gen_ld_i64(cpu_F0d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, \
                           cpu_F0d, cpu_F1d, cpu_env); \
            tcg_gen_st_i64(cpu_F0d, cpu_env, \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break; \
        default: \
            ILLEGAL; \
            break; \
        } \
    } while (0)
/* UniCore-F64 data processing */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    }
}
/* Disassemble an F64 instruction */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}
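
/* Jump to `dest`: chain directly to translation-block slot `n` when the
 * destination lies in the same guest page as this TB, otherwise just update
 * PC and exit to the main loop. */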
static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
{
    TranslationBlock *tb;

    tb = s->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(dest);
        tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
        gen_set_pc_im(dest);
        tcg_gen_exit_tb(0);
    }
}
static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    if (unlikely(s->singlestep_enabled)) {
        /* An indirect jump so that we still trigger the debug exception. */
        gen_bx_im(s, dest);
    } else {
        gen_goto_tb(s, 0, dest);
        s->is_jmp = DISAS_TB_JUMP;
    }
}
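
/* 16x16 signed multiply helper: `x` selects the high or low half of t0 and
 * `y` does the same for t1; the product is left in t0. */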
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x) {
        tcg_gen_sari_i32(t0, t0, 16);
    } else {
        gen_sxth(t0);
    }
    if (y) {
        tcg_gen_sari_i32(t1, t1, 16);
    } else {
        gen_sxth(t1);
    }
    tcg_gen_mul_i32(t0, t0, t1);
}
/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;

    if (bsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s)) {
            return 1;
        }
        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    return 0;
}
/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;

    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    s->is_jmp = DISAS_UPDATE;
}
static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0:
        disas_cp0_insn(env, s, insn);
        break;
    case 1:
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor. */
        cpu_abort(env, "Unknown coprocessor!");
    }
}
/* Store a 64-bit value to a register pair. Clobbers val. */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv tmp;

    tmp = new_tmp();
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = new_tmp();
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}
/* Load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv tmpl;
    TCGv tmph;

    /* Load 64-bit value rd:rn. */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    dead_tmp(tmpl);
    dead_tmp(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}
/* data processing instructions */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    TCGv tmp2;
    unsigned int val;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        /* immediate operand */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register operand */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        TCGV_UNUSED(tmp);
    }

    switch (UCOP_OPCODES) {
    case 0x00:
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01:
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02:
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return. */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05:
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07:
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08:
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09:
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a:
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b:
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c:
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d:
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return. */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e:
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f:
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}
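
/* multiply instructions */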
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2;
    TCGv_i64 tmp64;

    if (UCOP_SET(27)) {
        /* 64 bit multiply */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            tmp64 = gen_muls_i64_i32(tmp, tmp2);
        } else {
            tmp64 = gen_mulu_i64_i32(tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            gen_addq(s, tmp64, UCOP_REG_LO, UCOP_REG_HI);
        }
        gen_storeq_reg(s, UCOP_REG_LO, UCOP_REG_HI, tmp64);
        tcg_temp_free_i64(tmp64);
    } else {
        /* 32 bit multiply */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) { /* add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* miscellaneous instructions */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx. */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz/clo */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            gen_helper_clo(tmp, tmp);
        } else {
            gen_helper_clz(tmp, tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}
/* load/store I_offset and R_offset */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int i;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    i = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, i);
        } else {
            tmp = gen_ld32(tmp2, i);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, i);
        } else {
            gen_st32(tmp, tmp2, i);
        }
    }

    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }

    if (UCOP_SET_L) {
        /* Complete the load. */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}
/* SWP instruction */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic. However we know
       we never have multiple CPUs running in parallel,
       so it is good enough. */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}
/* load/store hw/sb */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }
    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
    if (UCOP_SET_L) {
        /* Complete the load. */
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* load/store multiple words */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val, i;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            user = 1;
        }
    }

    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    TCGV_UNUSED(loaded_var);
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* XXX: test invalid n == 0 case ? */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (!UCOP_SET(i)) {
            continue;
        }
        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            if (reg == 31) {
                gen_bx(s, tmp);
            } else if (user) {
                tmp2 = tcg_const_i32(reg);
                gen_helper_set_user_reg(tmp2, tmp);
                tcg_temp_free_i32(tmp2);
                dead_tmp(tmp);
            } else if (reg == UCOP_REG_N) {
                loaded_var = tmp;
                loaded_base = 1;
            } else {
                store_reg(s, reg, tmp);
            }
        } else { /* store */
            if (reg == 31) {
                /* special case: r31 = PC + 4 */
                val = (long)s->pc;
                tmp = new_tmp();
                tcg_gen_movi_i32(tmp, val);
            } else if (user) {
                tmp = new_tmp();
                tmp2 = tcg_const_i32(reg);
                gen_helper_get_user_reg(tmp, tmp2);
                tcg_temp_free_i32(tmp2);
            } else {
                tmp = load_reg(s, reg);
            }
            gen_st32(tmp, addr, IS_USER(s));
        }
        j++;
        /* no need to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }

    if (UCOP_SET_W) { /* write back */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }

    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }

    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR. */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}
/* branch (and link) */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }

    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    unsigned int insn;

    insn = ldl_code(s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     */
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }
        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
        /* fall through */
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;
    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
        /* fall through */
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;
    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor. */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
        break;
    default:
        ILLEGAL;
    }
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUUniCore32State *env,
        TranslationBlock *tb, int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    CPUBreakpoint *bp;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == dc->pc) {
                    gen_set_pc_im(dc->pc);
                    gen_exception(EXCP_DEBUG);
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB. */
                    dc->pc += 2; /* FIXME */
                    goto done_generating;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached. This
         * ensures prefetch aborts occur at the right place. */
        num_insns++;
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(env, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(env->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j) {
            gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }
}
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUUniCore32State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};

#define UCF64_DUMP_STATE
void cpu_dump_state(CPUUniCore32State *env, FILE *f, fprintf_function cpu_fprintf,
        int flags)
{
    int i;
    uint32_t psr;
#ifdef UCF64_DUMP_STATE
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps. */
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    union {
        float64 f64;
        double d;
    } d0;
#endif

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
        if ((i % 4) == 3) {
            cpu_fprintf(f, "\n");
        } else {
            cpu_fprintf(f, " ");
        }
    }
    psr = cpu_asr_read(env);
    cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
                psr,
                psr & (1 << 31) ? 'N' : '-',
                psr & (1 << 30) ? 'Z' : '-',
                psr & (1 << 29) ? 'C' : '-',
                psr & (1 << 28) ? 'V' : '-',
                cpu_mode_names[psr & 0xf]);

#ifdef UCF64_DUMP_STATE
    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%" PRIx64 "(%8g)\n",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s,
                    i, (uint64_t)d0.f64, d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
#endif
}
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb, int pc_pos)
{
    env->regs[31] = gen_opc_pc[pc_pos];
}