]>
git.proxmox.com Git - mirror_qemu.git/blob - target/microblaze/translate.c
2 * Xilinx MicroBlaze emulation for qemu: main translation routines.
4 * Copyright (c) 2009 Edgar E. Iglesias.
5 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/helper-proto.h"
27 #include "microblaze-decode.h"
28 #include "exec/cpu_ldst.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
31 #include "qemu/qemu-print.h"
33 #include "trace-tcg.h"
36 #define EXTRACT_FIELD(src, start, end) \
37 (((src) >> start) & ((1 << (end - start + 1)) - 1))
39 /* is_jmp field values */
40 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
41 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
43 static TCGv_i32 cpu_R
[32];
44 static TCGv_i32 cpu_pc
;
45 static TCGv_i32 cpu_msr
;
46 static TCGv_i32 cpu_msr_c
;
47 static TCGv_i32 cpu_imm
;
48 static TCGv_i32 cpu_btaken
;
49 static TCGv_i32 cpu_btarget
;
50 static TCGv_i32 cpu_iflags
;
51 static TCGv cpu_res_addr
;
52 static TCGv_i32 cpu_res_val
;
54 #include "exec/gen-icount.h"
56 /* This is the state at translation time. */
57 typedef struct DisasContext
{
58 DisasContextBase base
;
72 unsigned int cpustate_changed
;
73 unsigned int delayed_branch
;
74 unsigned int tb_flags
, synced_flags
; /* tb dependent flags. */
75 unsigned int clear_imm
;
79 #define JMP_DIRECT_CC 2
80 #define JMP_INDIRECT 3
84 int abort_at_next_insn
;
87 static int typeb_imm(DisasContext
*dc
, int x
)
89 if (dc
->tb_flags
& IMM_FLAG
) {
90 return deposit32(dc
->ext_imm
, 0, 16, x
);
95 /* Include the auto-generated decoder. */
96 #include "decode-insns.c.inc"
98 static inline void t_sync_flags(DisasContext
*dc
)
100 /* Synch the tb dependent flags between translator and runtime. */
101 if (dc
->tb_flags
!= dc
->synced_flags
) {
102 tcg_gen_movi_i32(cpu_iflags
, dc
->tb_flags
);
103 dc
->synced_flags
= dc
->tb_flags
;
107 static void gen_raise_exception(DisasContext
*dc
, uint32_t index
)
109 TCGv_i32 tmp
= tcg_const_i32(index
);
111 gen_helper_raise_exception(cpu_env
, tmp
);
112 tcg_temp_free_i32(tmp
);
113 dc
->base
.is_jmp
= DISAS_NORETURN
;
116 static void gen_raise_exception_sync(DisasContext
*dc
, uint32_t index
)
119 tcg_gen_movi_i32(cpu_pc
, dc
->base
.pc_next
);
120 gen_raise_exception(dc
, index
);
123 static void gen_raise_hw_excp(DisasContext
*dc
, uint32_t esr_ec
)
125 TCGv_i32 tmp
= tcg_const_i32(esr_ec
);
126 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUMBState
, esr
));
127 tcg_temp_free_i32(tmp
);
129 gen_raise_exception_sync(dc
, EXCP_HW_EXCP
);
132 static inline bool use_goto_tb(DisasContext
*dc
, target_ulong dest
)
134 #ifndef CONFIG_USER_ONLY
135 return (dc
->base
.pc_first
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
141 static void gen_goto_tb(DisasContext
*dc
, int n
, target_ulong dest
)
143 if (dc
->base
.singlestep_enabled
) {
144 TCGv_i32 tmp
= tcg_const_i32(EXCP_DEBUG
);
145 tcg_gen_movi_i32(cpu_pc
, dest
);
146 gen_helper_raise_exception(cpu_env
, tmp
);
147 tcg_temp_free_i32(tmp
);
148 } else if (use_goto_tb(dc
, dest
)) {
150 tcg_gen_movi_i32(cpu_pc
, dest
);
151 tcg_gen_exit_tb(dc
->base
.tb
, n
);
153 tcg_gen_movi_i32(cpu_pc
, dest
);
154 tcg_gen_exit_tb(NULL
, 0);
156 dc
->base
.is_jmp
= DISAS_NORETURN
;
160 * Returns true if the insn is an illegal operation.
161 * If exceptions are enabled, an exception is raised.
163 static bool trap_illegal(DisasContext
*dc
, bool cond
)
165 if (cond
&& (dc
->tb_flags
& MSR_EE_FLAG
)
166 && dc
->cpu
->cfg
.illegal_opcode_exception
) {
167 gen_raise_hw_excp(dc
, ESR_EC_ILLEGAL_OP
);
173 * Returns true if the insn is illegal in userspace.
174 * If exceptions are enabled, an exception is raised.
176 static bool trap_userspace(DisasContext
*dc
, bool cond
)
178 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
179 bool cond_user
= cond
&& mem_index
== MMU_USER_IDX
;
181 if (cond_user
&& (dc
->tb_flags
& MSR_EE_FLAG
)) {
182 gen_raise_hw_excp(dc
, ESR_EC_PRIVINSN
);
187 static int32_t dec_alu_typeb_imm(DisasContext
*dc
)
189 tcg_debug_assert(dc
->type_b
);
190 return typeb_imm(dc
, (int16_t)dc
->imm
);
193 static inline TCGv_i32
*dec_alu_op_b(DisasContext
*dc
)
196 tcg_gen_movi_i32(cpu_imm
, dec_alu_typeb_imm(dc
));
199 return &cpu_R
[dc
->rb
];
202 static TCGv_i32
reg_for_read(DisasContext
*dc
, int reg
)
204 if (likely(reg
!= 0)) {
208 if (dc
->r0
== NULL
) {
209 dc
->r0
= tcg_temp_new_i32();
211 tcg_gen_movi_i32(dc
->r0
, 0);
217 static TCGv_i32
reg_for_write(DisasContext
*dc
, int reg
)
219 if (likely(reg
!= 0)) {
222 if (dc
->r0
== NULL
) {
223 dc
->r0
= tcg_temp_new_i32();
228 static bool do_typea(DisasContext
*dc
, arg_typea
*arg
, bool side_effects
,
229 void (*fn
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
233 if (arg
->rd
== 0 && !side_effects
) {
237 rd
= reg_for_write(dc
, arg
->rd
);
238 ra
= reg_for_read(dc
, arg
->ra
);
239 rb
= reg_for_read(dc
, arg
->rb
);
244 static bool do_typeb_imm(DisasContext
*dc
, arg_typeb
*arg
, bool side_effects
,
245 void (*fni
)(TCGv_i32
, TCGv_i32
, int32_t))
249 if (arg
->rd
== 0 && !side_effects
) {
253 rd
= reg_for_write(dc
, arg
->rd
);
254 ra
= reg_for_read(dc
, arg
->ra
);
255 fni(rd
, ra
, arg
->imm
);
259 static bool do_typeb_val(DisasContext
*dc
, arg_typeb
*arg
, bool side_effects
,
260 void (*fn
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
262 TCGv_i32 rd
, ra
, imm
;
264 if (arg
->rd
== 0 && !side_effects
) {
268 rd
= reg_for_write(dc
, arg
->rd
);
269 ra
= reg_for_read(dc
, arg
->ra
);
270 imm
= tcg_const_i32(arg
->imm
);
274 tcg_temp_free_i32(imm
);
278 #define DO_TYPEA(NAME, SE, FN) \
279 static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
280 { return do_typea(dc, a, SE, FN); }
282 #define DO_TYPEA_CFG(NAME, CFG, SE, FN) \
283 static bool trans_##NAME(DisasContext *dc, arg_typea *a) \
284 { return dc->cpu->cfg.CFG && do_typea(dc, a, SE, FN); }
286 #define DO_TYPEBI(NAME, SE, FNI) \
287 static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
288 { return do_typeb_imm(dc, a, SE, FNI); }
290 #define DO_TYPEBI_CFG(NAME, CFG, SE, FNI) \
291 static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
292 { return dc->cpu->cfg.CFG && do_typeb_imm(dc, a, SE, FNI); }
294 #define DO_TYPEBV(NAME, SE, FN) \
295 static bool trans_##NAME(DisasContext *dc, arg_typeb *a) \
296 { return do_typeb_val(dc, a, SE, FN); }
298 /* No input carry, but output carry. */
299 static void gen_add(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
301 TCGv_i32 zero
= tcg_const_i32(0);
303 tcg_gen_add2_i32(out
, cpu_msr_c
, ina
, zero
, inb
, zero
);
305 tcg_temp_free_i32(zero
);
308 /* Input and output carry. */
309 static void gen_addc(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
311 TCGv_i32 zero
= tcg_const_i32(0);
312 TCGv_i32 tmp
= tcg_temp_new_i32();
314 tcg_gen_add2_i32(tmp
, cpu_msr_c
, ina
, zero
, cpu_msr_c
, zero
);
315 tcg_gen_add2_i32(out
, cpu_msr_c
, tmp
, cpu_msr_c
, inb
, zero
);
317 tcg_temp_free_i32(tmp
);
318 tcg_temp_free_i32(zero
);
321 /* Input carry, but no output carry. */
322 static void gen_addkc(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
324 tcg_gen_add_i32(out
, ina
, inb
);
325 tcg_gen_add_i32(out
, out
, cpu_msr_c
);
328 DO_TYPEA(add
, true, gen_add
)
329 DO_TYPEA(addc
, true, gen_addc
)
330 DO_TYPEA(addk
, false, tcg_gen_add_i32
)
331 DO_TYPEA(addkc
, true, gen_addkc
)
333 DO_TYPEBV(addi
, true, gen_add
)
334 DO_TYPEBV(addic
, true, gen_addc
)
335 DO_TYPEBI(addik
, false, tcg_gen_addi_i32
)
336 DO_TYPEBV(addikc
, true, gen_addkc
)
338 static void gen_andni(TCGv_i32 out
, TCGv_i32 ina
, int32_t imm
)
340 tcg_gen_andi_i32(out
, ina
, ~imm
);
343 DO_TYPEA(and, false, tcg_gen_and_i32
)
344 DO_TYPEBI(andi
, false, tcg_gen_andi_i32
)
345 DO_TYPEA(andn
, false, tcg_gen_andc_i32
)
346 DO_TYPEBI(andni
, false, gen_andni
)
348 static void gen_cmp(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
350 TCGv_i32 lt
= tcg_temp_new_i32();
352 tcg_gen_setcond_i32(TCG_COND_LT
, lt
, inb
, ina
);
353 tcg_gen_sub_i32(out
, inb
, ina
);
354 tcg_gen_deposit_i32(out
, out
, lt
, 31, 1);
355 tcg_temp_free_i32(lt
);
358 static void gen_cmpu(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
360 TCGv_i32 lt
= tcg_temp_new_i32();
362 tcg_gen_setcond_i32(TCG_COND_LTU
, lt
, inb
, ina
);
363 tcg_gen_sub_i32(out
, inb
, ina
);
364 tcg_gen_deposit_i32(out
, out
, lt
, 31, 1);
365 tcg_temp_free_i32(lt
);
368 DO_TYPEA(cmp
, false, gen_cmp
)
369 DO_TYPEA(cmpu
, false, gen_cmpu
)
371 static void gen_mulh(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
373 TCGv_i32 tmp
= tcg_temp_new_i32();
374 tcg_gen_muls2_i32(tmp
, out
, ina
, inb
);
375 tcg_temp_free_i32(tmp
);
378 static void gen_mulhu(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
380 TCGv_i32 tmp
= tcg_temp_new_i32();
381 tcg_gen_mulu2_i32(tmp
, out
, ina
, inb
);
382 tcg_temp_free_i32(tmp
);
385 static void gen_mulhsu(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
387 TCGv_i32 tmp
= tcg_temp_new_i32();
388 tcg_gen_mulsu2_i32(tmp
, out
, ina
, inb
);
389 tcg_temp_free_i32(tmp
);
392 DO_TYPEA_CFG(mul
, use_hw_mul
, false, tcg_gen_mul_i32
)
393 DO_TYPEA_CFG(mulh
, use_hw_mul
>= 2, false, gen_mulh
)
394 DO_TYPEA_CFG(mulhu
, use_hw_mul
>= 2, false, gen_mulhu
)
395 DO_TYPEA_CFG(mulhsu
, use_hw_mul
>= 2, false, gen_mulhsu
)
396 DO_TYPEBI_CFG(muli
, use_hw_mul
, false, tcg_gen_muli_i32
)
398 DO_TYPEA(or, false, tcg_gen_or_i32
)
399 DO_TYPEBI(ori
, false, tcg_gen_ori_i32
)
401 static void gen_pcmpeq(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
403 tcg_gen_setcond_i32(TCG_COND_EQ
, out
, ina
, inb
);
406 static void gen_pcmpne(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
408 tcg_gen_setcond_i32(TCG_COND_NE
, out
, ina
, inb
);
411 DO_TYPEA_CFG(pcmpbf
, use_pcmp_instr
, false, gen_helper_pcmpbf
)
412 DO_TYPEA_CFG(pcmpeq
, use_pcmp_instr
, false, gen_pcmpeq
)
413 DO_TYPEA_CFG(pcmpne
, use_pcmp_instr
, false, gen_pcmpne
)
415 /* No input carry, but output carry. */
416 static void gen_rsub(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
418 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_msr_c
, inb
, ina
);
419 tcg_gen_sub_i32(out
, inb
, ina
);
422 /* Input and output carry. */
423 static void gen_rsubc(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
425 TCGv_i32 zero
= tcg_const_i32(0);
426 TCGv_i32 tmp
= tcg_temp_new_i32();
428 tcg_gen_not_i32(tmp
, ina
);
429 tcg_gen_add2_i32(tmp
, cpu_msr_c
, tmp
, zero
, cpu_msr_c
, zero
);
430 tcg_gen_add2_i32(out
, cpu_msr_c
, tmp
, cpu_msr_c
, inb
, zero
);
432 tcg_temp_free_i32(zero
);
433 tcg_temp_free_i32(tmp
);
436 /* No input or output carry. */
437 static void gen_rsubk(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
439 tcg_gen_sub_i32(out
, inb
, ina
);
442 /* Input carry, no output carry. */
443 static void gen_rsubkc(TCGv_i32 out
, TCGv_i32 ina
, TCGv_i32 inb
)
445 TCGv_i32 nota
= tcg_temp_new_i32();
447 tcg_gen_not_i32(nota
, ina
);
448 tcg_gen_add_i32(out
, inb
, nota
);
449 tcg_gen_add_i32(out
, out
, cpu_msr_c
);
451 tcg_temp_free_i32(nota
);
454 DO_TYPEA(rsub
, true, gen_rsub
)
455 DO_TYPEA(rsubc
, true, gen_rsubc
)
456 DO_TYPEA(rsubk
, false, gen_rsubk
)
457 DO_TYPEA(rsubkc
, true, gen_rsubkc
)
459 DO_TYPEBV(rsubi
, true, gen_rsub
)
460 DO_TYPEBV(rsubic
, true, gen_rsubc
)
461 DO_TYPEBV(rsubik
, false, gen_rsubk
)
462 DO_TYPEBV(rsubikc
, true, gen_rsubkc
)
464 DO_TYPEA(xor, false, tcg_gen_xor_i32
)
465 DO_TYPEBI(xori
, false, tcg_gen_xori_i32
)
467 static bool trans_zero(DisasContext
*dc
, arg_zero
*arg
)
469 /* If opcode_0_illegal, trap. */
470 if (dc
->cpu
->cfg
.opcode_0_illegal
) {
471 trap_illegal(dc
, true);
475 * Otherwise, this is "add r0, r0, r0".
476 * Continue to trans_add so that MSR[C] gets cleared.
481 static void msr_read(DisasContext
*dc
, TCGv_i32 d
)
485 /* Replicate the cpu_msr_c boolean into the proper bit and the copy. */
486 t
= tcg_temp_new_i32();
487 tcg_gen_muli_i32(t
, cpu_msr_c
, MSR_C
| MSR_CC
);
488 tcg_gen_or_i32(d
, cpu_msr
, t
);
489 tcg_temp_free_i32(t
);
492 static void msr_write(DisasContext
*dc
, TCGv_i32 v
)
494 dc
->cpustate_changed
= 1;
497 tcg_gen_extract_i32(cpu_msr_c
, v
, 2, 1);
499 /* Clear MSR_C and MSR_CC; MSR_PVR is not writable, and is always clear. */
500 tcg_gen_andi_i32(cpu_msr
, v
, ~(MSR_C
| MSR_CC
| MSR_PVR
));
503 static void dec_msr(DisasContext
*dc
)
505 CPUState
*cs
= CPU(dc
->cpu
);
508 bool to
, clrset
, extended
= false;
510 sr
= extract32(dc
->imm
, 0, 14);
511 to
= extract32(dc
->imm
, 14, 1);
512 clrset
= extract32(dc
->imm
, 15, 1) == 0;
515 dc
->cpustate_changed
= 1;
518 /* Extended MSRs are only available if addr_size > 32. */
519 if (dc
->cpu
->cfg
.addr_size
> 32) {
520 /* The E-bit is encoded differently for To/From MSR. */
521 static const unsigned int e_bit
[] = { 19, 24 };
523 extended
= extract32(dc
->imm
, e_bit
[to
], 1);
526 /* msrclr and msrset. */
528 bool clr
= extract32(dc
->ir
, 16, 1);
530 if (!dc
->cpu
->cfg
.use_msr_instr
) {
535 if (trap_userspace(dc
, dc
->imm
!= 4 && dc
->imm
!= 0)) {
540 msr_read(dc
, cpu_R
[dc
->rd
]);
542 t0
= tcg_temp_new_i32();
543 t1
= tcg_temp_new_i32();
545 tcg_gen_mov_i32(t1
, *(dec_alu_op_b(dc
)));
548 tcg_gen_not_i32(t1
, t1
);
549 tcg_gen_and_i32(t0
, t0
, t1
);
551 tcg_gen_or_i32(t0
, t0
, t1
);
553 tcg_temp_free_i32(t0
);
554 tcg_temp_free_i32(t1
);
555 tcg_gen_movi_i32(cpu_pc
, dc
->base
.pc_next
+ 4);
556 dc
->base
.is_jmp
= DISAS_UPDATE
;
560 if (trap_userspace(dc
, to
)) {
564 #if !defined(CONFIG_USER_ONLY)
565 /* Catch read/writes to the mmu block. */
566 if ((sr
& ~0xff) == 0x1000) {
567 TCGv_i32 tmp_ext
= tcg_const_i32(extended
);
571 tmp_sr
= tcg_const_i32(sr
);
573 gen_helper_mmu_write(cpu_env
, tmp_ext
, tmp_sr
, cpu_R
[dc
->ra
]);
575 gen_helper_mmu_read(cpu_R
[dc
->rd
], cpu_env
, tmp_ext
, tmp_sr
);
577 tcg_temp_free_i32(tmp_sr
);
578 tcg_temp_free_i32(tmp_ext
);
588 msr_write(dc
, cpu_R
[dc
->ra
]);
592 TCGv_i64 t64
= tcg_temp_new_i64();
593 tcg_gen_extu_i32_i64(t64
, cpu_R
[dc
->ra
]);
594 tcg_gen_st_i64(t64
, cpu_env
, offsetof(CPUMBState
, ear
));
595 tcg_temp_free_i64(t64
);
599 tcg_gen_st_i32(cpu_R
[dc
->ra
],
600 cpu_env
, offsetof(CPUMBState
, esr
));
603 tcg_gen_st_i32(cpu_R
[dc
->ra
],
604 cpu_env
, offsetof(CPUMBState
, fsr
));
607 tcg_gen_st_i32(cpu_R
[dc
->ra
],
608 cpu_env
, offsetof(CPUMBState
, btr
));
611 tcg_gen_st_i32(cpu_R
[dc
->ra
],
612 cpu_env
, offsetof(CPUMBState
, edr
));
615 tcg_gen_st_i32(cpu_R
[dc
->ra
],
616 cpu_env
, offsetof(CPUMBState
, slr
));
619 tcg_gen_st_i32(cpu_R
[dc
->ra
],
620 cpu_env
, offsetof(CPUMBState
, shr
));
623 cpu_abort(CPU(dc
->cpu
), "unknown mts reg %x\n", sr
);
629 tcg_gen_movi_i32(cpu_R
[dc
->rd
], dc
->base
.pc_next
);
632 msr_read(dc
, cpu_R
[dc
->rd
]);
636 TCGv_i64 t64
= tcg_temp_new_i64();
637 tcg_gen_ld_i64(t64
, cpu_env
, offsetof(CPUMBState
, ear
));
639 tcg_gen_extrh_i64_i32(cpu_R
[dc
->rd
], t64
);
641 tcg_gen_extrl_i64_i32(cpu_R
[dc
->rd
], t64
);
643 tcg_temp_free_i64(t64
);
647 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
648 cpu_env
, offsetof(CPUMBState
, esr
));
651 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
652 cpu_env
, offsetof(CPUMBState
, fsr
));
655 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
656 cpu_env
, offsetof(CPUMBState
, btr
));
659 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
660 cpu_env
, offsetof(CPUMBState
, edr
));
663 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
664 cpu_env
, offsetof(CPUMBState
, slr
));
667 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
668 cpu_env
, offsetof(CPUMBState
, shr
));
670 case 0x2000 ... 0x200c:
672 tcg_gen_ld_i32(cpu_R
[dc
->rd
],
673 cpu_env
, offsetof(CPUMBState
, pvr
.regs
[rn
]));
676 cpu_abort(cs
, "unknown mfs reg %x\n", sr
);
682 tcg_gen_movi_i32(cpu_R
[0], 0);
687 static void dec_div(DisasContext
*dc
)
693 if (trap_illegal(dc
, !dc
->cpu
->cfg
.use_div
)) {
698 gen_helper_divu(cpu_R
[dc
->rd
], cpu_env
, *(dec_alu_op_b(dc
)),
701 gen_helper_divs(cpu_R
[dc
->rd
], cpu_env
, *(dec_alu_op_b(dc
)),
704 tcg_gen_movi_i32(cpu_R
[dc
->rd
], 0);
707 static void dec_barrel(DisasContext
*dc
)
710 unsigned int imm_w
, imm_s
;
711 bool s
, t
, e
= false, i
= false;
713 if (trap_illegal(dc
, !dc
->cpu
->cfg
.use_barrel
)) {
718 /* Insert and extract are only available in immediate mode. */
719 i
= extract32(dc
->imm
, 15, 1);
720 e
= extract32(dc
->imm
, 14, 1);
722 s
= extract32(dc
->imm
, 10, 1);
723 t
= extract32(dc
->imm
, 9, 1);
724 imm_w
= extract32(dc
->imm
, 6, 5);
725 imm_s
= extract32(dc
->imm
, 0, 5);
728 if (imm_w
+ imm_s
> 32 || imm_w
== 0) {
729 /* These inputs have an undefined behavior. */
730 qemu_log_mask(LOG_GUEST_ERROR
, "bsefi: Bad input w=%d s=%d\n",
733 tcg_gen_extract_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], imm_s
, imm_w
);
736 int width
= imm_w
- imm_s
+ 1;
739 /* These inputs have an undefined behavior. */
740 qemu_log_mask(LOG_GUEST_ERROR
, "bsifi: Bad input w=%d s=%d\n",
743 tcg_gen_deposit_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cpu_R
[dc
->ra
],
747 t0
= tcg_temp_new_i32();
749 tcg_gen_mov_i32(t0
, *(dec_alu_op_b(dc
)));
750 tcg_gen_andi_i32(t0
, t0
, 31);
753 tcg_gen_shl_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
756 tcg_gen_sar_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
758 tcg_gen_shr_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
761 tcg_temp_free_i32(t0
);
765 static void dec_bit(DisasContext
*dc
)
767 CPUState
*cs
= CPU(dc
->cpu
);
771 op
= dc
->ir
& ((1 << 9) - 1);
775 t0
= tcg_temp_new_i32();
777 tcg_gen_shli_i32(t0
, cpu_msr_c
, 31);
778 tcg_gen_andi_i32(cpu_msr_c
, cpu_R
[dc
->ra
], 1);
780 tcg_gen_shri_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
781 tcg_gen_or_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], t0
);
783 tcg_temp_free_i32(t0
);
789 tcg_gen_andi_i32(cpu_msr_c
, cpu_R
[dc
->ra
], 1);
792 tcg_gen_shri_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
794 tcg_gen_sari_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
798 tcg_gen_ext8s_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
801 tcg_gen_ext16s_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
808 trap_userspace(dc
, true);
812 trap_userspace(dc
, true);
815 if (trap_illegal(dc
, !dc
->cpu
->cfg
.use_pcmp_instr
)) {
818 if (dc
->cpu
->cfg
.use_pcmp_instr
) {
819 tcg_gen_clzi_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 32);
824 tcg_gen_bswap32_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
828 tcg_gen_rotri_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 16);
831 cpu_abort(cs
, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
832 (uint32_t)dc
->base
.pc_next
, op
, dc
->rd
, dc
->ra
, dc
->rb
);
837 static inline void sync_jmpstate(DisasContext
*dc
)
839 if (dc
->jmp
== JMP_DIRECT
|| dc
->jmp
== JMP_DIRECT_CC
) {
840 if (dc
->jmp
== JMP_DIRECT
) {
841 tcg_gen_movi_i32(cpu_btaken
, 1);
843 dc
->jmp
= JMP_INDIRECT
;
844 tcg_gen_movi_i32(cpu_btarget
, dc
->jmp_pc
);
848 static void dec_imm(DisasContext
*dc
)
850 dc
->ext_imm
= dc
->imm
<< 16;
851 tcg_gen_movi_i32(cpu_imm
, dc
->ext_imm
);
852 dc
->tb_flags
|= IMM_FLAG
;
856 static inline void compute_ldst_addr(DisasContext
*dc
, bool ea
, TCGv t
)
858 /* Should be set to true if r1 is used by loadstores. */
859 bool stackprot
= false;
862 /* All load/stores use ra. */
863 if (dc
->ra
== 1 && dc
->cpu
->cfg
.stackprot
) {
867 /* Treat the common cases first. */
870 int addr_size
= dc
->cpu
->cfg
.addr_size
;
872 if (addr_size
== 32) {
873 tcg_gen_extu_i32_tl(t
, cpu_R
[dc
->rb
]);
877 tcg_gen_concat_i32_i64(t
, cpu_R
[dc
->rb
], cpu_R
[dc
->ra
]);
878 if (addr_size
< 64) {
879 /* Mask off out of range bits. */
880 tcg_gen_andi_i64(t
, t
, MAKE_64BIT_MASK(0, addr_size
));
885 /* If any of the regs is r0, set t to the value of the other reg. */
887 tcg_gen_extu_i32_tl(t
, cpu_R
[dc
->rb
]);
889 } else if (dc
->rb
== 0) {
890 tcg_gen_extu_i32_tl(t
, cpu_R
[dc
->ra
]);
894 if (dc
->rb
== 1 && dc
->cpu
->cfg
.stackprot
) {
898 t32
= tcg_temp_new_i32();
899 tcg_gen_add_i32(t32
, cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
900 tcg_gen_extu_i32_tl(t
, t32
);
901 tcg_temp_free_i32(t32
);
904 gen_helper_stackprot(cpu_env
, t
);
909 t32
= tcg_temp_new_i32();
910 tcg_gen_addi_i32(t32
, cpu_R
[dc
->ra
], dec_alu_typeb_imm(dc
));
911 tcg_gen_extu_i32_tl(t
, t32
);
912 tcg_temp_free_i32(t32
);
915 gen_helper_stackprot(cpu_env
, t
);
920 static void dec_load(DisasContext
*dc
)
925 bool rev
= false, ex
= false, ea
= false;
926 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
929 mop
= dc
->opcode
& 3;
932 ea
= extract32(dc
->ir
, 7, 1);
933 rev
= extract32(dc
->ir
, 9, 1);
934 ex
= extract32(dc
->ir
, 10, 1);
941 if (trap_illegal(dc
, size
> 4)) {
945 if (trap_userspace(dc
, ea
)) {
950 addr
= tcg_temp_new();
951 compute_ldst_addr(dc
, ea
, addr
);
952 /* Extended addressing bypasses the MMU. */
953 mem_index
= ea
? MMU_NOMMU_IDX
: mem_index
;
956 * When doing reverse accesses we need to do two things.
958 * 1. Reverse the address wrt endianness.
959 * 2. Byteswap the data lanes on the way back into the CPU core.
961 if (rev
&& size
!= 4) {
962 /* Endian reverse the address. t is addr. */
966 tcg_gen_xori_tl(addr
, addr
, 3);
973 tcg_gen_xori_tl(addr
, addr
, 2);
976 cpu_abort(CPU(dc
->cpu
), "Invalid reverse size\n");
981 /* lwx does not throw unaligned access errors, so force alignment */
983 tcg_gen_andi_tl(addr
, addr
, ~3);
986 /* If we get a fault on a dslot, the jmpstate better be in sync. */
989 /* Verify alignment if needed. */
991 * Microblaze gives MMU faults priority over faults due to
992 * unaligned addresses. That's why we speculatively do the load
993 * into v. If the load succeeds, we verify alignment of the
994 * address and if that succeeds we write into the destination reg.
996 v
= tcg_temp_new_i32();
997 tcg_gen_qemu_ld_i32(v
, addr
, mem_index
, mop
);
999 if (dc
->cpu
->cfg
.unaligned_exceptions
&& size
> 1) {
1000 TCGv_i32 t0
= tcg_const_i32(0);
1001 TCGv_i32 treg
= tcg_const_i32(dc
->rd
);
1002 TCGv_i32 tsize
= tcg_const_i32(size
- 1);
1004 tcg_gen_movi_i32(cpu_pc
, dc
->base
.pc_next
);
1005 gen_helper_memalign(cpu_env
, addr
, treg
, t0
, tsize
);
1007 tcg_temp_free_i32(t0
);
1008 tcg_temp_free_i32(treg
);
1009 tcg_temp_free_i32(tsize
);
1013 tcg_gen_mov_tl(cpu_res_addr
, addr
);
1014 tcg_gen_mov_i32(cpu_res_val
, v
);
1017 tcg_gen_mov_i32(cpu_R
[dc
->rd
], v
);
1019 tcg_temp_free_i32(v
);
1022 /* no support for AXI exclusive so always clear C */
1023 tcg_gen_movi_i32(cpu_msr_c
, 0);
1026 tcg_temp_free(addr
);
1029 static void dec_store(DisasContext
*dc
)
1032 TCGLabel
*swx_skip
= NULL
;
1034 bool rev
= false, ex
= false, ea
= false;
1035 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
1038 mop
= dc
->opcode
& 3;
1041 ea
= extract32(dc
->ir
, 7, 1);
1042 rev
= extract32(dc
->ir
, 9, 1);
1043 ex
= extract32(dc
->ir
, 10, 1);
1050 if (trap_illegal(dc
, size
> 4)) {
1054 trap_userspace(dc
, ea
);
1057 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1059 /* SWX needs a temp_local. */
1060 addr
= ex
? tcg_temp_local_new() : tcg_temp_new();
1061 compute_ldst_addr(dc
, ea
, addr
);
1062 /* Extended addressing bypasses the MMU. */
1063 mem_index
= ea
? MMU_NOMMU_IDX
: mem_index
;
1068 /* swx does not throw unaligned access errors, so force alignment */
1069 tcg_gen_andi_tl(addr
, addr
, ~3);
1071 tcg_gen_movi_i32(cpu_msr_c
, 1);
1072 swx_skip
= gen_new_label();
1073 tcg_gen_brcond_tl(TCG_COND_NE
, cpu_res_addr
, addr
, swx_skip
);
1076 * Compare the value loaded at lwx with current contents of
1077 * the reserved location.
1079 tval
= tcg_temp_new_i32();
1081 tcg_gen_atomic_cmpxchg_i32(tval
, addr
, cpu_res_val
,
1082 cpu_R
[dc
->rd
], mem_index
,
1085 tcg_gen_brcond_i32(TCG_COND_NE
, cpu_res_val
, tval
, swx_skip
);
1086 tcg_gen_movi_i32(cpu_msr_c
, 0);
1087 tcg_temp_free_i32(tval
);
1090 if (rev
&& size
!= 4) {
1091 /* Endian reverse the address. t is addr. */
1095 tcg_gen_xori_tl(addr
, addr
, 3);
1102 /* Force addr into the temp. */
1103 tcg_gen_xori_tl(addr
, addr
, 2);
1106 cpu_abort(CPU(dc
->cpu
), "Invalid reverse size\n");
1112 tcg_gen_qemu_st_i32(cpu_R
[dc
->rd
], addr
, mem_index
, mop
);
1115 /* Verify alignment if needed. */
1116 if (dc
->cpu
->cfg
.unaligned_exceptions
&& size
> 1) {
1117 TCGv_i32 t1
= tcg_const_i32(1);
1118 TCGv_i32 treg
= tcg_const_i32(dc
->rd
);
1119 TCGv_i32 tsize
= tcg_const_i32(size
- 1);
1121 tcg_gen_movi_i32(cpu_pc
, dc
->base
.pc_next
);
1122 /* FIXME: if the alignment is wrong, we should restore the value
1123 * in memory. One possible way to achieve this is to probe
1124 * the MMU prior to the memaccess, that way we could put
1125 * the alignment checks in between the probe and the mem
1128 gen_helper_memalign(cpu_env
, addr
, treg
, t1
, tsize
);
1130 tcg_temp_free_i32(t1
);
1131 tcg_temp_free_i32(treg
);
1132 tcg_temp_free_i32(tsize
);
1136 gen_set_label(swx_skip
);
1139 tcg_temp_free(addr
);
1142 static inline void eval_cc(DisasContext
*dc
, unsigned int cc
,
1143 TCGv_i32 d
, TCGv_i32 a
)
1145 static const int mb_to_tcg_cc
[] = {
1146 [CC_EQ
] = TCG_COND_EQ
,
1147 [CC_NE
] = TCG_COND_NE
,
1148 [CC_LT
] = TCG_COND_LT
,
1149 [CC_LE
] = TCG_COND_LE
,
1150 [CC_GE
] = TCG_COND_GE
,
1151 [CC_GT
] = TCG_COND_GT
,
1161 tcg_gen_setcondi_i32(mb_to_tcg_cc
[cc
], d
, a
, 0);
1164 cpu_abort(CPU(dc
->cpu
), "Unknown condition code %x.\n", cc
);
1169 static void eval_cond_jmp(DisasContext
*dc
, TCGv_i32 pc_true
, TCGv_i32 pc_false
)
1171 TCGv_i32 zero
= tcg_const_i32(0);
1173 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_pc
,
1177 tcg_temp_free_i32(zero
);
1180 static void dec_setup_dslot(DisasContext
*dc
)
1182 TCGv_i32 tmp
= tcg_const_i32(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
));
1184 dc
->delayed_branch
= 2;
1185 dc
->tb_flags
|= D_FLAG
;
1187 tcg_gen_st_i32(tmp
, cpu_env
, offsetof(CPUMBState
, bimm
));
1188 tcg_temp_free_i32(tmp
);
1191 static void dec_bcc(DisasContext
*dc
)
1196 cc
= EXTRACT_FIELD(dc
->ir
, 21, 23);
1197 dslot
= dc
->ir
& (1 << 25);
1199 dc
->delayed_branch
= 1;
1201 dec_setup_dslot(dc
);
1205 dc
->jmp
= JMP_DIRECT_CC
;
1206 dc
->jmp_pc
= dc
->base
.pc_next
+ dec_alu_typeb_imm(dc
);
1207 tcg_gen_movi_i32(cpu_btarget
, dc
->jmp_pc
);
1209 dc
->jmp
= JMP_INDIRECT
;
1210 tcg_gen_addi_i32(cpu_btarget
, cpu_R
[dc
->rb
], dc
->base
.pc_next
);
1212 eval_cc(dc
, cc
, cpu_btaken
, cpu_R
[dc
->ra
]);
1215 static void dec_br(DisasContext
*dc
)
1217 unsigned int dslot
, link
, abs
, mbar
;
1219 dslot
= dc
->ir
& (1 << 20);
1220 abs
= dc
->ir
& (1 << 19);
1221 link
= dc
->ir
& (1 << 18);
1223 /* Memory barrier. */
1224 mbar
= (dc
->ir
>> 16) & 31;
1225 if (mbar
== 2 && dc
->imm
== 4) {
1226 uint16_t mbar_imm
= dc
->rd
;
1228 /* Data access memory barrier. */
1229 if ((mbar_imm
& 2) == 0) {
1230 tcg_gen_mb(TCG_BAR_SC
| TCG_MO_ALL
);
1233 /* mbar IMM & 16 decodes to sleep. */
1234 if (mbar_imm
& 16) {
1237 if (trap_userspace(dc
, true)) {
1238 /* Sleep is a privileged instruction. */
1244 tmp_1
= tcg_const_i32(1);
1245 tcg_gen_st_i32(tmp_1
, cpu_env
,
1246 -offsetof(MicroBlazeCPU
, env
)
1247 +offsetof(CPUState
, halted
));
1248 tcg_temp_free_i32(tmp_1
);
1250 tcg_gen_movi_i32(cpu_pc
, dc
->base
.pc_next
+ 4);
1252 gen_raise_exception(dc
, EXCP_HLT
);
1256 dc
->cpustate_changed
= 1;
1260 if (abs
&& link
&& !dslot
) {
1263 uint32_t imm
= dec_alu_typeb_imm(dc
);
1264 if (trap_userspace(dc
, imm
!= 8 && imm
!= 0x18)) {
1269 if (trap_userspace(dc
, true)) {
1275 dc
->delayed_branch
= 1;
1277 dec_setup_dslot(dc
);
1279 if (link
&& dc
->rd
) {
1280 tcg_gen_movi_i32(cpu_R
[dc
->rd
], dc
->base
.pc_next
);
1285 uint32_t dest
= dec_alu_typeb_imm(dc
);
1287 dc
->jmp
= JMP_DIRECT
;
1289 tcg_gen_movi_i32(cpu_btarget
, dest
);
1290 if (link
&& !dslot
) {
1294 gen_raise_exception_sync(dc
, EXCP_BREAK
);
1297 gen_raise_exception_sync(dc
, EXCP_DEBUG
);
1302 dc
->jmp
= JMP_INDIRECT
;
1303 tcg_gen_mov_i32(cpu_btarget
, cpu_R
[dc
->rb
]);
1304 if (link
&& !dslot
) {
1305 gen_raise_exception_sync(dc
, EXCP_BREAK
);
1308 } else if (dc
->type_b
) {
1309 dc
->jmp
= JMP_DIRECT
;
1310 dc
->jmp_pc
= dc
->base
.pc_next
+ dec_alu_typeb_imm(dc
);
1311 tcg_gen_movi_i32(cpu_btarget
, dc
->jmp_pc
);
1313 dc
->jmp
= JMP_INDIRECT
;
1314 tcg_gen_addi_i32(cpu_btarget
, cpu_R
[dc
->rb
], dc
->base
.pc_next
);
1316 tcg_gen_movi_i32(cpu_btaken
, 1);
1319 static inline void do_rti(DisasContext
*dc
)
1322 t0
= tcg_temp_new_i32();
1323 t1
= tcg_temp_new_i32();
1324 tcg_gen_mov_i32(t1
, cpu_msr
);
1325 tcg_gen_shri_i32(t0
, t1
, 1);
1326 tcg_gen_ori_i32(t1
, t1
, MSR_IE
);
1327 tcg_gen_andi_i32(t0
, t0
, (MSR_VM
| MSR_UM
));
1329 tcg_gen_andi_i32(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1330 tcg_gen_or_i32(t1
, t1
, t0
);
1332 tcg_temp_free_i32(t1
);
1333 tcg_temp_free_i32(t0
);
1334 dc
->tb_flags
&= ~DRTI_FLAG
;
1337 static inline void do_rtb(DisasContext
*dc
)
1340 t0
= tcg_temp_new_i32();
1341 t1
= tcg_temp_new_i32();
1342 tcg_gen_mov_i32(t1
, cpu_msr
);
1343 tcg_gen_andi_i32(t1
, t1
, ~MSR_BIP
);
1344 tcg_gen_shri_i32(t0
, t1
, 1);
1345 tcg_gen_andi_i32(t0
, t0
, (MSR_VM
| MSR_UM
));
1347 tcg_gen_andi_i32(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1348 tcg_gen_or_i32(t1
, t1
, t0
);
1350 tcg_temp_free_i32(t1
);
1351 tcg_temp_free_i32(t0
);
1352 dc
->tb_flags
&= ~DRTB_FLAG
;
1355 static inline void do_rte(DisasContext
*dc
)
1358 t0
= tcg_temp_new_i32();
1359 t1
= tcg_temp_new_i32();
1361 tcg_gen_mov_i32(t1
, cpu_msr
);
1362 tcg_gen_ori_i32(t1
, t1
, MSR_EE
);
1363 tcg_gen_andi_i32(t1
, t1
, ~MSR_EIP
);
1364 tcg_gen_shri_i32(t0
, t1
, 1);
1365 tcg_gen_andi_i32(t0
, t0
, (MSR_VM
| MSR_UM
));
1367 tcg_gen_andi_i32(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1368 tcg_gen_or_i32(t1
, t1
, t0
);
1370 tcg_temp_free_i32(t1
);
1371 tcg_temp_free_i32(t0
);
1372 dc
->tb_flags
&= ~DRTE_FLAG
;
1375 static void dec_rts(DisasContext
*dc
)
1377 unsigned int b_bit
, i_bit
, e_bit
;
1379 i_bit
= dc
->ir
& (1 << 21);
1380 b_bit
= dc
->ir
& (1 << 22);
1381 e_bit
= dc
->ir
& (1 << 23);
1383 if (trap_userspace(dc
, i_bit
|| b_bit
|| e_bit
)) {
1387 dec_setup_dslot(dc
);
1390 dc
->tb_flags
|= DRTI_FLAG
;
1392 dc
->tb_flags
|= DRTB_FLAG
;
1394 dc
->tb_flags
|= DRTE_FLAG
;
1397 dc
->jmp
= JMP_INDIRECT
;
1398 tcg_gen_movi_i32(cpu_btaken
, 1);
1399 tcg_gen_add_i32(cpu_btarget
, cpu_R
[dc
->ra
], *dec_alu_op_b(dc
));
1402 static int dec_check_fpuv2(DisasContext
*dc
)
1404 if ((dc
->cpu
->cfg
.use_fpu
!= 2) && (dc
->tb_flags
& MSR_EE_FLAG
)) {
1405 gen_raise_hw_excp(dc
, ESR_EC_FPU
);
1407 return (dc
->cpu
->cfg
.use_fpu
== 2) ? PVR2_USE_FPU2_MASK
: 0;
1410 static void dec_fpu(DisasContext
*dc
)
1412 unsigned int fpu_insn
;
1414 if (trap_illegal(dc
, !dc
->cpu
->cfg
.use_fpu
)) {
1418 fpu_insn
= (dc
->ir
>> 7) & 7;
1422 gen_helper_fadd(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1427 gen_helper_frsub(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1432 gen_helper_fmul(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1437 gen_helper_fdiv(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1442 switch ((dc
->ir
>> 4) & 7) {
1444 gen_helper_fcmp_un(cpu_R
[dc
->rd
], cpu_env
,
1445 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1448 gen_helper_fcmp_lt(cpu_R
[dc
->rd
], cpu_env
,
1449 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1452 gen_helper_fcmp_eq(cpu_R
[dc
->rd
], cpu_env
,
1453 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1456 gen_helper_fcmp_le(cpu_R
[dc
->rd
], cpu_env
,
1457 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1460 gen_helper_fcmp_gt(cpu_R
[dc
->rd
], cpu_env
,
1461 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1464 gen_helper_fcmp_ne(cpu_R
[dc
->rd
], cpu_env
,
1465 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1468 gen_helper_fcmp_ge(cpu_R
[dc
->rd
], cpu_env
,
1469 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1472 qemu_log_mask(LOG_UNIMP
,
1473 "unimplemented fcmp fpu_insn=%x pc=%x"
1475 fpu_insn
, (uint32_t)dc
->base
.pc_next
,
1477 dc
->abort_at_next_insn
= 1;
1483 if (!dec_check_fpuv2(dc
)) {
1486 gen_helper_flt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1490 if (!dec_check_fpuv2(dc
)) {
1493 gen_helper_fint(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1497 if (!dec_check_fpuv2(dc
)) {
1500 gen_helper_fsqrt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1504 qemu_log_mask(LOG_UNIMP
, "unimplemented FPU insn fpu_insn=%x pc=%x"
1506 fpu_insn
, (uint32_t)dc
->base
.pc_next
, dc
->opcode
);
1507 dc
->abort_at_next_insn
= 1;
1512 static void dec_null(DisasContext
*dc
)
1514 if (trap_illegal(dc
, true)) {
1517 qemu_log_mask(LOG_GUEST_ERROR
, "unknown insn pc=%x opc=%x\n",
1518 (uint32_t)dc
->base
.pc_next
, dc
->opcode
);
1519 dc
->abort_at_next_insn
= 1;
1522 /* Insns connected to FSL or AXI stream attached devices. */
1523 static void dec_stream(DisasContext
*dc
)
1525 TCGv_i32 t_id
, t_ctrl
;
1528 if (trap_userspace(dc
, true)) {
1532 t_id
= tcg_temp_new_i32();
1534 tcg_gen_movi_i32(t_id
, dc
->imm
& 0xf);
1535 ctrl
= dc
->imm
>> 10;
1537 tcg_gen_andi_i32(t_id
, cpu_R
[dc
->rb
], 0xf);
1538 ctrl
= dc
->imm
>> 5;
1541 t_ctrl
= tcg_const_i32(ctrl
);
1544 gen_helper_put(t_id
, t_ctrl
, cpu_R
[dc
->ra
]);
1546 gen_helper_get(cpu_R
[dc
->rd
], t_id
, t_ctrl
);
1548 tcg_temp_free_i32(t_id
);
1549 tcg_temp_free_i32(t_ctrl
);
/*
 * Dispatch table for the legacy (pre-decodetree) decoder: each entry
 * pairs an opcode bits/mask pattern with the translation handler that
 * old_decode() invokes on a match.
 *
 * NOTE(review): this excerpt is badly truncated by extraction -- the
 * struct's bits/mask members, most of the table entries, and the
 * terminating {{0, 0}, dec_null} catch-all are not visible here.
 * Confirm against the upstream QEMU source before editing.
 */
1552 static struct decoder_info
{
1557 void (*dec
)(DisasContext
*dc
);
1560 {DEC_BARREL
, dec_barrel
},
1562 {DEC_ST
, dec_store
},
1570 {DEC_STREAM
, dec_stream
},
1574 static void old_decode(DisasContext
*dc
, uint32_t ir
)
1580 /* bit 2 seems to indicate insn type. */
1581 dc
->type_b
= ir
& (1 << 29);
1583 dc
->opcode
= EXTRACT_FIELD(ir
, 26, 31);
1584 dc
->rd
= EXTRACT_FIELD(ir
, 21, 25);
1585 dc
->ra
= EXTRACT_FIELD(ir
, 16, 20);
1586 dc
->rb
= EXTRACT_FIELD(ir
, 11, 15);
1587 dc
->imm
= EXTRACT_FIELD(ir
, 0, 15);
1589 /* Large switch for all insns. */
1590 for (i
= 0; i
< ARRAY_SIZE(decinfo
); i
++) {
1591 if ((dc
->opcode
& decinfo
[i
].mask
) == decinfo
[i
].bits
) {
1598 static void mb_tr_init_disas_context(DisasContextBase
*dcb
, CPUState
*cs
)
1600 DisasContext
*dc
= container_of(dcb
, DisasContext
, base
);
1601 MicroBlazeCPU
*cpu
= MICROBLAZE_CPU(cs
);
1605 dc
->synced_flags
= dc
->tb_flags
= dc
->base
.tb
->flags
;
1606 dc
->delayed_branch
= !!(dc
->tb_flags
& D_FLAG
);
1607 dc
->jmp
= dc
->delayed_branch
? JMP_INDIRECT
: JMP_NOJMP
;
1608 dc
->cpustate_changed
= 0;
1609 dc
->abort_at_next_insn
= 0;
1610 dc
->ext_imm
= dc
->base
.tb
->cs_base
;
1614 bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
1615 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
1618 static void mb_tr_tb_start(DisasContextBase
*dcb
, CPUState
*cs
)
1622 static void mb_tr_insn_start(DisasContextBase
*dcb
, CPUState
*cs
)
1624 tcg_gen_insn_start(dcb
->pc_next
);
1627 static bool mb_tr_breakpoint_check(DisasContextBase
*dcb
, CPUState
*cs
,
1628 const CPUBreakpoint
*bp
)
1630 DisasContext
*dc
= container_of(dcb
, DisasContext
, base
);
1632 gen_raise_exception_sync(dc
, EXCP_DEBUG
);
1635 * The address covered by the breakpoint must be included in
1636 * [tb->pc, tb->pc + tb->size) in order to for it to be
1637 * properly cleared -- thus we increment the PC here so that
1638 * the logic setting tb->size below does the right thing.
1640 dc
->base
.pc_next
+= 4;
1644 static void mb_tr_translate_insn(DisasContextBase
*dcb
, CPUState
*cs
)
1646 DisasContext
*dc
= container_of(dcb
, DisasContext
, base
);
1647 CPUMBState
*env
= cs
->env_ptr
;
1650 /* TODO: This should raise an exception, not terminate qemu. */
1651 if (dc
->base
.pc_next
& 3) {
1652 cpu_abort(cs
, "Microblaze: unaligned PC=%x\n",
1653 (uint32_t)dc
->base
.pc_next
);
1657 ir
= cpu_ldl_code(env
, dc
->base
.pc_next
);
1658 if (!decode(dc
, ir
)) {
1663 tcg_temp_free_i32(dc
->r0
);
1668 if (dc
->clear_imm
&& (dc
->tb_flags
& IMM_FLAG
)) {
1669 dc
->tb_flags
&= ~IMM_FLAG
;
1670 tcg_gen_discard_i32(cpu_imm
);
1672 dc
->base
.pc_next
+= 4;
1674 if (dc
->delayed_branch
&& --dc
->delayed_branch
== 0) {
1675 if (dc
->tb_flags
& DRTI_FLAG
) {
1678 if (dc
->tb_flags
& DRTB_FLAG
) {
1681 if (dc
->tb_flags
& DRTE_FLAG
) {
1684 /* Clear the delay slot flag. */
1685 dc
->tb_flags
&= ~D_FLAG
;
1686 dc
->base
.is_jmp
= DISAS_JUMP
;
1689 /* Force an exit if the per-tb cpu state has changed. */
1690 if (dc
->base
.is_jmp
== DISAS_NEXT
&& dc
->cpustate_changed
) {
1691 dc
->base
.is_jmp
= DISAS_UPDATE
;
1692 tcg_gen_movi_i32(cpu_pc
, dc
->base
.pc_next
);
1696 static void mb_tr_tb_stop(DisasContextBase
*dcb
, CPUState
*cs
)
1698 DisasContext
*dc
= container_of(dcb
, DisasContext
, base
);
1700 assert(!dc
->abort_at_next_insn
);
1702 if (dc
->base
.is_jmp
== DISAS_NORETURN
) {
1703 /* We have already exited the TB. */
1708 if (dc
->tb_flags
& D_FLAG
) {
1710 dc
->jmp
= JMP_NOJMP
;
1713 switch (dc
->base
.is_jmp
) {
1714 case DISAS_TOO_MANY
:
1715 assert(dc
->jmp
== JMP_NOJMP
);
1716 gen_goto_tb(dc
, 0, dc
->base
.pc_next
);
1720 assert(dc
->jmp
== JMP_NOJMP
);
1721 if (unlikely(cs
->singlestep_enabled
)) {
1722 gen_raise_exception(dc
, EXCP_DEBUG
);
1724 tcg_gen_exit_tb(NULL
, 0);
1732 TCGv_i32 tmp_pc
= tcg_const_i32(dc
->base
.pc_next
);
1733 eval_cond_jmp(dc
, cpu_btarget
, tmp_pc
);
1734 tcg_temp_free_i32(tmp_pc
);
1736 if (unlikely(cs
->singlestep_enabled
)) {
1737 gen_raise_exception(dc
, EXCP_DEBUG
);
1739 tcg_gen_exit_tb(NULL
, 0);
1746 TCGLabel
*l1
= gen_new_label();
1747 tcg_gen_brcondi_i32(TCG_COND_NE
, cpu_btaken
, 0, l1
);
1748 gen_goto_tb(dc
, 1, dc
->base
.pc_next
);
1754 gen_goto_tb(dc
, 0, dc
->jmp_pc
);
1760 g_assert_not_reached();
1764 static void mb_tr_disas_log(const DisasContextBase
*dcb
, CPUState
*cs
)
1766 qemu_log("IN: %s\n", lookup_symbol(dcb
->pc_first
));
1767 log_target_disas(cs
, dcb
->pc_first
, dcb
->tb
->size
);
1770 static const TranslatorOps mb_tr_ops
= {
1771 .init_disas_context
= mb_tr_init_disas_context
,
1772 .tb_start
= mb_tr_tb_start
,
1773 .insn_start
= mb_tr_insn_start
,
1774 .breakpoint_check
= mb_tr_breakpoint_check
,
1775 .translate_insn
= mb_tr_translate_insn
,
1776 .tb_stop
= mb_tr_tb_stop
,
1777 .disas_log
= mb_tr_disas_log
,
1780 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int max_insns
)
1783 translator_loop(&mb_tr_ops
, &dc
.base
, cpu
, tb
, max_insns
);
1786 void mb_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
1788 MicroBlazeCPU
*cpu
= MICROBLAZE_CPU(cs
);
1789 CPUMBState
*env
= &cpu
->env
;
1796 qemu_fprintf(f
, "IN: PC=%x %s\n",
1797 env
->pc
, lookup_symbol(env
->pc
));
1798 qemu_fprintf(f
, "rmsr=%x resr=%x rear=%" PRIx64
" "
1799 "imm=%x iflags=%x fsr=%x rbtr=%x\n",
1800 env
->msr
, env
->esr
, env
->ear
,
1801 env
->imm
, env
->iflags
, env
->fsr
, env
->btr
);
1802 qemu_fprintf(f
, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1803 env
->btaken
, env
->btarget
,
1804 (env
->msr
& MSR_UM
) ? "user" : "kernel",
1805 (env
->msr
& MSR_UMS
) ? "user" : "kernel",
1806 (bool)(env
->msr
& MSR_EIP
),
1807 (bool)(env
->msr
& MSR_IE
));
1808 for (i
= 0; i
< 12; i
++) {
1809 qemu_fprintf(f
, "rpvr%2.2d=%8.8x ", i
, env
->pvr
.regs
[i
]);
1810 if ((i
+ 1) % 4 == 0) {
1811 qemu_fprintf(f
, "\n");
1815 /* Registers that aren't modeled are reported as 0 */
1816 qemu_fprintf(f
, "redr=%x rpid=0 rzpr=0 rtlbx=0 rtlbsx=0 "
1817 "rtlblo=0 rtlbhi=0\n", env
->edr
);
1818 qemu_fprintf(f
, "slr=%x shr=%x\n", env
->slr
, env
->shr
);
1819 for (i
= 0; i
< 32; i
++) {
1820 qemu_fprintf(f
, "r%2.2d=%8.8x ", i
, env
->regs
[i
]);
1821 if ((i
+ 1) % 4 == 0)
1822 qemu_fprintf(f
, "\n");
1824 qemu_fprintf(f
, "\n\n");
1827 void mb_tcg_init(void)
1829 #define R(X) { &cpu_R[X], offsetof(CPUMBState, regs[X]), "r" #X }
1830 #define SP(X) { &cpu_##X, offsetof(CPUMBState, X), #X }
1832 static const struct {
1833 TCGv_i32
*var
; int ofs
; char name
[8];
1835 R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
1836 R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
1837 R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
1838 R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
1853 for (int i
= 0; i
< ARRAY_SIZE(i32s
); ++i
) {
1855 tcg_global_mem_new_i32(cpu_env
, i32s
[i
].ofs
, i32s
[i
].name
);
1859 tcg_global_mem_new(cpu_env
, offsetof(CPUMBState
, res_addr
), "res_addr");
1862 void restore_state_to_opc(CPUMBState
*env
, TranslationBlock
*tb
,