/*
 *  Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 *  Copyright (c) 2009 Edgar E. Iglesias.
 *  Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
23 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "microblaze-decode.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
36 #if DISAS_MB && !SIM_COMPAT
37 # define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
39 # define LOG_DIS(...) do { } while (0)
/* Extract the inclusive bit-field [end:start] from src.
 * All three arguments are fully parenthesized so that expression
 * arguments (e.g. EXTRACT_FIELD(x, a + b, c)) expand correctly;
 * the original left 'start' and 'end' bare. */
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> (start)) & ((1 << ((end) - (start) + 1)) - 1))
47 static TCGv env_debug
;
48 static TCGv_ptr cpu_env
;
49 static TCGv cpu_R
[32];
50 static TCGv cpu_SR
[18];
52 static TCGv env_btaken
;
53 static TCGv env_btarget
;
54 static TCGv env_iflags
;
55 static TCGv env_res_addr
;
56 static TCGv env_res_val
;
58 #include "exec/gen-icount.h"
60 /* This is the state at translation time. */
61 typedef struct DisasContext
{
72 unsigned int cpustate_changed
;
73 unsigned int delayed_branch
;
74 unsigned int tb_flags
, synced_flags
; /* tb dependent flags. */
75 unsigned int clear_imm
;
80 #define JMP_DIRECT_CC 2
81 #define JMP_INDIRECT 3
85 int abort_at_next_insn
;
87 struct TranslationBlock
*tb
;
88 int singlestep_enabled
;
/* Printable names for the 32 general purpose registers, indexed by
   register number; used by the disassembly/debug dump code. */
static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};
/* Printable names for the special registers (PC, MSR, then sr2..sr18),
   indexed by special register number. */
static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};
106 static inline void t_sync_flags(DisasContext
*dc
)
108 /* Synch the tb dependent flags between translator and runtime. */
109 if (dc
->tb_flags
!= dc
->synced_flags
) {
110 tcg_gen_movi_tl(env_iflags
, dc
->tb_flags
);
111 dc
->synced_flags
= dc
->tb_flags
;
115 static inline void t_gen_raise_exception(DisasContext
*dc
, uint32_t index
)
117 TCGv_i32 tmp
= tcg_const_i32(index
);
120 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
);
121 gen_helper_raise_exception(cpu_env
, tmp
);
122 tcg_temp_free_i32(tmp
);
123 dc
->is_jmp
= DISAS_UPDATE
;
126 static void gen_goto_tb(DisasContext
*dc
, int n
, target_ulong dest
)
128 TranslationBlock
*tb
;
130 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
132 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dest
);
133 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
135 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dest
);
140 static void read_carry(DisasContext
*dc
, TCGv d
)
142 tcg_gen_shri_tl(d
, cpu_SR
[SR_MSR
], 31);
146 * write_carry sets the carry bits in MSR based on bit 0 of v.
147 * v[31:1] are ignored.
149 static void write_carry(DisasContext
*dc
, TCGv v
)
151 TCGv t0
= tcg_temp_new();
152 tcg_gen_shli_tl(t0
, v
, 31);
153 tcg_gen_sari_tl(t0
, t0
, 31);
154 tcg_gen_andi_tl(t0
, t0
, (MSR_C
| MSR_CC
));
155 tcg_gen_andi_tl(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
],
157 tcg_gen_or_tl(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
], t0
);
161 static void write_carryi(DisasContext
*dc
, bool carry
)
163 TCGv t0
= tcg_temp_new();
164 tcg_gen_movi_tl(t0
, carry
);
169 /* True if ALU operand b is a small immediate that may deserve
171 static inline int dec_alu_op_b_is_small_imm(DisasContext
*dc
)
173 /* Immediate insn without the imm prefix ? */
174 return dc
->type_b
&& !(dc
->tb_flags
& IMM_FLAG
);
177 static inline TCGv
*dec_alu_op_b(DisasContext
*dc
)
180 if (dc
->tb_flags
& IMM_FLAG
)
181 tcg_gen_ori_tl(env_imm
, env_imm
, dc
->imm
);
183 tcg_gen_movi_tl(env_imm
, (int32_t)((int16_t)dc
->imm
));
186 return &cpu_R
[dc
->rb
];
189 static void dec_add(DisasContext
*dc
)
197 LOG_DIS("add%s%s%s r%d r%d r%d\n",
198 dc
->type_b
? "i" : "", k
? "k" : "", c
? "c" : "",
199 dc
->rd
, dc
->ra
, dc
->rb
);
201 /* Take care of the easy cases first. */
203 /* k - keep carry, no need to update MSR. */
204 /* If rd == r0, it's a nop. */
206 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
209 /* c - Add carry into the result. */
213 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
220 /* From now on, we can assume k is zero. So we need to update MSR. */
226 tcg_gen_movi_tl(cf
, 0);
230 TCGv ncf
= tcg_temp_new();
231 gen_helper_carry(ncf
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)), cf
);
232 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
233 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
234 write_carry(dc
, ncf
);
237 gen_helper_carry(cf
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)), cf
);
243 static void dec_sub(DisasContext
*dc
)
245 unsigned int u
, cmp
, k
, c
;
251 cmp
= (dc
->imm
& 1) && (!dc
->type_b
) && k
;
254 LOG_DIS("cmp%s r%d, r%d ir=%x\n", u
? "u" : "", dc
->rd
, dc
->ra
, dc
->ir
);
257 gen_helper_cmpu(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
259 gen_helper_cmp(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
264 LOG_DIS("sub%s%s r%d, r%d r%d\n",
265 k
? "k" : "", c
? "c" : "", dc
->rd
, dc
->ra
, dc
->rb
);
267 /* Take care of the easy cases first. */
269 /* k - keep carry, no need to update MSR. */
270 /* If rd == r0, it's a nop. */
272 tcg_gen_sub_tl(cpu_R
[dc
->rd
], *(dec_alu_op_b(dc
)), cpu_R
[dc
->ra
]);
275 /* c - Add carry into the result. */
279 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
286 /* From now on, we can assume k is zero. So we need to update MSR. */
287 /* Extract carry. And complement a into na. */
293 tcg_gen_movi_tl(cf
, 1);
296 /* d = b + ~a + c. carry defaults to 1. */
297 tcg_gen_not_tl(na
, cpu_R
[dc
->ra
]);
300 TCGv ncf
= tcg_temp_new();
301 gen_helper_carry(ncf
, na
, *(dec_alu_op_b(dc
)), cf
);
302 tcg_gen_add_tl(cpu_R
[dc
->rd
], na
, *(dec_alu_op_b(dc
)));
303 tcg_gen_add_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], cf
);
304 write_carry(dc
, ncf
);
307 gen_helper_carry(cf
, na
, *(dec_alu_op_b(dc
)), cf
);
314 static void dec_pattern(DisasContext
*dc
)
318 if ((dc
->tb_flags
& MSR_EE_FLAG
)
319 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
320 && !((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_USE_PCMP_INSTR
))) {
321 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
322 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
325 mode
= dc
->opcode
& 3;
329 LOG_DIS("pcmpbf r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
331 gen_helper_pcmpbf(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
334 LOG_DIS("pcmpeq r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
336 tcg_gen_setcond_tl(TCG_COND_EQ
, cpu_R
[dc
->rd
],
337 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
341 LOG_DIS("pcmpne r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
343 tcg_gen_setcond_tl(TCG_COND_NE
, cpu_R
[dc
->rd
],
344 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
348 cpu_abort(CPU(dc
->cpu
),
349 "unsupported pattern insn opcode=%x\n", dc
->opcode
);
354 static void dec_and(DisasContext
*dc
)
358 if (!dc
->type_b
&& (dc
->imm
& (1 << 10))) {
363 not = dc
->opcode
& (1 << 1);
364 LOG_DIS("and%s\n", not ? "n" : "");
370 tcg_gen_andc_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
372 tcg_gen_and_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
375 static void dec_or(DisasContext
*dc
)
377 if (!dc
->type_b
&& (dc
->imm
& (1 << 10))) {
382 LOG_DIS("or r%d r%d r%d imm=%x\n", dc
->rd
, dc
->ra
, dc
->rb
, dc
->imm
);
384 tcg_gen_or_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
387 static void dec_xor(DisasContext
*dc
)
389 if (!dc
->type_b
&& (dc
->imm
& (1 << 10))) {
394 LOG_DIS("xor r%d\n", dc
->rd
);
396 tcg_gen_xor_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
399 static inline void msr_read(DisasContext
*dc
, TCGv d
)
401 tcg_gen_mov_tl(d
, cpu_SR
[SR_MSR
]);
404 static inline void msr_write(DisasContext
*dc
, TCGv v
)
409 dc
->cpustate_changed
= 1;
410 /* PVR bit is not writable. */
411 tcg_gen_andi_tl(t
, v
, ~MSR_PVR
);
412 tcg_gen_andi_tl(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
], MSR_PVR
);
413 tcg_gen_or_tl(cpu_SR
[SR_MSR
], cpu_SR
[SR_MSR
], v
);
417 static void dec_msr(DisasContext
*dc
)
419 CPUState
*cs
= CPU(dc
->cpu
);
421 unsigned int sr
, to
, rn
;
422 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
424 sr
= dc
->imm
& ((1 << 14) - 1);
425 to
= dc
->imm
& (1 << 14);
428 dc
->cpustate_changed
= 1;
430 /* msrclr and msrset. */
431 if (!(dc
->imm
& (1 << 15))) {
432 unsigned int clr
= dc
->ir
& (1 << 16);
434 LOG_DIS("msr%s r%d imm=%x\n", clr
? "clr" : "set",
437 if (!(dc
->cpu
->env
.pvr
.regs
[2] & PVR2_USE_MSR_INSTR
)) {
442 if ((dc
->tb_flags
& MSR_EE_FLAG
)
443 && mem_index
== MMU_USER_IDX
&& (dc
->imm
!= 4 && dc
->imm
!= 0)) {
444 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
445 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
450 msr_read(dc
, cpu_R
[dc
->rd
]);
455 tcg_gen_mov_tl(t1
, *(dec_alu_op_b(dc
)));
458 tcg_gen_not_tl(t1
, t1
);
459 tcg_gen_and_tl(t0
, t0
, t1
);
461 tcg_gen_or_tl(t0
, t0
, t1
);
465 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
+ 4);
466 dc
->is_jmp
= DISAS_UPDATE
;
471 if ((dc
->tb_flags
& MSR_EE_FLAG
)
472 && mem_index
== MMU_USER_IDX
) {
473 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
474 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
479 #if !defined(CONFIG_USER_ONLY)
480 /* Catch read/writes to the mmu block. */
481 if ((sr
& ~0xff) == 0x1000) {
483 LOG_DIS("m%ss sr%d r%d imm=%x\n", to
? "t" : "f", sr
, dc
->ra
, dc
->imm
);
485 gen_helper_mmu_write(cpu_env
, tcg_const_tl(sr
), cpu_R
[dc
->ra
]);
487 gen_helper_mmu_read(cpu_R
[dc
->rd
], cpu_env
, tcg_const_tl(sr
));
493 LOG_DIS("m%ss sr%x r%d imm=%x\n", to
? "t" : "f", sr
, dc
->ra
, dc
->imm
);
498 msr_write(dc
, cpu_R
[dc
->ra
]);
501 tcg_gen_mov_tl(cpu_SR
[SR_EAR
], cpu_R
[dc
->ra
]);
504 tcg_gen_mov_tl(cpu_SR
[SR_ESR
], cpu_R
[dc
->ra
]);
507 tcg_gen_andi_tl(cpu_SR
[SR_FSR
], cpu_R
[dc
->ra
], 31);
510 tcg_gen_st_tl(cpu_R
[dc
->ra
], cpu_env
, offsetof(CPUMBState
, slr
));
513 tcg_gen_st_tl(cpu_R
[dc
->ra
], cpu_env
, offsetof(CPUMBState
, shr
));
516 cpu_abort(CPU(dc
->cpu
), "unknown mts reg %x\n", sr
);
520 LOG_DIS("m%ss r%d sr%x imm=%x\n", to
? "t" : "f", dc
->rd
, sr
, dc
->imm
);
524 tcg_gen_movi_tl(cpu_R
[dc
->rd
], dc
->pc
);
527 msr_read(dc
, cpu_R
[dc
->rd
]);
530 tcg_gen_mov_tl(cpu_R
[dc
->rd
], cpu_SR
[SR_EAR
]);
533 tcg_gen_mov_tl(cpu_R
[dc
->rd
], cpu_SR
[SR_ESR
]);
536 tcg_gen_mov_tl(cpu_R
[dc
->rd
], cpu_SR
[SR_FSR
]);
539 tcg_gen_mov_tl(cpu_R
[dc
->rd
], cpu_SR
[SR_BTR
]);
542 tcg_gen_ld_tl(cpu_R
[dc
->rd
], cpu_env
, offsetof(CPUMBState
, slr
));
545 tcg_gen_ld_tl(cpu_R
[dc
->rd
], cpu_env
, offsetof(CPUMBState
, shr
));
561 tcg_gen_ld_tl(cpu_R
[dc
->rd
],
562 cpu_env
, offsetof(CPUMBState
, pvr
.regs
[rn
]));
565 cpu_abort(cs
, "unknown mfs reg %x\n", sr
);
571 tcg_gen_movi_tl(cpu_R
[0], 0);
575 /* 64-bit signed mul, lower result in d and upper in d2. */
576 static void t_gen_muls(TCGv d
, TCGv d2
, TCGv a
, TCGv b
)
580 t0
= tcg_temp_new_i64();
581 t1
= tcg_temp_new_i64();
583 tcg_gen_ext_i32_i64(t0
, a
);
584 tcg_gen_ext_i32_i64(t1
, b
);
585 tcg_gen_mul_i64(t0
, t0
, t1
);
587 tcg_gen_extrl_i64_i32(d
, t0
);
588 tcg_gen_shri_i64(t0
, t0
, 32);
589 tcg_gen_extrl_i64_i32(d2
, t0
);
591 tcg_temp_free_i64(t0
);
592 tcg_temp_free_i64(t1
);
595 /* 64-bit unsigned muls, lower result in d and upper in d2. */
596 static void t_gen_mulu(TCGv d
, TCGv d2
, TCGv a
, TCGv b
)
600 t0
= tcg_temp_new_i64();
601 t1
= tcg_temp_new_i64();
603 tcg_gen_extu_i32_i64(t0
, a
);
604 tcg_gen_extu_i32_i64(t1
, b
);
605 tcg_gen_mul_i64(t0
, t0
, t1
);
607 tcg_gen_extrl_i64_i32(d
, t0
);
608 tcg_gen_shri_i64(t0
, t0
, 32);
609 tcg_gen_extrl_i64_i32(d2
, t0
);
611 tcg_temp_free_i64(t0
);
612 tcg_temp_free_i64(t1
);
615 /* Multiplier unit. */
616 static void dec_mul(DisasContext
*dc
)
619 unsigned int subcode
;
621 if ((dc
->tb_flags
& MSR_EE_FLAG
)
622 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
623 && !(dc
->cpu
->env
.pvr
.regs
[0] & PVR0_USE_HW_MUL_MASK
)) {
624 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
625 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
629 subcode
= dc
->imm
& 3;
630 d
[0] = tcg_temp_new();
631 d
[1] = tcg_temp_new();
634 LOG_DIS("muli r%d r%d %x\n", dc
->rd
, dc
->ra
, dc
->imm
);
635 t_gen_mulu(cpu_R
[dc
->rd
], d
[1], cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
639 /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2. */
640 if (subcode
>= 1 && subcode
<= 3
641 && !((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_USE_MUL64_MASK
))) {
647 LOG_DIS("mul r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
648 t_gen_mulu(cpu_R
[dc
->rd
], d
[1], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
651 LOG_DIS("mulh r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
652 t_gen_muls(d
[0], cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
655 LOG_DIS("mulhsu r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
656 t_gen_muls(d
[0], cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
659 LOG_DIS("mulhu r%d r%d r%d\n", dc
->rd
, dc
->ra
, dc
->rb
);
660 t_gen_mulu(d
[0], cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
663 cpu_abort(CPU(dc
->cpu
), "unknown MUL insn %x\n", subcode
);
672 static void dec_div(DisasContext
*dc
)
679 if ((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
680 && !((dc
->cpu
->env
.pvr
.regs
[0] & PVR0_USE_DIV_MASK
))) {
681 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
682 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
686 gen_helper_divu(cpu_R
[dc
->rd
], cpu_env
, *(dec_alu_op_b(dc
)),
689 gen_helper_divs(cpu_R
[dc
->rd
], cpu_env
, *(dec_alu_op_b(dc
)),
692 tcg_gen_movi_tl(cpu_R
[dc
->rd
], 0);
695 static void dec_barrel(DisasContext
*dc
)
700 if ((dc
->tb_flags
& MSR_EE_FLAG
)
701 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
702 && !(dc
->cpu
->env
.pvr
.regs
[0] & PVR0_USE_BARREL_MASK
)) {
703 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
704 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
708 s
= dc
->imm
& (1 << 10);
709 t
= dc
->imm
& (1 << 9);
711 LOG_DIS("bs%s%s r%d r%d r%d\n",
712 s
? "l" : "r", t
? "a" : "l", dc
->rd
, dc
->ra
, dc
->rb
);
716 tcg_gen_mov_tl(t0
, *(dec_alu_op_b(dc
)));
717 tcg_gen_andi_tl(t0
, t0
, 31);
720 tcg_gen_shl_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
723 tcg_gen_sar_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
725 tcg_gen_shr_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], t0
);
729 static void dec_bit(DisasContext
*dc
)
731 CPUState
*cs
= CPU(dc
->cpu
);
734 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
736 op
= dc
->ir
& ((1 << 9) - 1);
742 LOG_DIS("src r%d r%d\n", dc
->rd
, dc
->ra
);
743 tcg_gen_andi_tl(t0
, cpu_SR
[SR_MSR
], MSR_CC
);
744 write_carry(dc
, cpu_R
[dc
->ra
]);
746 tcg_gen_shri_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
747 tcg_gen_or_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->rd
], t0
);
755 LOG_DIS("srl r%d r%d\n", dc
->rd
, dc
->ra
);
757 /* Update carry. Note that write carry only looks at the LSB. */
758 write_carry(dc
, cpu_R
[dc
->ra
]);
761 tcg_gen_shri_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
763 tcg_gen_sari_tl(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 1);
767 LOG_DIS("ext8s r%d r%d\n", dc
->rd
, dc
->ra
);
768 tcg_gen_ext8s_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
771 LOG_DIS("ext16s r%d r%d\n", dc
->rd
, dc
->ra
);
772 tcg_gen_ext16s_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
779 LOG_DIS("wdc r%d\n", dc
->ra
);
780 if ((dc
->tb_flags
& MSR_EE_FLAG
)
781 && mem_index
== MMU_USER_IDX
) {
782 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
783 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
789 LOG_DIS("wic r%d\n", dc
->ra
);
790 if ((dc
->tb_flags
& MSR_EE_FLAG
)
791 && mem_index
== MMU_USER_IDX
) {
792 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
793 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
798 if ((dc
->tb_flags
& MSR_EE_FLAG
)
799 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
800 && !((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_USE_PCMP_INSTR
))) {
801 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
802 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
804 if (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_USE_PCMP_INSTR
) {
805 gen_helper_clz(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
810 LOG_DIS("swapb r%d r%d\n", dc
->rd
, dc
->ra
);
811 tcg_gen_bswap32_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
]);
815 LOG_DIS("swaph r%d r%d\n", dc
->rd
, dc
->ra
);
816 tcg_gen_rotri_i32(cpu_R
[dc
->rd
], cpu_R
[dc
->ra
], 16);
819 cpu_abort(cs
, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
820 dc
->pc
, op
, dc
->rd
, dc
->ra
, dc
->rb
);
825 static inline void sync_jmpstate(DisasContext
*dc
)
827 if (dc
->jmp
== JMP_DIRECT
|| dc
->jmp
== JMP_DIRECT_CC
) {
828 if (dc
->jmp
== JMP_DIRECT
) {
829 tcg_gen_movi_tl(env_btaken
, 1);
831 dc
->jmp
= JMP_INDIRECT
;
832 tcg_gen_movi_tl(env_btarget
, dc
->jmp_pc
);
836 static void dec_imm(DisasContext
*dc
)
838 LOG_DIS("imm %x\n", dc
->imm
<< 16);
839 tcg_gen_movi_tl(env_imm
, (dc
->imm
<< 16));
840 dc
->tb_flags
|= IMM_FLAG
;
844 static inline TCGv
*compute_ldst_addr(DisasContext
*dc
, TCGv
*t
)
846 unsigned int extimm
= dc
->tb_flags
& IMM_FLAG
;
847 /* Should be set to one if r1 is used by loadstores. */
850 /* All load/stores use ra. */
851 if (dc
->ra
== 1 && dc
->cpu
->cfg
.stackprot
) {
855 /* Treat the common cases first. */
857 /* If any of the regs is r0, return a ptr to the other. */
859 return &cpu_R
[dc
->rb
];
860 } else if (dc
->rb
== 0) {
861 return &cpu_R
[dc
->ra
];
864 if (dc
->rb
== 1 && dc
->cpu
->cfg
.stackprot
) {
869 tcg_gen_add_tl(*t
, cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
872 gen_helper_stackprot(cpu_env
, *t
);
879 return &cpu_R
[dc
->ra
];
882 tcg_gen_movi_tl(*t
, (int32_t)((int16_t)dc
->imm
));
883 tcg_gen_add_tl(*t
, cpu_R
[dc
->ra
], *t
);
886 tcg_gen_add_tl(*t
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
890 gen_helper_stackprot(cpu_env
, *t
);
895 static void dec_load(DisasContext
*dc
)
898 unsigned int size
, rev
= 0, ex
= 0;
901 mop
= dc
->opcode
& 3;
904 rev
= (dc
->ir
>> 9) & 1;
905 ex
= (dc
->ir
>> 10) & 1;
912 if (size
> 4 && (dc
->tb_flags
& MSR_EE_FLAG
)
913 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)) {
914 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
915 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
919 LOG_DIS("l%d%s%s%s\n", size
, dc
->type_b
? "i" : "", rev
? "r" : "",
923 addr
= compute_ldst_addr(dc
, &t
);
926 * When doing reverse accesses we need to do two things.
928 * 1. Reverse the address wrt endianness.
929 * 2. Byteswap the data lanes on the way back into the CPU core.
931 if (rev
&& size
!= 4) {
932 /* Endian reverse the address. t is addr. */
940 TCGv low
= tcg_temp_new();
942 /* Force addr into the temp. */
945 tcg_gen_mov_tl(t
, *addr
);
949 tcg_gen_andi_tl(low
, t
, 3);
950 tcg_gen_sub_tl(low
, tcg_const_tl(3), low
);
951 tcg_gen_andi_tl(t
, t
, ~3);
952 tcg_gen_or_tl(t
, t
, low
);
953 tcg_gen_mov_tl(env_imm
, t
);
961 /* Force addr into the temp. */
964 tcg_gen_xori_tl(t
, *addr
, 2);
967 tcg_gen_xori_tl(t
, t
, 2);
971 cpu_abort(CPU(dc
->cpu
), "Invalid reverse size\n");
976 /* lwx does not throw unaligned access errors, so force alignment */
978 /* Force addr into the temp. */
981 tcg_gen_mov_tl(t
, *addr
);
984 tcg_gen_andi_tl(t
, t
, ~3);
987 /* If we get a fault on a dslot, the jmpstate better be in sync. */
990 /* Verify alignment if needed. */
992 * Microblaze gives MMU faults priority over faults due to
993 * unaligned addresses. That's why we speculatively do the load
994 * into v. If the load succeeds, we verify alignment of the
995 * address and if that succeeds we write into the destination reg.
998 tcg_gen_qemu_ld_tl(v
, *addr
, cpu_mmu_index(&dc
->cpu
->env
, false), mop
);
1000 if ((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_UNALIGNED_EXC_MASK
) && size
> 1) {
1001 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
);
1002 gen_helper_memalign(cpu_env
, *addr
, tcg_const_tl(dc
->rd
),
1003 tcg_const_tl(0), tcg_const_tl(size
- 1));
1007 tcg_gen_mov_tl(env_res_addr
, *addr
);
1008 tcg_gen_mov_tl(env_res_val
, v
);
1011 tcg_gen_mov_tl(cpu_R
[dc
->rd
], v
);
1016 /* no support for AXI exclusive so always clear C */
1017 write_carryi(dc
, 0);
1024 static void dec_store(DisasContext
*dc
)
1026 TCGv t
, *addr
, swx_addr
;
1027 TCGLabel
*swx_skip
= NULL
;
1028 unsigned int size
, rev
= 0, ex
= 0;
1031 mop
= dc
->opcode
& 3;
1034 rev
= (dc
->ir
>> 9) & 1;
1035 ex
= (dc
->ir
>> 10) & 1;
1042 if (size
> 4 && (dc
->tb_flags
& MSR_EE_FLAG
)
1043 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)) {
1044 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1045 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1049 LOG_DIS("s%d%s%s%s\n", size
, dc
->type_b
? "i" : "", rev
? "r" : "",
1052 /* If we get a fault on a dslot, the jmpstate better be in sync. */
1054 addr
= compute_ldst_addr(dc
, &t
);
1056 swx_addr
= tcg_temp_local_new();
1060 /* Force addr into the swx_addr. */
1061 tcg_gen_mov_tl(swx_addr
, *addr
);
1063 /* swx does not throw unaligned access errors, so force alignment */
1064 tcg_gen_andi_tl(swx_addr
, swx_addr
, ~3);
1066 write_carryi(dc
, 1);
1067 swx_skip
= gen_new_label();
1068 tcg_gen_brcond_tl(TCG_COND_NE
, env_res_addr
, swx_addr
, swx_skip
);
1070 /* Compare the value loaded at lwx with current contents of
1071 the reserved location.
1072 FIXME: This only works for system emulation where we can expect
1073 this compare and the following write to be atomic. For user
1074 emulation we need to add atomicity between threads. */
1075 tval
= tcg_temp_new();
1076 tcg_gen_qemu_ld_tl(tval
, swx_addr
, cpu_mmu_index(&dc
->cpu
->env
, false),
1078 tcg_gen_brcond_tl(TCG_COND_NE
, env_res_val
, tval
, swx_skip
);
1079 write_carryi(dc
, 0);
1080 tcg_temp_free(tval
);
1083 if (rev
&& size
!= 4) {
1084 /* Endian reverse the address. t is addr. */
1092 TCGv low
= tcg_temp_new();
1094 /* Force addr into the temp. */
1097 tcg_gen_mov_tl(t
, *addr
);
1101 tcg_gen_andi_tl(low
, t
, 3);
1102 tcg_gen_sub_tl(low
, tcg_const_tl(3), low
);
1103 tcg_gen_andi_tl(t
, t
, ~3);
1104 tcg_gen_or_tl(t
, t
, low
);
1105 tcg_gen_mov_tl(env_imm
, t
);
1113 /* Force addr into the temp. */
1116 tcg_gen_xori_tl(t
, *addr
, 2);
1119 tcg_gen_xori_tl(t
, t
, 2);
1123 cpu_abort(CPU(dc
->cpu
), "Invalid reverse size\n");
1127 tcg_gen_qemu_st_tl(cpu_R
[dc
->rd
], *addr
, cpu_mmu_index(&dc
->cpu
->env
, false), mop
);
1129 /* Verify alignment if needed. */
1130 if ((dc
->cpu
->env
.pvr
.regs
[2] & PVR2_UNALIGNED_EXC_MASK
) && size
> 1) {
1131 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
);
1132 /* FIXME: if the alignment is wrong, we should restore the value
1133 * in memory. One possible way to achieve this is to probe
1134 * the MMU prior to the memaccess, thay way we could put
1135 * the alignment checks in between the probe and the mem
1138 gen_helper_memalign(cpu_env
, *addr
, tcg_const_tl(dc
->rd
),
1139 tcg_const_tl(1), tcg_const_tl(size
- 1));
1143 gen_set_label(swx_skip
);
1145 tcg_temp_free(swx_addr
);
1151 static inline void eval_cc(DisasContext
*dc
, unsigned int cc
,
1152 TCGv d
, TCGv a
, TCGv b
)
1156 tcg_gen_setcond_tl(TCG_COND_EQ
, d
, a
, b
);
1159 tcg_gen_setcond_tl(TCG_COND_NE
, d
, a
, b
);
1162 tcg_gen_setcond_tl(TCG_COND_LT
, d
, a
, b
);
1165 tcg_gen_setcond_tl(TCG_COND_LE
, d
, a
, b
);
1168 tcg_gen_setcond_tl(TCG_COND_GE
, d
, a
, b
);
1171 tcg_gen_setcond_tl(TCG_COND_GT
, d
, a
, b
);
1174 cpu_abort(CPU(dc
->cpu
), "Unknown condition code %x.\n", cc
);
1179 static void eval_cond_jmp(DisasContext
*dc
, TCGv pc_true
, TCGv pc_false
)
1181 TCGLabel
*l1
= gen_new_label();
1182 /* Conditional jmp. */
1183 tcg_gen_mov_tl(cpu_SR
[SR_PC
], pc_false
);
1184 tcg_gen_brcondi_tl(TCG_COND_EQ
, env_btaken
, 0, l1
);
1185 tcg_gen_mov_tl(cpu_SR
[SR_PC
], pc_true
);
1189 static void dec_bcc(DisasContext
*dc
)
1194 cc
= EXTRACT_FIELD(dc
->ir
, 21, 23);
1195 dslot
= dc
->ir
& (1 << 25);
1196 LOG_DIS("bcc%s r%d %x\n", dslot
? "d" : "", dc
->ra
, dc
->imm
);
1198 dc
->delayed_branch
= 1;
1200 dc
->delayed_branch
= 2;
1201 dc
->tb_flags
|= D_FLAG
;
1202 tcg_gen_st_tl(tcg_const_tl(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
)),
1203 cpu_env
, offsetof(CPUMBState
, bimm
));
1206 if (dec_alu_op_b_is_small_imm(dc
)) {
1207 int32_t offset
= (int32_t)((int16_t)dc
->imm
); /* sign-extend. */
1209 tcg_gen_movi_tl(env_btarget
, dc
->pc
+ offset
);
1210 dc
->jmp
= JMP_DIRECT_CC
;
1211 dc
->jmp_pc
= dc
->pc
+ offset
;
1213 dc
->jmp
= JMP_INDIRECT
;
1214 tcg_gen_movi_tl(env_btarget
, dc
->pc
);
1215 tcg_gen_add_tl(env_btarget
, env_btarget
, *(dec_alu_op_b(dc
)));
1217 eval_cc(dc
, cc
, env_btaken
, cpu_R
[dc
->ra
], tcg_const_tl(0));
1220 static void dec_br(DisasContext
*dc
)
1222 unsigned int dslot
, link
, abs
, mbar
;
1223 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
1225 dslot
= dc
->ir
& (1 << 20);
1226 abs
= dc
->ir
& (1 << 19);
1227 link
= dc
->ir
& (1 << 18);
1229 /* Memory barrier. */
1230 mbar
= (dc
->ir
>> 16) & 31;
1231 if (mbar
== 2 && dc
->imm
== 4) {
1232 /* mbar IMM & 16 decodes to sleep. */
1234 TCGv_i32 tmp_hlt
= tcg_const_i32(EXCP_HLT
);
1235 TCGv_i32 tmp_1
= tcg_const_i32(1);
1240 tcg_gen_st_i32(tmp_1
, cpu_env
,
1241 -offsetof(MicroBlazeCPU
, env
)
1242 +offsetof(CPUState
, halted
));
1243 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
+ 4);
1244 gen_helper_raise_exception(cpu_env
, tmp_hlt
);
1245 tcg_temp_free_i32(tmp_hlt
);
1246 tcg_temp_free_i32(tmp_1
);
1249 LOG_DIS("mbar %d\n", dc
->rd
);
1251 dc
->cpustate_changed
= 1;
1255 LOG_DIS("br%s%s%s%s imm=%x\n",
1256 abs
? "a" : "", link
? "l" : "",
1257 dc
->type_b
? "i" : "", dslot
? "d" : "",
1260 dc
->delayed_branch
= 1;
1262 dc
->delayed_branch
= 2;
1263 dc
->tb_flags
|= D_FLAG
;
1264 tcg_gen_st_tl(tcg_const_tl(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
)),
1265 cpu_env
, offsetof(CPUMBState
, bimm
));
1268 tcg_gen_movi_tl(cpu_R
[dc
->rd
], dc
->pc
);
1270 dc
->jmp
= JMP_INDIRECT
;
1272 tcg_gen_movi_tl(env_btaken
, 1);
1273 tcg_gen_mov_tl(env_btarget
, *(dec_alu_op_b(dc
)));
1274 if (link
&& !dslot
) {
1275 if (!(dc
->tb_flags
& IMM_FLAG
) && (dc
->imm
== 8 || dc
->imm
== 0x18))
1276 t_gen_raise_exception(dc
, EXCP_BREAK
);
1278 if ((dc
->tb_flags
& MSR_EE_FLAG
) && mem_index
== MMU_USER_IDX
) {
1279 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1280 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1284 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1288 if (dec_alu_op_b_is_small_imm(dc
)) {
1289 dc
->jmp
= JMP_DIRECT
;
1290 dc
->jmp_pc
= dc
->pc
+ (int32_t)((int16_t)dc
->imm
);
1292 tcg_gen_movi_tl(env_btaken
, 1);
1293 tcg_gen_movi_tl(env_btarget
, dc
->pc
);
1294 tcg_gen_add_tl(env_btarget
, env_btarget
, *(dec_alu_op_b(dc
)));
1299 static inline void do_rti(DisasContext
*dc
)
1302 t0
= tcg_temp_new();
1303 t1
= tcg_temp_new();
1304 tcg_gen_shri_tl(t0
, cpu_SR
[SR_MSR
], 1);
1305 tcg_gen_ori_tl(t1
, cpu_SR
[SR_MSR
], MSR_IE
);
1306 tcg_gen_andi_tl(t0
, t0
, (MSR_VM
| MSR_UM
));
1308 tcg_gen_andi_tl(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1309 tcg_gen_or_tl(t1
, t1
, t0
);
1313 dc
->tb_flags
&= ~DRTI_FLAG
;
1316 static inline void do_rtb(DisasContext
*dc
)
1319 t0
= tcg_temp_new();
1320 t1
= tcg_temp_new();
1321 tcg_gen_andi_tl(t1
, cpu_SR
[SR_MSR
], ~MSR_BIP
);
1322 tcg_gen_shri_tl(t0
, t1
, 1);
1323 tcg_gen_andi_tl(t0
, t0
, (MSR_VM
| MSR_UM
));
1325 tcg_gen_andi_tl(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1326 tcg_gen_or_tl(t1
, t1
, t0
);
1330 dc
->tb_flags
&= ~DRTB_FLAG
;
1333 static inline void do_rte(DisasContext
*dc
)
1336 t0
= tcg_temp_new();
1337 t1
= tcg_temp_new();
1339 tcg_gen_ori_tl(t1
, cpu_SR
[SR_MSR
], MSR_EE
);
1340 tcg_gen_andi_tl(t1
, t1
, ~MSR_EIP
);
1341 tcg_gen_shri_tl(t0
, t1
, 1);
1342 tcg_gen_andi_tl(t0
, t0
, (MSR_VM
| MSR_UM
));
1344 tcg_gen_andi_tl(t1
, t1
, ~(MSR_VM
| MSR_UM
));
1345 tcg_gen_or_tl(t1
, t1
, t0
);
1349 dc
->tb_flags
&= ~DRTE_FLAG
;
1352 static void dec_rts(DisasContext
*dc
)
1354 unsigned int b_bit
, i_bit
, e_bit
;
1355 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
1357 i_bit
= dc
->ir
& (1 << 21);
1358 b_bit
= dc
->ir
& (1 << 22);
1359 e_bit
= dc
->ir
& (1 << 23);
1361 dc
->delayed_branch
= 2;
1362 dc
->tb_flags
|= D_FLAG
;
1363 tcg_gen_st_tl(tcg_const_tl(dc
->type_b
&& (dc
->tb_flags
& IMM_FLAG
)),
1364 cpu_env
, offsetof(CPUMBState
, bimm
));
1367 LOG_DIS("rtid ir=%x\n", dc
->ir
);
1368 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1369 && mem_index
== MMU_USER_IDX
) {
1370 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1371 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1373 dc
->tb_flags
|= DRTI_FLAG
;
1375 LOG_DIS("rtbd ir=%x\n", dc
->ir
);
1376 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1377 && mem_index
== MMU_USER_IDX
) {
1378 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1379 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1381 dc
->tb_flags
|= DRTB_FLAG
;
1383 LOG_DIS("rted ir=%x\n", dc
->ir
);
1384 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1385 && mem_index
== MMU_USER_IDX
) {
1386 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1387 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1389 dc
->tb_flags
|= DRTE_FLAG
;
1391 LOG_DIS("rts ir=%x\n", dc
->ir
);
1393 dc
->jmp
= JMP_INDIRECT
;
1394 tcg_gen_movi_tl(env_btaken
, 1);
1395 tcg_gen_add_tl(env_btarget
, cpu_R
[dc
->ra
], *(dec_alu_op_b(dc
)));
1398 static int dec_check_fpuv2(DisasContext
*dc
)
1400 if ((dc
->cpu
->cfg
.use_fpu
!= 2) && (dc
->tb_flags
& MSR_EE_FLAG
)) {
1401 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_FPU
);
1402 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1404 return (dc
->cpu
->cfg
.use_fpu
== 2) ? 0 : PVR2_USE_FPU2_MASK
;
1407 static void dec_fpu(DisasContext
*dc
)
1409 unsigned int fpu_insn
;
1411 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1412 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
1413 && (dc
->cpu
->cfg
.use_fpu
!= 1)) {
1414 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1415 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1419 fpu_insn
= (dc
->ir
>> 7) & 7;
1423 gen_helper_fadd(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1428 gen_helper_frsub(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1433 gen_helper_fmul(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1438 gen_helper_fdiv(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
],
1443 switch ((dc
->ir
>> 4) & 7) {
1445 gen_helper_fcmp_un(cpu_R
[dc
->rd
], cpu_env
,
1446 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1449 gen_helper_fcmp_lt(cpu_R
[dc
->rd
], cpu_env
,
1450 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1453 gen_helper_fcmp_eq(cpu_R
[dc
->rd
], cpu_env
,
1454 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1457 gen_helper_fcmp_le(cpu_R
[dc
->rd
], cpu_env
,
1458 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1461 gen_helper_fcmp_gt(cpu_R
[dc
->rd
], cpu_env
,
1462 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1465 gen_helper_fcmp_ne(cpu_R
[dc
->rd
], cpu_env
,
1466 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1469 gen_helper_fcmp_ge(cpu_R
[dc
->rd
], cpu_env
,
1470 cpu_R
[dc
->ra
], cpu_R
[dc
->rb
]);
1473 qemu_log_mask(LOG_UNIMP
,
1474 "unimplemented fcmp fpu_insn=%x pc=%x"
1476 fpu_insn
, dc
->pc
, dc
->opcode
);
1477 dc
->abort_at_next_insn
= 1;
1483 if (!dec_check_fpuv2(dc
)) {
1486 gen_helper_flt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1490 if (!dec_check_fpuv2(dc
)) {
1493 gen_helper_fint(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1497 if (!dec_check_fpuv2(dc
)) {
1500 gen_helper_fsqrt(cpu_R
[dc
->rd
], cpu_env
, cpu_R
[dc
->ra
]);
1504 qemu_log_mask(LOG_UNIMP
, "unimplemented FPU insn fpu_insn=%x pc=%x"
1506 fpu_insn
, dc
->pc
, dc
->opcode
);
1507 dc
->abort_at_next_insn
= 1;
1512 static void dec_null(DisasContext
*dc
)
1514 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1515 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)) {
1516 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1517 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1520 qemu_log_mask(LOG_GUEST_ERROR
, "unknown insn pc=%x opc=%x\n", dc
->pc
, dc
->opcode
);
1521 dc
->abort_at_next_insn
= 1;
/*
 * dec_stream: decode FSL/AXI-Stream get/put instructions.
 * rd != 0 selects "get" (read from stream into rd), otherwise "put"
 * (write ra to stream).  The stream id comes either from the immediate
 * (type_b form) or from register rb (dynamic "d" form).
 * NOTE(review): this extracted view is missing interleaved source lines
 * (e.g. the 'int ctrl;' declaration at original line 1529 and the
 * if/else skeletons around lines 1541/1544 and 1551/1553).  The embedded
 * numbers are the original file's line numbers; verify against upstream.
 */
1524 /* Insns connected to FSL or AXI stream attached devices. */
1525 static void dec_stream(DisasContext
*dc
)
1527 int mem_index
= cpu_mmu_index(&dc
->cpu
->env
, false);
1528 TCGv_i32 t_id
, t_ctrl
;
1531 LOG_DIS("%s%s imm=%x\n", dc
->rd
? "get" : "put",
1532 dc
->type_b
? "" : "d", dc
->imm
);
/* Stream insns are privileged: raise EXCP_HW_EXCP from user mode when
   hardware exceptions are enabled. */
1534 if ((dc
->tb_flags
& MSR_EE_FLAG
) && (mem_index
== MMU_USER_IDX
)) {
1535 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_PRIVINSN
);
1536 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
1540 t_id
= tcg_temp_new();
/* type_b form: stream id in imm[3:0], control bits from imm >> 10. */
1542 tcg_gen_movi_tl(t_id
, dc
->imm
& 0xf);
1543 ctrl
= dc
->imm
>> 10;
/* dynamic form: stream id in rb[3:0], control bits from imm >> 5. */
1545 tcg_gen_andi_tl(t_id
, cpu_R
[dc
->rb
], 0xf);
1546 ctrl
= dc
->imm
>> 5;
1549 t_ctrl
= tcg_const_tl(ctrl
);
/* put: send ra to the stream; get: read the stream into rd. */
1552 gen_helper_put(t_id
, t_ctrl
, cpu_R
[dc
->ra
]);
1554 gen_helper_get(cpu_R
[dc
->rd
], t_id
, t_ctrl
);
1556 tcg_temp_free(t_id
);
1557 tcg_temp_free(t_ctrl
);
/*
 * Decode dispatch table: each entry pairs an opcode mask/bits pattern
 * with the handler that translates matching instructions.  decode()
 * below scans this table linearly.
 * NOTE(review): most table entries are elided in this extracted view
 * (original lines 1566-1583 are largely missing); only the barrel-shift,
 * store and stream entries are visible.  Verify against upstream.
 */
1560 static struct decoder_info
{
1565 void (*dec
)(DisasContext
*dc
);
1573 {DEC_BARREL
, dec_barrel
},
1575 {DEC_ST
, dec_store
},
1584 {DEC_STREAM
, dec_stream
},
/*
 * decode: split a 32-bit MicroBlaze instruction word into the
 * DisasContext fields (opcode, rd, ra, rb, imm, type_b) and dispatch to
 * the matching handler in the decinfo table.
 * NOTE(review): interleaved source lines are elided in this extracted
 * view (e.g. the zero-insn/nop bookkeeping around original lines
 * 1594-1610 and the dispatch call after 1623).  The embedded numbers
 * are the original file's line numbers; verify against upstream.
 */
1588 static inline void decode(DisasContext
*dc
, uint32_t ir
)
1593 LOG_DIS("%8.8x\t", dc
->ir
);
/* An all-zero opcode may itself be an illegal opcode, depending on the
   PVR2_OPCODE_0x0_ILL_MASK configuration bit. */
1598 if ((dc
->tb_flags
& MSR_EE_FLAG
)
1599 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_ILL_OPCODE_EXC_MASK
)
1600 && (dc
->cpu
->env
.pvr
.regs
[2] & PVR2_OPCODE_0x0_ILL_MASK
)) {
1601 tcg_gen_movi_tl(cpu_SR
[SR_ESR
], ESR_EC_ILLEGAL_OP
);
1602 t_gen_raise_exception(dc
, EXCP_HW_EXCP
);
/* Guard against runaway nop sequences (likely fetching from bad memory). */
1606 LOG_DIS("nr_nops=%d\t", dc
->nr_nops
);
1608 if (dc
->nr_nops
> 4) {
1609 cpu_abort(CPU(dc
->cpu
), "fetching nop sequence\n");
1612 /* bit 2 seems to indicate insn type. */
1613 dc
->type_b
= ir
& (1 << 29);
/* Field extraction per the MicroBlaze encoding: opcode 31:26, rd 25:21,
   ra 20:16, rb 15:11, imm 15:0 (bit numbering per EXTRACT_FIELD). */
1615 dc
->opcode
= EXTRACT_FIELD(ir
, 26, 31);
1616 dc
->rd
= EXTRACT_FIELD(ir
, 21, 25);
1617 dc
->ra
= EXTRACT_FIELD(ir
, 16, 20);
1618 dc
->rb
= EXTRACT_FIELD(ir
, 11, 15);
1619 dc
->imm
= EXTRACT_FIELD(ir
, 0, 15);
1621 /* Large switch for all insns. */
1622 for (i
= 0; i
< ARRAY_SIZE(decinfo
); i
++) {
1623 if ((dc
->opcode
& decinfo
[i
].mask
) == decinfo
[i
].bits
) {
/*
 * gen_intermediate_code: the translator main loop.  Decodes guest insns
 * starting at tb->pc, emitting TCG ops, until the TB ends (jump, cpu
 * state change, page boundary, op buffer full, or max_insns reached),
 * then emits the TB epilogue and fills in tb->size / tb->icount.
 * NOTE(review): this extracted view is missing many interleaved source
 * lines (variable declarations such as pc_start/num_insns/npc, the
 * do { } loop head, delay-slot helpers, brace/case lines).  The embedded
 * numbers are the original file's line numbers and the jumps between
 * them mark the gaps; verify against upstream before editing.
 */
1630 /* generate intermediate code for basic block 'tb'. */
1631 void gen_intermediate_code(CPUMBState
*env
, struct TranslationBlock
*tb
)
1633 MicroBlazeCPU
*cpu
= mb_env_get_cpu(env
);
1634 CPUState
*cs
= CPU(cpu
);
1636 struct DisasContext ctx
;
1637 struct DisasContext
*dc
= &ctx
;
1638 uint32_t next_page_start
, org_flags
;
/* Snapshot the incoming tb flags so a change can force a state writeback. */
1646 org_flags
= dc
->synced_flags
= dc
->tb_flags
= tb
->flags
;
1648 dc
->is_jmp
= DISAS_NEXT
;
/* Entering mid-delay-slot: treat the pending branch as indirect. */
1650 dc
->delayed_branch
= !!(dc
->tb_flags
& D_FLAG
);
1651 if (dc
->delayed_branch
) {
1652 dc
->jmp
= JMP_INDIRECT
;
1655 dc
->singlestep_enabled
= cs
->singlestep_enabled
;
1656 dc
->cpustate_changed
= 0;
1657 dc
->abort_at_next_insn
= 0;
/* MicroBlaze insns are 4-byte aligned; a misaligned PC is a QEMU bug. */
1661 cpu_abort(cs
, "Microblaze: unaligned PC=%x\n", pc_start
);
1664 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1666 qemu_log("--------------\n");
1667 log_cpu_state(CPU(cpu
), 0);
/* Never translate across a guest page boundary. */
1671 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
1673 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1674 if (max_insns
== 0) {
1675 max_insns
= CF_COUNT_MASK
;
1677 if (max_insns
> TCG_MAX_INSNS
) {
1678 max_insns
= TCG_MAX_INSNS
;
/* --- per-insn translation loop body --- */
1684 tcg_gen_insn_start(dc
->pc
);
/* Keep SR_PC live while disas logging so dumps show the right PC. */
1688 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1689 tcg_gen_movi_tl(cpu_SR
[SR_PC
], dc
->pc
);
1694 if (unlikely(cpu_breakpoint_test(cs
, dc
->pc
, BP_ANY
))) {
1695 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1696 dc
->is_jmp
= DISAS_UPDATE
;
1697 /* The address covered by the breakpoint must be included in
1698 [tb->pc, tb->pc + tb->size) in order to for it to be
1699 properly cleared -- thus we increment the PC here so that
1700 the logic setting tb->size below does the right thing. */
1706 LOG_DIS("%8.8x:\t", dc
->pc
);
1708 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
1713 decode(dc
, cpu_ldl_code(env
, dc
->pc
));
/* An imm prefix only covers the immediately-following insn. */
1715 dc
->tb_flags
&= ~IMM_FLAG
;
/* Delay-slot handling: once the slot insn has been translated,
   resolve the pending branch (rti/rtb/rte variants elided here). */
1718 if (dc
->delayed_branch
) {
1719 dc
->delayed_branch
--;
1720 if (!dc
->delayed_branch
) {
1721 if (dc
->tb_flags
& DRTI_FLAG
)
1723 if (dc
->tb_flags
& DRTB_FLAG
)
1725 if (dc
->tb_flags
& DRTE_FLAG
)
1727 /* Clear the delay slot flag. */
1728 dc
->tb_flags
&= ~D_FLAG
;
1729 /* If it is a direct jump, try direct chaining. */
1730 if (dc
->jmp
== JMP_INDIRECT
) {
1731 eval_cond_jmp(dc
, env_btarget
, tcg_const_tl(dc
->pc
));
1732 dc
->is_jmp
= DISAS_JUMP
;
1733 } else if (dc
->jmp
== JMP_DIRECT
) {
1735 gen_goto_tb(dc
, 0, dc
->jmp_pc
);
1736 dc
->is_jmp
= DISAS_TB_JUMP
;
1737 } else if (dc
->jmp
== JMP_DIRECT_CC
) {
1738 TCGLabel
*l1
= gen_new_label();
1740 /* Conditional jmp. */
1741 tcg_gen_brcondi_tl(TCG_COND_NE
, env_btaken
, 0, l1
);
1742 gen_goto_tb(dc
, 1, dc
->pc
);
1744 gen_goto_tb(dc
, 0, dc
->jmp_pc
);
1746 dc
->is_jmp
= DISAS_TB_JUMP
;
1751 if (cs
->singlestep_enabled
) {
/* Loop-termination conditions for the TB. */
1754 } while (!dc
->is_jmp
&& !dc
->cpustate_changed
1755 && !tcg_op_buf_full()
1757 && (dc
->pc
< next_page_start
)
1758 && num_insns
< max_insns
);
/* A TB ending inside a direct branch's delay slot must still write
   back the PC so the branch resumes correctly. */
1761 if (dc
->jmp
== JMP_DIRECT
|| dc
->jmp
== JMP_DIRECT_CC
) {
1762 if (dc
->tb_flags
& D_FLAG
) {
1763 dc
->is_jmp
= DISAS_UPDATE
;
1764 tcg_gen_movi_tl(cpu_SR
[SR_PC
], npc
);
1770 if (tb
->cflags
& CF_LAST_IO
)
1772 /* Force an update if the per-tb cpu state has changed. */
1773 if (dc
->is_jmp
== DISAS_NEXT
1774 && (dc
->cpustate_changed
|| org_flags
!= dc
->tb_flags
)) {
1775 dc
->is_jmp
= DISAS_UPDATE
;
1776 tcg_gen_movi_tl(cpu_SR
[SR_PC
], npc
);
/* Single-stepping: raise EXCP_DEBUG at TB end instead of chaining. */
1780 if (unlikely(cs
->singlestep_enabled
)) {
1781 TCGv_i32 tmp
= tcg_const_i32(EXCP_DEBUG
);
1783 if (dc
->is_jmp
!= DISAS_JUMP
) {
1784 tcg_gen_movi_tl(cpu_SR
[SR_PC
], npc
);
1786 gen_helper_raise_exception(cpu_env
, tmp
);
1787 tcg_temp_free_i32(tmp
);
/* TB epilogue: chain, exit, or fall through per is_jmp. */
1789 switch(dc
->is_jmp
) {
1791 gen_goto_tb(dc
, 1, npc
);
1796 /* indicate that the hash table must be used
1797 to find the next TB */
1801 /* nothing more to generate */
1805 gen_tb_end(tb
, num_insns
);
1807 tb
->size
= dc
->pc
- pc_start
;
1808 tb
->icount
= num_insns
;
1812 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1815 log_target_disas(cs
, pc_start
, dc
->pc
- pc_start
, 0);
1817 qemu_log("\nisize=%d osize=%d\n",
1818 dc
->pc
- pc_start
, tcg_op_buf_count());
1822 assert(!dc
->abort_at_next_insn
);
/*
 * mb_cpu_dump_state: pretty-print the CPU state (PC, special registers,
 * branch state, and all 32 GPRs four-per-line) through cpu_fprintf.
 * NOTE(review): the tail of the signature (flags parameter) and a few
 * body lines are elided in this extracted view; the embedded numbers are
 * the original file's line numbers.  Verify against upstream.
 */
1825 void mb_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
1828 MicroBlazeCPU
*cpu
= MICROBLAZE_CPU(cs
);
1829 CPUMBState
*env
= &cpu
->env
;
1835 cpu_fprintf(f
, "IN: PC=%x %s\n",
1836 env
->sregs
[SR_PC
], lookup_symbol(env
->sregs
[SR_PC
]));
1837 cpu_fprintf(f
, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
1838 env
->sregs
[SR_MSR
], env
->sregs
[SR_ESR
], env
->sregs
[SR_EAR
],
1839 env
->debug
, env
->imm
, env
->iflags
, env
->sregs
[SR_FSR
]);
/* MSR mode bits: UM = current mode, UMS = saved mode across exceptions. */
1840 cpu_fprintf(f
, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
1841 env
->btaken
, env
->btarget
,
1842 (env
->sregs
[SR_MSR
] & MSR_UM
) ? "user" : "kernel",
1843 (env
->sregs
[SR_MSR
] & MSR_UMS
) ? "user" : "kernel",
1844 (env
->sregs
[SR_MSR
] & MSR_EIP
),
1845 (env
->sregs
[SR_MSR
] & MSR_IE
));
/* General-purpose registers, four per output line. */
1847 for (i
= 0; i
< 32; i
++) {
1848 cpu_fprintf(f
, "r%2.2d=%8.8x ", i
, env
->regs
[i
]);
1849 if ((i
+ 1) % 4 == 0)
1850 cpu_fprintf(f
, "\n");
1852 cpu_fprintf(f
, "\n\n");
/*
 * cpu_mb_init: instantiate and realize a MicroBlaze CPU QOM object.
 * NOTE(review): the local declaration, the 'return cpu;' tail and any
 * use of cpu_model are elided in this extracted view (original lines
 * 1856-1858 and 1862+ missing); verify against upstream.
 */
1855 MicroBlazeCPU
*cpu_mb_init(const char *cpu_model
)
1859 cpu
= MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU
));
/* Realizing the object triggers the CPU's realize hook (reset etc.). */
1861 object_property_set_bool(OBJECT(cpu
), true, "realized", NULL
);
/*
 * mb_tcg_init: one-time TCG frontend setup.  Creates the global TCG
 * values that mirror CPUMBState fields (env pointer, debug/iflags/imm,
 * branch state, load-reserved state, the 32 GPRs and the special regs).
 * NOTE(review): the trailing name-string argument of each
 * tcg_global_mem_new() call (e.g. "debug", regnames[i]) is elided in
 * this extracted view; the embedded numbers are the original file's
 * line numbers.  Verify against upstream.
 */
1866 void mb_tcg_init(void)
1870 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
1872 env_debug
= tcg_global_mem_new(TCG_AREG0
,
1873 offsetof(CPUMBState
, debug
),
1875 env_iflags
= tcg_global_mem_new(TCG_AREG0
,
1876 offsetof(CPUMBState
, iflags
),
1878 env_imm
= tcg_global_mem_new(TCG_AREG0
,
1879 offsetof(CPUMBState
, imm
),
1881 env_btarget
= tcg_global_mem_new(TCG_AREG0
,
1882 offsetof(CPUMBState
, btarget
),
1884 env_btaken
= tcg_global_mem_new(TCG_AREG0
,
1885 offsetof(CPUMBState
, btaken
),
/* res_addr/res_val back the lwx/swx load-reserved sequence. */
1887 env_res_addr
= tcg_global_mem_new(TCG_AREG0
,
1888 offsetof(CPUMBState
, res_addr
),
1890 env_res_val
= tcg_global_mem_new(TCG_AREG0
,
1891 offsetof(CPUMBState
, res_val
),
/* One TCG global per general-purpose register r0..r31. */
1893 for (i
= 0; i
< ARRAY_SIZE(cpu_R
); i
++) {
1894 cpu_R
[i
] = tcg_global_mem_new(TCG_AREG0
,
1895 offsetof(CPUMBState
, regs
[i
]),
/* One TCG global per special register (PC, MSR, ESR, ...). */
1898 for (i
= 0; i
< ARRAY_SIZE(cpu_SR
); i
++) {
1899 cpu_SR
[i
] = tcg_global_mem_new(TCG_AREG0
,
1900 offsetof(CPUMBState
, sregs
[i
]),
1901 special_regnames
[i
]);
1905 void restore_state_to_opc(CPUMBState
*env
, TranslationBlock
*tb
,
1908 env
->sregs
[SR_PC
] = data
[0];