/*
 * Xilinx MicroBlaze emulation for qemu: main translation routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias.
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/helper-proto.h"
#include "microblaze-decode.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-gen.h"

#define SIM_COMPAT 0
#define DISAS_GNU 1
#define DISAS_MB 1
#if DISAS_MB && !SIM_COMPAT
# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DIS(...) do { } while (0)
#endif

#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

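/* Worked example (illustrative, not from the original source): with
 * start = 26 and end = 31 the mask is (1 << 6) - 1 == 0x3f, so
 * EXTRACT_FIELD(ir, 26, 31) == (ir >> 26) & 0x3f, which is exactly the
 * 6-bit major opcode field of a MicroBlaze instruction word.
 */
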
static TCGv env_debug;
static TCGv_ptr cpu_env;
static TCGv cpu_R[32];
static TCGv cpu_SR[18];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
static TCGv env_iflags;
static TCGv env_res_addr;
static TCGv env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time.  */
typedef struct DisasContext {
    MicroBlazeCPU *cpu;
    target_ulong pc;

    /* Decoder.  */
    int type_b;
    uint32_t ir;
    uint8_t opcode;
    uint8_t rd, ra, rb;
    uint16_t imm;

    unsigned int cpustate_changed;
    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags.  */
    unsigned int clear_imm;
    int is_jmp;

#define JMP_NOJMP     0
#define JMP_DIRECT    1
#define JMP_DIRECT_CC 2
#define JMP_INDIRECT  3
    unsigned int jmp;
    uint32_t jmp_pc;

    int abort_at_next_insn;
    int nr_nops;
    struct TranslationBlock *tb;
    int singlestep_enabled;
} DisasContext;

static const char *regnames[] =
{
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
};

static const char *special_regnames[] =
{
    "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
    "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
    "sr16", "sr17", "sr18"
};

/* Sign extend at translation time.  */
static inline int sign_extend(unsigned int val, unsigned int width)
{
    int sval;

    /* LSL.  */
    val <<= 31 - width;
    sval = val;
    /* ASR.  */
    sval >>= 31 - width;
    return sval;
}

static inline void t_sync_flags(DisasContext *dc)
{
    /* Synch the tb dependent flags between translator and runtime.  */
    if (dc->tb_flags != dc->synced_flags) {
        tcg_gen_movi_tl(env_iflags, dc->tb_flags);
        dc->synced_flags = dc->tb_flags;
    }
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    dc->is_jmp = DISAS_UPDATE;
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = dc->tb;
    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb((uintptr_t)tb + n);
    } else {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
        tcg_gen_exit_tb(0);
    }
}

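/* Note (editorial): direct chaining via tcg_gen_goto_tb is only legal
 * when source and destination lie in the same guest page; the fallback
 * path updates SR_PC and exits with tcg_gen_exit_tb(0), forcing the
 * next TB to be found through the hash table instead.
 */
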
static void read_carry(DisasContext *dc, TCGv d)
{
    tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
}

/*
 * write_carry sets the carry bits in MSR based on bit 0 of v.
 * v[31:1] are ignored.
 */
static void write_carry(DisasContext *dc, TCGv v)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_tl(t0, v, 31);
    tcg_gen_sari_tl(t0, t0, 31);
    tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
                    ~(MSR_C | MSR_CC));
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
    tcg_temp_free(t0);
}

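/* Example (illustrative): if v holds 0x80000001, the shl/sar pair
 * replicates bit 0 across all 32 bits (0xffffffff) and the andi leaves
 * exactly MSR_C | MSR_CC set; v = 0x80000000 would clear both. Only
 * bit 0 of v matters, as the comment above states.
 */
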
static void write_carryi(DisasContext *dc, bool carry)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_movi_tl(t0, carry);
    write_carry(dc, t0);
    tcg_temp_free(t0);
}

/* True if ALU operand b is a small immediate that may deserve
   faster treatment.  */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
{
    /* Immediate insn without the imm prefix ?  */
    return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

static inline TCGv *dec_alu_op_b(DisasContext *dc)
{
    if (dc->type_b) {
        if (dc->tb_flags & IMM_FLAG)
            tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
        else
            tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
        return &env_imm;
    }
    return &cpu_R[dc->rb];
}

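/* Note (editorial): dec_alu_op_b centralizes operand-b selection. For
 * type-b (immediate) insns the 16-bit immediate is materialized in
 * env_imm, or-ed into a pending `imm` prefix when IMM_FLAG is set;
 * for register insns a pointer to cpu_R[rb] is returned directly,
 * avoiding a copy.
 */
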
static void dec_add(DisasContext *dc)
{
    unsigned int k, c;
    TCGv cf;

    k = dc->opcode & 4;
    c = dc->opcode & 2;

    LOG_DIS("add%s%s%s r%d r%d r%d\n",
            dc->type_b ? "i" : "", k ? "k" : "", c ? "c" : "",
            dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry.  */
    cf = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 0);
    }

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
}

static void dec_sub(DisasContext *dc)
{
    unsigned int u, cmp, k, c;
    TCGv cf, na;

    u = dc->imm & 2;
    k = dc->opcode & 4;
    c = dc->opcode & 2;
    cmp = (dc->imm & 1) && (!dc->type_b) && k;

    if (cmp) {
        LOG_DIS("cmp%s r%d, r%d ir=%x\n", u ? "u" : "", dc->rd, dc->ra, dc->ir);
        if (dc->rd) {
            if (u)
                gen_helper_cmpu(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            else
                gen_helper_cmp(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
        }
        return;
    }

    LOG_DIS("sub%s%s r%d, r%d r%d\n",
            k ? "k" : "", c ? "c" : "", dc->rd, dc->ra, dc->rb);

    /* Take care of the easy cases first.  */
    if (k) {
        /* k - keep carry, no need to update MSR.  */
        /* If rd == r0, it's a nop.  */
        if (dc->rd) {
            tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

            if (c) {
                /* c - Add carry into the result.  */
                cf = tcg_temp_new();

                read_carry(dc, cf);
                tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
                tcg_temp_free(cf);
            }
        }
        return;
    }

    /* From now on, we can assume k is zero.  So we need to update MSR.  */
    /* Extract carry. And complement a into na.  */
    cf = tcg_temp_new();
    na = tcg_temp_new();
    if (c) {
        read_carry(dc, cf);
    } else {
        tcg_gen_movi_tl(cf, 1);
    }

    /* d = b + ~a + c. carry defaults to 1.  */
    tcg_gen_not_tl(na, cpu_R[dc->ra]);

    if (dc->rd) {
        TCGv ncf = tcg_temp_new();
        gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
        tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
        tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
        write_carry(dc, ncf);
        tcg_temp_free(ncf);
    } else {
        gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
        write_carry(dc, cf);
    }
    tcg_temp_free(cf);
    tcg_temp_free(na);
}

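/* The subtract path above leans on the two's complement identity
 * b - a == b + ~a + 1: na holds ~a, cf supplies the trailing 1 (or the
 * saved MSR carry for the `c` variants, per the comment in the code),
 * and gen_helper_carry computes the carry-out that write_carry folds
 * back into MSR[C]/MSR[CC].
 */
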
static void dec_pattern(DisasContext *dc)
{
    unsigned int mode;
    int l1;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    mode = dc->opcode & 3;
    switch (mode) {
        case 0:
            /* pcmpbf.  */
            LOG_DIS("pcmpbf r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd)
                gen_helper_pcmpbf(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                l1 = gen_new_label();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_EQ,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        case 3:
            LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            l1 = gen_new_label();
            if (dc->rd) {
                TCGv t0 = tcg_temp_local_new();
                tcg_gen_movi_tl(t0, 1);
                tcg_gen_brcond_tl(TCG_COND_NE,
                                  cpu_R[dc->ra], cpu_R[dc->rb], l1);
                tcg_gen_movi_tl(t0, 0);
                gen_set_label(l1);
                tcg_gen_mov_tl(cpu_R[dc->rd], t0);
                tcg_temp_free(t0);
            }
            break;
        default:
            cpu_abort(CPU(dc->cpu),
                      "unsupported pattern insn opcode=%x\n", dc->opcode);
            break;
    }
}

static void dec_and(DisasContext *dc)
{
    unsigned int not;

    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    not = dc->opcode & (1 << 1);
    LOG_DIS("and%s\n", not ? "n" : "");

    if (!dc->rd)
        return;

    if (not) {
        tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    } else
        tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
    if (dc->rd)
        tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
{
    if (!dc->type_b && (dc->imm & (1 << 10))) {
        dec_pattern(dc);
        return;
    }

    LOG_DIS("xor r%d\n", dc->rd);
    if (dc->rd)
        tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static inline void msr_read(DisasContext *dc, TCGv d)
{
    tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv v)
{
    TCGv t;

    t = tcg_temp_new();
    dc->cpustate_changed = 1;
    /* PVR bit is not writable; mask it out of the new value before
       merging, and preserve the current PVR bit of MSR.  */
    tcg_gen_andi_tl(t, v, ~MSR_PVR);
    tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
    tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
    tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0, t1;
    unsigned int sr, to, rn;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    sr = dc->imm & ((1 << 14) - 1);
    to = dc->imm & (1 << 14);
    dc->type_b = 1;
    if (to)
        dc->cpustate_changed = 1;

    /* msrclr and msrset.  */
    if (!(dc->imm & (1 << 15))) {
        unsigned int clr = dc->ir & (1 << 16);

        LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
                dc->rd, dc->imm);

        if (!(dc->cpu->env.pvr.regs[2] & PVR2_USE_MSR_INSTR)) {
            /* nop???  */
            return;
        }

        if ((dc->tb_flags & MSR_EE_FLAG)
            && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        if (dc->rd)
            msr_read(dc, cpu_R[dc->rd]);

        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        msr_read(dc, t0);
        tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));

        if (clr) {
            tcg_gen_not_tl(t1, t1);
            tcg_gen_and_tl(t0, t0, t1);
        } else
            tcg_gen_or_tl(t0, t0, t1);
        msr_write(dc, t0);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        return;
    }

    if (to) {
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }
    }

#if !defined(CONFIG_USER_ONLY)
    /* Catch read/writes to the mmu block.  */
    if ((sr & ~0xff) == 0x1000) {
        sr &= 7;
        LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        if (to)
            gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
        else
            gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
        return;
    }
#endif

    if (to) {
        LOG_DIS("m%ss sr%x r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
        switch (sr) {
            case 0:
                break;
            case 1:
                msr_write(dc, cpu_R[dc->ra]);
                break;
            case 3:
                tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
                break;
            case 5:
                tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
                break;
            case 7:
                tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
                break;
            case 0x800:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
                break;
            default:
                cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
                break;
        }
    } else {
        LOG_DIS("m%ss r%d sr%x imm=%x\n", to ? "t" : "f", dc->rd, sr, dc->imm);

        switch (sr) {
            case 0:
                tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
                break;
            case 1:
                msr_read(dc, cpu_R[dc->rd]);
                break;
            case 3:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
                break;
            case 5:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
                break;
            case 7:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
                break;
            case 0xb:
                tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
                break;
            case 0x800:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
                break;
            case 0x802:
                tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
                break;
            case 0x2000 ... 0x200c:
                rn = sr & 0xf;
                tcg_gen_ld_tl(cpu_R[dc->rd],
                              cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
                break;
            default:
                cpu_abort(cs, "unknown mfs reg %x\n", sr);
                break;
        }
    }

    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[0], 0);
    }
}

/* 64-bit signed mul, lower result in d and upper in d2.  */
static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* 64-bit unsigned muls, lower result in d and upper in d2.  */
static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
{
    TCGv_i64 t0, t1;

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_mul_i64(t0, t0, t1);

    tcg_gen_trunc_i64_i32(d, t0);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_i32(d2, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

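/* Example (illustrative): t_gen_mulu(lo, hi, a, b) with a == b ==
 * 0x10000 widens both operands to 64 bits, multiplies to 0x100000000,
 * and truncates twice: lo receives 0x0 and hi, the top 32 bits after
 * the shift, receives 0x1. t_gen_muls differs only in using sign
 * extension for the widening step.
 */
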
/* Multiplier unit.  */
static void dec_mul(DisasContext *dc)
{
    TCGv d[2];
    unsigned int subcode;

    if ((dc->tb_flags & MSR_EE_FLAG)
         && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
         && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_HW_MUL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    subcode = dc->imm & 3;
    d[0] = tcg_temp_new();
    d[1] = tcg_temp_new();

    if (dc->type_b) {
        LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
        t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
        goto done;
    }

    /* mulh, mulhsu and mulhu are not available if C_USE_HW_MUL is < 2.  */
    if (subcode >= 1 && subcode <= 3
        && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_MUL64_MASK))) {
        /* nop???  */
    }

    switch (subcode) {
        case 0:
            LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(cpu_R[dc->rd], d[1], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 1:
            LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 2:
            LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_muls(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        case 3:
            LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
            t_gen_mulu(d[0], cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
            break;
    }
done:
    tcg_temp_free(d[0]);
    tcg_temp_free(d[1]);
}

static void dec_div(DisasContext *dc)
{
    unsigned int u;

    u = dc->imm & 2;
    LOG_DIS("div\n");

    if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[0] & PVR0_USE_DIV_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }

    if (u)
        gen_helper_divu(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    else
        gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
                        cpu_R[dc->ra]);
    if (dc->rd == 0) {
        tcg_gen_movi_tl(cpu_R[dc->rd], 0);
    }
}

static void dec_barrel(DisasContext *dc)
{
    TCGv t0;
    unsigned int s, t;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !(dc->cpu->env.pvr.regs[0] & PVR0_USE_BARREL_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    s = dc->imm & (1 << 10);
    t = dc->imm & (1 << 9);

    LOG_DIS("bs%s%s r%d r%d r%d\n",
            s ? "l" : "r", t ? "a" : "l", dc->rd, dc->ra, dc->rb);

    t0 = tcg_temp_new();

    tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
    tcg_gen_andi_tl(t0, t0, 31);

    if (s)
        tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    else {
        if (t)
            tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
        else
            tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
    }
    tcg_temp_free(t0);
}

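/* Note (editorial): the andi with 31 clamps the shift amount to its
 * low 5 bits, so as implemented here a register shift count of 33
 * behaves like 1. The s bit selects shift-left vs shift-right and,
 * for right shifts, t selects arithmetic (sar) vs logical (shr).
 */
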
static void dec_bit(DisasContext *dc)
{
    CPUState *cs = CPU(dc->cpu);
    TCGv t0;
    unsigned int op;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    op = dc->ir & ((1 << 9) - 1);
    switch (op) {
        case 0x21:
            /* src.  */
            t0 = tcg_temp_new();

            LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
            }
            tcg_temp_free(t0);
            break;

        case 0x1:
        case 0x41:
            /* srl.  */
            LOG_DIS("srl r%d r%d\n", dc->rd, dc->ra);

            /* Update carry. Note that write carry only looks at the LSB.  */
            write_carry(dc, cpu_R[dc->ra]);
            if (dc->rd) {
                if (op == 0x41)
                    tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
                else
                    tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
            }
            break;
        case 0x60:
            LOG_DIS("ext8s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext8s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x61:
            LOG_DIS("ext16s r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_ext16s_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x64:
        case 0x66:
        case 0x74:
        case 0x76:
            /* wdc.  */
            LOG_DIS("wdc r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0x68:
            /* wic.  */
            LOG_DIS("wic r%d\n", dc->ra);
            if ((dc->tb_flags & MSR_EE_FLAG)
                 && mem_index == MMU_USER_IDX) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
                return;
            }
            break;
        case 0xe0:
            if ((dc->tb_flags & MSR_EE_FLAG)
                && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
                && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR))) {
                tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
                t_gen_raise_exception(dc, EXCP_HW_EXCP);
            }
            if (dc->cpu->env.pvr.regs[2] & PVR2_USE_PCMP_INSTR) {
                gen_helper_clz(cpu_R[dc->rd], cpu_R[dc->ra]);
            }
            break;
        case 0x1e0:
            /* swapb.  */
            LOG_DIS("swapb r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_bswap32_i32(cpu_R[dc->rd], cpu_R[dc->ra]);
            break;
        case 0x1e2:
            /* swaph.  */
            LOG_DIS("swaph r%d r%d\n", dc->rd, dc->ra);
            tcg_gen_rotri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 16);
            break;
        default:
            cpu_abort(cs, "unknown bit oc=%x op=%x rd=%d ra=%d rb=%d\n",
                      dc->pc, op, dc->rd, dc->ra, dc->rb);
            break;
    }
}

static inline void sync_jmpstate(DisasContext *dc)
{
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->jmp == JMP_DIRECT) {
            tcg_gen_movi_tl(env_btaken, 1);
        }
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
    }
}

static void dec_imm(DisasContext *dc)
{
    LOG_DIS("imm %x\n", dc->imm << 16);
    tcg_gen_movi_tl(env_imm, (dc->imm << 16));
    dc->tb_flags |= IMM_FLAG;
    dc->clear_imm = 0;
}

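/* Worked example (illustrative): `imm 0x1234` followed by
 * `addi r3, r0, 0x5678` leaves env_imm = 0x12340000 with IMM_FLAG set;
 * dec_alu_op_b then ors in the low half, so the add sees the full
 * 32-bit constant 0x12345678 rather than a sign-extended 0x5678.
 */
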
static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
    unsigned int extimm = dc->tb_flags & IMM_FLAG;
    /* Should be set to one if r1 is used by loadstores.  */
    int stackprot = 0;

    /* All load/stores use ra.  */
    if (dc->ra == 1) {
        stackprot = 1;
    }

    /* Treat the common cases first.  */
    if (!dc->type_b) {
        /* If any of the regs is r0, return a ptr to the other.  */
        if (dc->ra == 0) {
            return &cpu_R[dc->rb];
        } else if (dc->rb == 0) {
            return &cpu_R[dc->ra];
        }

        if (dc->rb == 1) {
            stackprot = 1;
        }

        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

        if (stackprot) {
            gen_helper_stackprot(cpu_env, *t);
        }
        return t;
    }
    /* Immediate.  */
    if (!extimm) {
        if (dc->imm == 0) {
            return &cpu_R[dc->ra];
        }
        *t = tcg_temp_new();
        tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
    } else {
        *t = tcg_temp_new();
        tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
    }

    if (stackprot) {
        gen_helper_stackprot(cpu_env, *t);
    }
    return t;
}

static void dec_load(DisasContext *dc)
{
    TCGv t, v, *addr;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("l%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");

    t_sync_flags(dc);
    addr = compute_ldst_addr(dc, &t);

    /*
     * When doing reverse accesses we need to do two things.
     *
     * 1. Reverse the address wrt endianness.
     * 2. Byteswap the data lanes on the way back into the CPU core.
     */
    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }

    /* lwx does not throw unaligned access errors, so force alignment */
    if (ex) {
        /* Force addr into the temp.  */
        if (addr != &t) {
            t = tcg_temp_new();
            tcg_gen_mov_tl(t, *addr);
            addr = &t;
        }
        tcg_gen_andi_tl(t, t, ~3);
    }

    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);

    /* Verify alignment if needed.  */
    /*
     * Microblaze gives MMU faults priority over faults due to
     * unaligned addresses. That's why we speculatively do the load
     * into v. If the load succeeds, we verify alignment of the
     * address and if that succeeds we write into the destination reg.
     */
    v = tcg_temp_new();
    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);

    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(0), tcg_const_tl(size - 1));
    }

    if (ex) {
        tcg_gen_mov_tl(env_res_addr, *addr);
        tcg_gen_mov_tl(env_res_val, v);
    }
    if (dc->rd) {
        tcg_gen_mov_tl(cpu_R[dc->rd], v);
    }
    tcg_temp_free(v);

    if (ex) { /* lwx */
        /* no support for AXI exclusive so always clear C */
        write_carryi(dc, 0);
    }

    if (addr == &t)
        tcg_temp_free(t);
}

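/* Worked example (illustrative): for a reversed byte load the low two
 * address bits are replaced by 3 - (addr & 3), so byte offsets map
 * 0->3, 1->2, 2->1, 3->0 within the word; halfword reverses xor the
 * address with 2 (0 <-> 2). Word-sized reverses only swap data lanes,
 * which the MO_BSWAP memop flag already handles.
 */
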
static void dec_store(DisasContext *dc)
{
    TCGv t, *addr, swx_addr;
    int swx_skip = 0;
    unsigned int size, rev = 0, ex = 0;
    TCGMemOp mop;

    mop = dc->opcode & 3;
    size = 1 << mop;
    if (!dc->type_b) {
        rev = (dc->ir >> 9) & 1;
        ex = (dc->ir >> 10) & 1;
    }
    mop |= MO_TE;
    if (rev) {
        mop ^= MO_BSWAP;
    }

    if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    LOG_DIS("s%d%s%s%s\n", size, dc->type_b ? "i" : "", rev ? "r" : "",
                                                        ex ? "x" : "");
    t_sync_flags(dc);
    /* If we get a fault on a dslot, the jmpstate better be in sync.  */
    sync_jmpstate(dc);
    addr = compute_ldst_addr(dc, &t);

    swx_addr = tcg_temp_local_new();
    if (ex) { /* swx */
        TCGv tval;

        /* Force addr into the swx_addr.  */
        tcg_gen_mov_tl(swx_addr, *addr);
        addr = &swx_addr;
        /* swx does not throw unaligned access errors, so force alignment */
        tcg_gen_andi_tl(swx_addr, swx_addr, ~3);

        write_carryi(dc, 1);
        swx_skip = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

        /* Compare the value loaded at lwx with current contents of
           the reserved location.
           FIXME: This only works for system emulation where we can expect
           this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads.  */
        tval = tcg_temp_new();
        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
                           MO_TEUL);
        tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
        write_carryi(dc, 0);
        tcg_temp_free(tval);
    }

    if (rev && size != 4) {
        /* Endian reverse the address. t is addr.  */
        switch (size) {
            case 1:
            {
                /* 00 -> 11
                   01 -> 10
                   10 -> 01
                   11 -> 00 */
                TCGv low = tcg_temp_new();

                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_mov_tl(t, *addr);
                    addr = &t;
                }

                tcg_gen_andi_tl(low, t, 3);
                tcg_gen_sub_tl(low, tcg_const_tl(3), low);
                tcg_gen_andi_tl(t, t, ~3);
                tcg_gen_or_tl(t, t, low);
                tcg_gen_mov_tl(env_imm, t);
                addr = &env_imm;
                tcg_temp_free(low);
                break;
            }

            case 2:
                /* 00 -> 10
                   10 -> 00.  */
                /* Force addr into the temp.  */
                if (addr != &t) {
                    t = tcg_temp_new();
                    tcg_gen_xori_tl(t, *addr, 2);
                    addr = &t;
                } else {
                    tcg_gen_xori_tl(t, t, 2);
                }
                break;
            default:
                cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
                break;
        }
    }
    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);

    /* Verify alignment if needed.  */
    if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
        tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
        /* FIXME: if the alignment is wrong, we should restore the value
         *        in memory. One possible way to achieve this is to probe
         *        the MMU prior to the memaccess, that way we could put
         *        the alignment checks in between the probe and the mem
         *        access.
         */
        gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
                            tcg_const_tl(1), tcg_const_tl(size - 1));
    }

    if (ex) {
        gen_set_label(swx_skip);
    }
    tcg_temp_free(swx_addr);

    if (addr == &t)
        tcg_temp_free(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
                           TCGv d, TCGv a, TCGv b)
{
    switch (cc) {
        case CC_EQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
            break;
        case CC_NE:
            tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
            break;
        case CC_LT:
            tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
            break;
        case CC_LE:
            tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
            break;
        case CC_GE:
            tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
            break;
        case CC_GT:
            tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
            break;
        default:
            cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
            break;
    }
}

static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
{
    int l1;

    l1 = gen_new_label();
    /* Conditional jmp.  */
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
    tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
    tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
    gen_set_label(l1);
}

static void dec_bcc(DisasContext *dc)
{
    unsigned int cc;
    unsigned int dslot;

    cc = EXTRACT_FIELD(dc->ir, 21, 23);
    dslot = dc->ir & (1 << 25);
    LOG_DIS("bcc%s r%d %x\n", dslot ? "d" : "", dc->ra, dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }

    if (dec_alu_op_b_is_small_imm(dc)) {
        int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend.  */

        tcg_gen_movi_tl(env_btarget, dc->pc + offset);
        dc->jmp = JMP_DIRECT_CC;
        dc->jmp_pc = dc->pc + offset;
    } else {
        dc->jmp = JMP_INDIRECT;
        tcg_gen_movi_tl(env_btarget, dc->pc);
        tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
    }
    eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
}

static void dec_br(DisasContext *dc)
{
    unsigned int dslot, link, abs, mbar;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    dslot = dc->ir & (1 << 20);
    abs = dc->ir & (1 << 19);
    link = dc->ir & (1 << 18);

    /* Memory barrier.  */
    mbar = (dc->ir >> 16) & 31;
    if (mbar == 2 && dc->imm == 4) {
        /* mbar IMM & 16 decodes to sleep.  */
        if (dc->rd & 16) {
            TCGv_i32 tmp_hlt = tcg_const_i32(EXCP_HLT);
            TCGv_i32 tmp_1 = tcg_const_i32(1);

            LOG_DIS("sleep\n");

            t_sync_flags(dc);
            tcg_gen_st_i32(tmp_1, cpu_env,
                           -offsetof(MicroBlazeCPU, env)
                           +offsetof(CPUState, halted));
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
            gen_helper_raise_exception(cpu_env, tmp_hlt);
            tcg_temp_free_i32(tmp_hlt);
            tcg_temp_free_i32(tmp_1);
            return;
        }
        LOG_DIS("mbar %d\n", dc->rd);
        /* Break the TB.  */
        dc->cpustate_changed = 1;
        return;
    }

    LOG_DIS("br%s%s%s%s imm=%x\n",
             abs ? "a" : "", link ? "l" : "",
             dc->type_b ? "i" : "", dslot ? "d" : "",
             dc->imm);

    dc->delayed_branch = 1;
    if (dslot) {
        dc->delayed_branch = 2;
        dc->tb_flags |= D_FLAG;
        tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                      cpu_env, offsetof(CPUMBState, bimm));
    }
    if (link && dc->rd)
        tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);

    dc->jmp = JMP_INDIRECT;
    if (abs) {
        tcg_gen_movi_tl(env_btaken, 1);
        tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
        if (link && !dslot) {
            if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
                t_gen_raise_exception(dc, EXCP_BREAK);
            if (dc->imm == 0) {
                if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
                    tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
                    t_gen_raise_exception(dc, EXCP_HW_EXCP);
                    return;
                }

                t_gen_raise_exception(dc, EXCP_DEBUG);
            }
        }
    } else {
        if (dec_alu_op_b_is_small_imm(dc)) {
            dc->jmp = JMP_DIRECT;
            dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
        } else {
            tcg_gen_movi_tl(env_btaken, 1);
            tcg_gen_movi_tl(env_btarget, dc->pc);
            tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
        }
    }
}

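/* Note (editorial, as I read the flow): delayed_branch is set to 2
 * when the branch has a delay slot, so it reaches zero only after the
 * slot insn has been translated; D_FLAG marks the slot in tb_flags and
 * bimm records whether the branch was imm-prefixed, letting exceptions
 * taken inside the slot unwind the branch state correctly.
 */
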
static inline void do_rti(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
    TCGv t0, t1;
    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
    tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
    tcg_gen_shri_tl(t0, t1, 1);
    tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));

    tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
    tcg_gen_or_tl(t1, t1, t0);
    msr_write(dc, t1);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    dc->tb_flags &= ~DRTE_FLAG;
}

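/* Note (editorial, assuming the usual MicroBlaze MSR layout where the
 * saved copies MSR[UMS] and MSR[VMS] sit one bit above the live
 * MSR[UM] and MSR[VM]): the shri by 1 in do_rti/do_rtb/do_rte moves
 * each saved mode bit into its live position before it is or-ed back,
 * which is how rtid/rtbd/rted restore the pre-exception user/virtual
 * mode.
 */
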
static void dec_rts(DisasContext *dc)
{
    unsigned int b_bit, i_bit, e_bit;
    int mem_index = cpu_mmu_index(&dc->cpu->env);

    i_bit = dc->ir & (1 << 21);
    b_bit = dc->ir & (1 << 22);
    e_bit = dc->ir & (1 << 23);

    dc->delayed_branch = 2;
    dc->tb_flags |= D_FLAG;
    tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
                  cpu_env, offsetof(CPUMBState, bimm));

    if (i_bit) {
        LOG_DIS("rtid ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTI_FLAG;
    } else if (b_bit) {
        LOG_DIS("rtbd ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTB_FLAG;
    } else if (e_bit) {
        LOG_DIS("rted ir=%x\n", dc->ir);
        if ((dc->tb_flags & MSR_EE_FLAG)
             && mem_index == MMU_USER_IDX) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
        }
        dc->tb_flags |= DRTE_FLAG;
    } else
        LOG_DIS("rts ir=%x\n", dc->ir);

    dc->jmp = JMP_INDIRECT;
    tcg_gen_movi_tl(env_btaken, 1);
    tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
    int r;

    r = dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU2_MASK;

    if (!r && (dc->tb_flags & MSR_EE_FLAG)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
    }
    return r;
}

static void dec_fpu(DisasContext *dc)
{
    unsigned int fpu_insn;

    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
          && !((dc->cpu->env.pvr.regs[2] & PVR2_USE_FPU_MASK))) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    fpu_insn = (dc->ir >> 7) & 7;

    switch (fpu_insn) {
        case 0:
            gen_helper_fadd(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 1:
            gen_helper_frsub(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                             cpu_R[dc->rb]);
            break;

        case 2:
            gen_helper_fmul(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 3:
            gen_helper_fdiv(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra],
                            cpu_R[dc->rb]);
            break;

        case 4:
            switch ((dc->ir >> 4) & 7) {
                case 0:
                    gen_helper_fcmp_un(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 1:
                    gen_helper_fcmp_lt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 2:
                    gen_helper_fcmp_eq(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 3:
                    gen_helper_fcmp_le(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 4:
                    gen_helper_fcmp_gt(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 5:
                    gen_helper_fcmp_ne(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                case 6:
                    gen_helper_fcmp_ge(cpu_R[dc->rd], cpu_env,
                                       cpu_R[dc->ra], cpu_R[dc->rb]);
                    break;
                default:
                    qemu_log_mask(LOG_UNIMP,
                                  "unimplemented fcmp fpu_insn=%x pc=%x"
                                  " opc=%x\n",
                                  fpu_insn, dc->pc, dc->opcode);
                    dc->abort_at_next_insn = 1;
                    break;
            }
            break;

        case 5:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_flt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 6:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fint(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        case 7:
            if (!dec_check_fpuv2(dc)) {
                return;
            }
            gen_helper_fsqrt(cpu_R[dc->rd], cpu_env, cpu_R[dc->ra]);
            break;

        default:
            qemu_log_mask(LOG_UNIMP, "unimplemented FPU insn fpu_insn=%x pc=%x"
                          " opc=%x\n",
                          fpu_insn, dc->pc, dc->opcode);
            dc->abort_at_next_insn = 1;
            break;
    }
}

static void dec_null(DisasContext *dc)
{
    if ((dc->tb_flags & MSR_EE_FLAG)
          && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }
    qemu_log("unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
    dc->abort_at_next_insn = 1;
}

/* Insns connected to FSL or AXI stream attached devices.  */
static void dec_stream(DisasContext *dc)
{
    int mem_index = cpu_mmu_index(&dc->cpu->env);
    TCGv_i32 t_id, t_ctrl;
    int ctrl;

    LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
            dc->type_b ? "" : "d", dc->imm);

    if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
        tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
        t_gen_raise_exception(dc, EXCP_HW_EXCP);
        return;
    }

    t_id = tcg_temp_new();
    if (dc->type_b) {
        tcg_gen_movi_tl(t_id, dc->imm & 0xf);
        ctrl = dc->imm >> 10;
    } else {
        tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
        ctrl = dc->imm >> 5;
    }

    t_ctrl = tcg_const_tl(ctrl);

    if (dc->rd == 0) {
        gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
    } else {
        gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
    }
    tcg_temp_free(t_id);
    tcg_temp_free(t_ctrl);
}

static struct decoder_info {
    struct {
        uint32_t bits;
        uint32_t mask;
    };
    void (*dec)(DisasContext *dc);
} decinfo[] = {
    {DEC_ADD, dec_add},
    {DEC_SUB, dec_sub},
    {DEC_AND, dec_and},
    {DEC_XOR, dec_xor},
    {DEC_OR, dec_or},
    {DEC_BIT, dec_bit},
    {DEC_BARREL, dec_barrel},
    {DEC_LD, dec_load},
    {DEC_ST, dec_store},
    {DEC_IMM, dec_imm},
    {DEC_BR, dec_br},
    {DEC_BCC, dec_bcc},
    {DEC_RTS, dec_rts},
    {DEC_FPU, dec_fpu},
    {DEC_MUL, dec_mul},
    {DEC_DIV, dec_div},
    {DEC_MSR, dec_msr},
    {DEC_STREAM, dec_stream},
    {{0, 0}, dec_null}
};

static inline void decode(DisasContext *dc, uint32_t ir)
{
    int i;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(dc->pc);
    }

    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    if (dc->ir)
        dc->nr_nops = 0;
    else {
        if ((dc->tb_flags & MSR_EE_FLAG)
              && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
              && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
            tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
            t_gen_raise_exception(dc, EXCP_HW_EXCP);
            return;
        }

        LOG_DIS("nr_nops=%d\t", dc->nr_nops);
        dc->nr_nops++;
        if (dc->nr_nops > 4) {
            cpu_abort(CPU(dc->cpu), "fetching nop sequence\n");
        }
    }
    /* bit 2 seems to indicate insn type.  */
    dc->type_b = ir & (1 << 29);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);
    dc->rd = EXTRACT_FIELD(ir, 21, 25);
    dc->ra = EXTRACT_FIELD(ir, 16, 20);
    dc->rb = EXTRACT_FIELD(ir, 11, 15);
    dc->imm = EXTRACT_FIELD(ir, 0, 15);

    /* Large switch for all insns.  */
    for (i = 0; i < ARRAY_SIZE(decinfo); i++) {
        if ((dc->opcode & decinfo[i].mask) == decinfo[i].bits) {
            decinfo[i].dec(dc);
            break;
        }
    }
}

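/* Illustrative encoding sketch (standard MicroBlaze type A/B layout;
 * note the comment above counts bits from the MSB per Xilinx
 * convention, so its "bit 2" is ir & (1 << 29) here):
 *   opcode = ir[31:26], rd = ir[25:21], ra = ir[20:16] (bit 0 = LSB)
 *   type A: rb in ir[15:11], remaining low bits extend the opcode
 *   type B: ir[15:0] carries a 16-bit immediate
 */
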
static void check_breakpoint(CPUMBState *env, DisasContext *dc)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == dc->pc) {
                t_gen_raise_exception(dc, EXCP_DEBUG);
                dc->is_jmp = DISAS_UPDATE;
            }
        }
    }
}

/* generate intermediate code for basic block 'tb'.  */
static inline void
gen_intermediate_code_internal(MicroBlazeCPU *cpu, TranslationBlock *tb,
                               bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUMBState *env = &cpu->env;
    uint16_t *gen_opc_end;
    uint32_t pc_start;
    int j, lj;
    struct DisasContext ctx;
    struct DisasContext *dc = &ctx;
    uint32_t next_page_start, org_flags;
    target_ulong npc;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    dc->cpu = cpu;
    dc->tb = tb;
    org_flags = dc->synced_flags = dc->tb_flags = tb->flags;

    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    dc->jmp = 0;
    dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
    if (dc->delayed_branch) {
        dc->jmp = JMP_INDIRECT;
    }
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cpustate_changed = 0;
    dc->abort_at_next_insn = 0;
    dc->nr_nops = 0;

    if (pc_start & 3) {
        cpu_abort(cs, "Microblaze: unaligned PC=%x\n", pc_start);
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
#if !SIM_COMPAT
        qemu_log("--------------\n");
        log_cpu_state(CPU(cpu), 0);
#endif
    }

    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    do {
#if SIM_COMPAT
        if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
            gen_helper_debug();
        }
#endif
        check_breakpoint(env, dc);

        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = dc->pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }

        /* Pretty disas.  */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        dc->clear_imm = 1;
        decode(dc, cpu_ldl_code(env, dc->pc));
        if (dc->clear_imm)
            dc->tb_flags &= ~IMM_FLAG;
        dc->pc += 4;
        num_insns++;

        if (dc->delayed_branch) {
            dc->delayed_branch--;
            if (!dc->delayed_branch) {
                if (dc->tb_flags & DRTI_FLAG)
                    do_rti(dc);
                if (dc->tb_flags & DRTB_FLAG)
                    do_rtb(dc);
                if (dc->tb_flags & DRTE_FLAG)
                    do_rte(dc);
                /* Clear the delay slot flag.  */
                dc->tb_flags &= ~D_FLAG;
                /* If it is a direct jump, try direct chaining.  */
                if (dc->jmp == JMP_INDIRECT) {
                    eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
                    dc->is_jmp = DISAS_JUMP;
                } else if (dc->jmp == JMP_DIRECT) {
                    t_sync_flags(dc);
                    gen_goto_tb(dc, 0, dc->jmp_pc);
                    dc->is_jmp = DISAS_TB_JUMP;
                } else if (dc->jmp == JMP_DIRECT_CC) {
                    int l1;

                    t_sync_flags(dc);
                    l1 = gen_new_label();
                    /* Conditional jmp.  */
                    tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
                    gen_goto_tb(dc, 1, dc->pc);
                    gen_set_label(l1);
                    gen_goto_tb(dc, 0, dc->jmp_pc);

                    dc->is_jmp = DISAS_TB_JUMP;
                }
                break;
            }
        }
        if (cs->singlestep_enabled) {
            break;
        }
    } while (!dc->is_jmp && !dc->cpustate_changed
             && tcg_ctx.gen_opc_ptr < gen_opc_end
             && !singlestep
             && (dc->pc < next_page_start)
             && num_insns < max_insns);

    npc = dc->pc;
    if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
        if (dc->tb_flags & D_FLAG) {
            dc->is_jmp = DISAS_UPDATE;
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
            sync_jmpstate(dc);
        } else
            npc = dc->jmp_pc;
    }

    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Force an update if the per-tb cpu state has changed.  */
    if (dc->is_jmp == DISAS_NEXT
        && (dc->cpustate_changed || org_flags != dc->tb_flags)) {
        dc->is_jmp = DISAS_UPDATE;
        tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
    }
    t_sync_flags(dc);

    if (unlikely(cs->singlestep_enabled)) {
        TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

        if (dc->is_jmp != DISAS_JUMP) {
            tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
        }
        gen_helper_raise_exception(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
    } else {
        switch (dc->is_jmp) {
            case DISAS_NEXT:
                gen_goto_tb(dc, 1, npc);
                break;
            default:
            case DISAS_JUMP:
            case DISAS_UPDATE:
                /* indicate that the hash table must be used
                   to find the next TB */
                tcg_gen_exit_tb(0);
                break;
            case DISAS_TB_JUMP:
                /* nothing more to generate */
                break;
        }
    }
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
#if !SIM_COMPAT
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("\n");
#if DISAS_GNU
        log_target_disas(env, pc_start, dc->pc - pc_start, 0);
#endif
        qemu_log("\nisize=%d osize=%td\n",
                 dc->pc - pc_start, tcg_ctx.gen_opc_ptr -
                 tcg_ctx.gen_opc_buf);
    }
#endif
#endif
    assert(!dc->abort_at_next_insn);
}

void gen_intermediate_code(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUMBState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(mb_env_get_cpu(env), tb, true);
}

void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                       int flags)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    int i;

    if (!env || !f)
        return;

    cpu_fprintf(f, "IN: PC=%x %s\n",
                env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
    cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
             env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
             env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
    cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
             env->btaken, env->btarget,
             (env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
             (env->sregs[SR_MSR] & MSR_EIP),
             (env->sregs[SR_MSR] & MSR_IE));

    for (i = 0; i < 32; i++) {
        cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0)
            cpu_fprintf(f, "\n");
    }
    cpu_fprintf(f, "\n\n");
}

MicroBlazeCPU *cpu_mb_init(const char *cpu_model)
{
    MicroBlazeCPU *cpu;

    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

void mb_tcg_init(void)
{
    int i;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    env_debug = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, debug),
                    "debug0");
    env_iflags = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, iflags),
                    "iflags");
    env_imm = tcg_global_mem_new(TCG_AREG0,
                    offsetof(CPUMBState, imm),
                    "imm");
    env_btarget = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btarget),
                     "btarget");
    env_btaken = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, btaken),
                     "btaken");
    env_res_addr = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_addr),
                     "res_addr");
    env_res_val = tcg_global_mem_new(TCG_AREG0,
                     offsetof(CPUMBState, res_val),
                     "res_val");
    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, regs[i]),
                          regnames[i]);
    }
    for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
        cpu_SR[i] = tcg_global_mem_new(TCG_AREG0,
                          offsetof(CPUMBState, sregs[i]),
                          special_regnames[i]);
    }
}

void restore_state_to_opc(CPUMBState *env, TranslationBlock *tb, int pc_pos)
{
    env->sregs[SR_PC] = tcg_ctx.gen_opc_pc[pc_pos];
}