2 * LatticeMico32 main translation routines.
4 * Copyright (c) 2010 Michael Walle <michael@walle.cc>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "exec/helper-proto.h"
24 #include "exec/exec-all.h"
25 #include "exec/translator.h"
28 #include "exec/cpu_ldst.h"
29 #include "hw/lm32/lm32_pic.h"
31 #include "exec/helper-gen.h"
33 #include "trace-tcg.h"
39 #define LOG_DIS(...) \
42 qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \
46 #define EXTRACT_FIELD(src, start, end) \
47 (((src) >> start) & ((1 << (end - start + 1)) - 1))
51 /* is_jmp field values */
52 #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
53 #define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
54 #define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */
56 static TCGv_env cpu_env
;
57 static TCGv cpu_R
[32];
67 static TCGv cpu_bp
[4];
68 static TCGv cpu_wp
[4];
70 #include "exec/gen-icount.h"
79 /* This is the state at translation time. */
80 typedef struct DisasContext
{
87 uint8_t r0
, r1
, r2
, csr
;
92 unsigned int delayed_branch
;
93 unsigned int tb_flags
, synced_flags
; /* tb dependent flags. */
96 struct TranslationBlock
*tb
;
97 int singlestep_enabled
;
100 uint8_t num_breakpoints
;
101 uint8_t num_watchpoints
;
104 static const char *regnames
[] = {
105 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
106 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
107 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
108 "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
109 "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
/* Return the WIDTH least-significant bits of VAL with all upper bits
   cleared (zero extension). */
static inline int zero_extend(unsigned int val, int width)
{
    const unsigned int mask = (1 << width) - 1;

    return val & mask;
}
118 static inline int sign_extend(unsigned int val
, int width
)
131 static inline void t_gen_raise_exception(DisasContext
*dc
, uint32_t index
)
133 TCGv_i32 tmp
= tcg_const_i32(index
);
135 gen_helper_raise_exception(cpu_env
, tmp
);
136 tcg_temp_free_i32(tmp
);
139 static inline void t_gen_illegal_insn(DisasContext
*dc
)
141 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
142 gen_helper_ill(cpu_env
);
145 static inline bool use_goto_tb(DisasContext
*dc
, target_ulong dest
)
147 if (unlikely(dc
->singlestep_enabled
)) {
151 #ifndef CONFIG_USER_ONLY
152 return (dc
->tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
158 static void gen_goto_tb(DisasContext
*dc
, int n
, target_ulong dest
)
160 if (use_goto_tb(dc
, dest
)) {
162 tcg_gen_movi_tl(cpu_pc
, dest
);
163 tcg_gen_exit_tb((uintptr_t)dc
->tb
+ n
);
165 tcg_gen_movi_tl(cpu_pc
, dest
);
166 if (dc
->singlestep_enabled
) {
167 t_gen_raise_exception(dc
, EXCP_DEBUG
);
173 static void dec_add(DisasContext
*dc
)
175 if (dc
->format
== OP_FMT_RI
) {
176 if (dc
->r0
== R_R0
) {
177 if (dc
->r1
== R_R0
&& dc
->imm16
== 0) {
180 LOG_DIS("mvi r%d, %d\n", dc
->r1
, sign_extend(dc
->imm16
, 16));
183 LOG_DIS("addi r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
184 sign_extend(dc
->imm16
, 16));
187 LOG_DIS("add r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
190 if (dc
->format
== OP_FMT_RI
) {
191 tcg_gen_addi_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
192 sign_extend(dc
->imm16
, 16));
194 tcg_gen_add_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
198 static void dec_and(DisasContext
*dc
)
200 if (dc
->format
== OP_FMT_RI
) {
201 LOG_DIS("andi r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
202 zero_extend(dc
->imm16
, 16));
204 LOG_DIS("and r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
207 if (dc
->format
== OP_FMT_RI
) {
208 tcg_gen_andi_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
209 zero_extend(dc
->imm16
, 16));
211 if (dc
->r0
== 0 && dc
->r1
== 0 && dc
->r2
== 0) {
212 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
213 gen_helper_hlt(cpu_env
);
215 tcg_gen_and_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
220 static void dec_andhi(DisasContext
*dc
)
222 LOG_DIS("andhi r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm16
);
224 tcg_gen_andi_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], (dc
->imm16
<< 16));
227 static void dec_b(DisasContext
*dc
)
229 if (dc
->r0
== R_RA
) {
231 } else if (dc
->r0
== R_EA
) {
233 } else if (dc
->r0
== R_BA
) {
236 LOG_DIS("b r%d\n", dc
->r0
);
239 /* restore IE.IE in case of an eret */
240 if (dc
->r0
== R_EA
) {
241 TCGv t0
= tcg_temp_new();
242 TCGLabel
*l1
= gen_new_label();
243 tcg_gen_andi_tl(t0
, cpu_ie
, IE_EIE
);
244 tcg_gen_ori_tl(cpu_ie
, cpu_ie
, IE_IE
);
245 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, IE_EIE
, l1
);
246 tcg_gen_andi_tl(cpu_ie
, cpu_ie
, ~IE_IE
);
249 } else if (dc
->r0
== R_BA
) {
250 TCGv t0
= tcg_temp_new();
251 TCGLabel
*l1
= gen_new_label();
252 tcg_gen_andi_tl(t0
, cpu_ie
, IE_BIE
);
253 tcg_gen_ori_tl(cpu_ie
, cpu_ie
, IE_IE
);
254 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, IE_BIE
, l1
);
255 tcg_gen_andi_tl(cpu_ie
, cpu_ie
, ~IE_IE
);
259 tcg_gen_mov_tl(cpu_pc
, cpu_R
[dc
->r0
]);
261 dc
->is_jmp
= DISAS_JUMP
;
264 static void dec_bi(DisasContext
*dc
)
266 LOG_DIS("bi %d\n", sign_extend(dc
->imm26
<< 2, 26));
268 gen_goto_tb(dc
, 0, dc
->pc
+ (sign_extend(dc
->imm26
<< 2, 26)));
270 dc
->is_jmp
= DISAS_TB_JUMP
;
273 static inline void gen_cond_branch(DisasContext
*dc
, int cond
)
275 TCGLabel
*l1
= gen_new_label();
276 tcg_gen_brcond_tl(cond
, cpu_R
[dc
->r0
], cpu_R
[dc
->r1
], l1
);
277 gen_goto_tb(dc
, 0, dc
->pc
+ 4);
279 gen_goto_tb(dc
, 1, dc
->pc
+ (sign_extend(dc
->imm16
<< 2, 16)));
280 dc
->is_jmp
= DISAS_TB_JUMP
;
283 static void dec_be(DisasContext
*dc
)
285 LOG_DIS("be r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
286 sign_extend(dc
->imm16
, 16) * 4);
288 gen_cond_branch(dc
, TCG_COND_EQ
);
291 static void dec_bg(DisasContext
*dc
)
293 LOG_DIS("bg r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
294 sign_extend(dc
->imm16
, 16 * 4));
296 gen_cond_branch(dc
, TCG_COND_GT
);
299 static void dec_bge(DisasContext
*dc
)
301 LOG_DIS("bge r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
302 sign_extend(dc
->imm16
, 16) * 4);
304 gen_cond_branch(dc
, TCG_COND_GE
);
307 static void dec_bgeu(DisasContext
*dc
)
309 LOG_DIS("bgeu r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
310 sign_extend(dc
->imm16
, 16) * 4);
312 gen_cond_branch(dc
, TCG_COND_GEU
);
315 static void dec_bgu(DisasContext
*dc
)
317 LOG_DIS("bgu r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
318 sign_extend(dc
->imm16
, 16) * 4);
320 gen_cond_branch(dc
, TCG_COND_GTU
);
323 static void dec_bne(DisasContext
*dc
)
325 LOG_DIS("bne r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
326 sign_extend(dc
->imm16
, 16) * 4);
328 gen_cond_branch(dc
, TCG_COND_NE
);
331 static void dec_call(DisasContext
*dc
)
333 LOG_DIS("call r%d\n", dc
->r0
);
335 tcg_gen_movi_tl(cpu_R
[R_RA
], dc
->pc
+ 4);
336 tcg_gen_mov_tl(cpu_pc
, cpu_R
[dc
->r0
]);
338 dc
->is_jmp
= DISAS_JUMP
;
341 static void dec_calli(DisasContext
*dc
)
343 LOG_DIS("calli %d\n", sign_extend(dc
->imm26
, 26) * 4);
345 tcg_gen_movi_tl(cpu_R
[R_RA
], dc
->pc
+ 4);
346 gen_goto_tb(dc
, 0, dc
->pc
+ (sign_extend(dc
->imm26
<< 2, 26)));
348 dc
->is_jmp
= DISAS_TB_JUMP
;
351 static inline void gen_compare(DisasContext
*dc
, int cond
)
355 if (dc
->format
== OP_FMT_RI
) {
359 i
= zero_extend(dc
->imm16
, 16);
362 i
= sign_extend(dc
->imm16
, 16);
366 tcg_gen_setcondi_tl(cond
, cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], i
);
368 tcg_gen_setcond_tl(cond
, cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
372 static void dec_cmpe(DisasContext
*dc
)
374 if (dc
->format
== OP_FMT_RI
) {
375 LOG_DIS("cmpei r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
376 sign_extend(dc
->imm16
, 16));
378 LOG_DIS("cmpe r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
381 gen_compare(dc
, TCG_COND_EQ
);
384 static void dec_cmpg(DisasContext
*dc
)
386 if (dc
->format
== OP_FMT_RI
) {
387 LOG_DIS("cmpgi r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
388 sign_extend(dc
->imm16
, 16));
390 LOG_DIS("cmpg r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
393 gen_compare(dc
, TCG_COND_GT
);
396 static void dec_cmpge(DisasContext
*dc
)
398 if (dc
->format
== OP_FMT_RI
) {
399 LOG_DIS("cmpgei r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
400 sign_extend(dc
->imm16
, 16));
402 LOG_DIS("cmpge r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
405 gen_compare(dc
, TCG_COND_GE
);
408 static void dec_cmpgeu(DisasContext
*dc
)
410 if (dc
->format
== OP_FMT_RI
) {
411 LOG_DIS("cmpgeui r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
412 zero_extend(dc
->imm16
, 16));
414 LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
417 gen_compare(dc
, TCG_COND_GEU
);
420 static void dec_cmpgu(DisasContext
*dc
)
422 if (dc
->format
== OP_FMT_RI
) {
423 LOG_DIS("cmpgui r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
424 zero_extend(dc
->imm16
, 16));
426 LOG_DIS("cmpgu r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
429 gen_compare(dc
, TCG_COND_GTU
);
432 static void dec_cmpne(DisasContext
*dc
)
434 if (dc
->format
== OP_FMT_RI
) {
435 LOG_DIS("cmpnei r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
436 sign_extend(dc
->imm16
, 16));
438 LOG_DIS("cmpne r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
441 gen_compare(dc
, TCG_COND_NE
);
444 static void dec_divu(DisasContext
*dc
)
448 LOG_DIS("divu r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
450 if (!(dc
->features
& LM32_FEATURE_DIVIDE
)) {
451 qemu_log_mask(LOG_GUEST_ERROR
, "hardware divider is not available\n");
452 t_gen_illegal_insn(dc
);
456 l1
= gen_new_label();
457 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_R
[dc
->r1
], 0, l1
);
458 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
459 t_gen_raise_exception(dc
, EXCP_DIVIDE_BY_ZERO
);
461 tcg_gen_divu_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
464 static void dec_lb(DisasContext
*dc
)
468 LOG_DIS("lb r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
471 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
472 tcg_gen_qemu_ld8s(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
476 static void dec_lbu(DisasContext
*dc
)
480 LOG_DIS("lbu r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
483 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
484 tcg_gen_qemu_ld8u(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
488 static void dec_lh(DisasContext
*dc
)
492 LOG_DIS("lh r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
495 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
496 tcg_gen_qemu_ld16s(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
500 static void dec_lhu(DisasContext
*dc
)
504 LOG_DIS("lhu r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, dc
->imm16
);
507 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
508 tcg_gen_qemu_ld16u(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
512 static void dec_lw(DisasContext
*dc
)
516 LOG_DIS("lw r%d, (r%d+%d)\n", dc
->r1
, dc
->r0
, sign_extend(dc
->imm16
, 16));
519 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
520 tcg_gen_qemu_ld32s(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
524 static void dec_modu(DisasContext
*dc
)
528 LOG_DIS("modu r%d, r%d, %d\n", dc
->r2
, dc
->r0
, dc
->r1
);
530 if (!(dc
->features
& LM32_FEATURE_DIVIDE
)) {
531 qemu_log_mask(LOG_GUEST_ERROR
, "hardware divider is not available\n");
532 t_gen_illegal_insn(dc
);
536 l1
= gen_new_label();
537 tcg_gen_brcondi_tl(TCG_COND_NE
, cpu_R
[dc
->r1
], 0, l1
);
538 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
539 t_gen_raise_exception(dc
, EXCP_DIVIDE_BY_ZERO
);
541 tcg_gen_remu_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
544 static void dec_mul(DisasContext
*dc
)
546 if (dc
->format
== OP_FMT_RI
) {
547 LOG_DIS("muli r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
548 sign_extend(dc
->imm16
, 16));
550 LOG_DIS("mul r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
553 if (!(dc
->features
& LM32_FEATURE_MULTIPLY
)) {
554 qemu_log_mask(LOG_GUEST_ERROR
,
555 "hardware multiplier is not available\n");
556 t_gen_illegal_insn(dc
);
560 if (dc
->format
== OP_FMT_RI
) {
561 tcg_gen_muli_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
562 sign_extend(dc
->imm16
, 16));
564 tcg_gen_mul_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
568 static void dec_nor(DisasContext
*dc
)
570 if (dc
->format
== OP_FMT_RI
) {
571 LOG_DIS("nori r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
572 zero_extend(dc
->imm16
, 16));
574 LOG_DIS("nor r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
577 if (dc
->format
== OP_FMT_RI
) {
578 TCGv t0
= tcg_temp_new();
579 tcg_gen_movi_tl(t0
, zero_extend(dc
->imm16
, 16));
580 tcg_gen_nor_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], t0
);
583 tcg_gen_nor_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
587 static void dec_or(DisasContext
*dc
)
589 if (dc
->format
== OP_FMT_RI
) {
590 LOG_DIS("ori r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
591 zero_extend(dc
->imm16
, 16));
593 if (dc
->r1
== R_R0
) {
594 LOG_DIS("mv r%d, r%d\n", dc
->r2
, dc
->r0
);
596 LOG_DIS("or r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
600 if (dc
->format
== OP_FMT_RI
) {
601 tcg_gen_ori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
602 zero_extend(dc
->imm16
, 16));
604 tcg_gen_or_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
608 static void dec_orhi(DisasContext
*dc
)
610 if (dc
->r0
== R_R0
) {
611 LOG_DIS("mvhi r%d, %d\n", dc
->r1
, dc
->imm16
);
613 LOG_DIS("orhi r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm16
);
616 tcg_gen_ori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], (dc
->imm16
<< 16));
619 static void dec_scall(DisasContext
*dc
)
624 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
625 t_gen_raise_exception(dc
, EXCP_BREAKPOINT
);
629 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
630 t_gen_raise_exception(dc
, EXCP_SYSTEMCALL
);
633 qemu_log_mask(LOG_GUEST_ERROR
, "invalid opcode @0x%x", dc
->pc
);
634 t_gen_illegal_insn(dc
);
639 static void dec_rcsr(DisasContext
*dc
)
641 LOG_DIS("rcsr r%d, %d\n", dc
->r2
, dc
->csr
);
645 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_ie
);
648 gen_helper_rcsr_im(cpu_R
[dc
->r2
], cpu_env
);
651 gen_helper_rcsr_ip(cpu_R
[dc
->r2
], cpu_env
);
654 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_cc
);
657 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_cfg
);
660 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_eba
);
663 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_dc
);
666 tcg_gen_mov_tl(cpu_R
[dc
->r2
], cpu_deba
);
669 gen_helper_rcsr_jtx(cpu_R
[dc
->r2
], cpu_env
);
672 gen_helper_rcsr_jrx(cpu_R
[dc
->r2
], cpu_env
);
684 qemu_log_mask(LOG_GUEST_ERROR
, "invalid read access csr=%x\n", dc
->csr
);
687 qemu_log_mask(LOG_GUEST_ERROR
, "read_csr: unknown csr=%x\n", dc
->csr
);
692 static void dec_sb(DisasContext
*dc
)
696 LOG_DIS("sb (r%d+%d), r%d\n", dc
->r0
, dc
->imm16
, dc
->r1
);
699 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
700 tcg_gen_qemu_st8(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
704 static void dec_sextb(DisasContext
*dc
)
706 LOG_DIS("sextb r%d, r%d\n", dc
->r2
, dc
->r0
);
708 if (!(dc
->features
& LM32_FEATURE_SIGN_EXTEND
)) {
709 qemu_log_mask(LOG_GUEST_ERROR
,
710 "hardware sign extender is not available\n");
711 t_gen_illegal_insn(dc
);
715 tcg_gen_ext8s_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
]);
718 static void dec_sexth(DisasContext
*dc
)
720 LOG_DIS("sexth r%d, r%d\n", dc
->r2
, dc
->r0
);
722 if (!(dc
->features
& LM32_FEATURE_SIGN_EXTEND
)) {
723 qemu_log_mask(LOG_GUEST_ERROR
,
724 "hardware sign extender is not available\n");
725 t_gen_illegal_insn(dc
);
729 tcg_gen_ext16s_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
]);
732 static void dec_sh(DisasContext
*dc
)
736 LOG_DIS("sh (r%d+%d), r%d\n", dc
->r0
, dc
->imm16
, dc
->r1
);
739 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
740 tcg_gen_qemu_st16(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
744 static void dec_sl(DisasContext
*dc
)
746 if (dc
->format
== OP_FMT_RI
) {
747 LOG_DIS("sli r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm5
);
749 LOG_DIS("sl r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
752 if (!(dc
->features
& LM32_FEATURE_SHIFT
)) {
753 qemu_log_mask(LOG_GUEST_ERROR
, "hardware shifter is not available\n");
754 t_gen_illegal_insn(dc
);
758 if (dc
->format
== OP_FMT_RI
) {
759 tcg_gen_shli_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], dc
->imm5
);
761 TCGv t0
= tcg_temp_new();
762 tcg_gen_andi_tl(t0
, cpu_R
[dc
->r1
], 0x1f);
763 tcg_gen_shl_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], t0
);
768 static void dec_sr(DisasContext
*dc
)
770 if (dc
->format
== OP_FMT_RI
) {
771 LOG_DIS("sri r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm5
);
773 LOG_DIS("sr r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
776 /* The real CPU (w/o hardware shifter) only supports right shift by exactly
778 if (dc
->format
== OP_FMT_RI
) {
779 if (!(dc
->features
& LM32_FEATURE_SHIFT
) && (dc
->imm5
!= 1)) {
780 qemu_log_mask(LOG_GUEST_ERROR
,
781 "hardware shifter is not available\n");
782 t_gen_illegal_insn(dc
);
785 tcg_gen_sari_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], dc
->imm5
);
787 TCGLabel
*l1
= gen_new_label();
788 TCGLabel
*l2
= gen_new_label();
789 TCGv t0
= tcg_temp_local_new();
790 tcg_gen_andi_tl(t0
, cpu_R
[dc
->r1
], 0x1f);
792 if (!(dc
->features
& LM32_FEATURE_SHIFT
)) {
793 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 1, l1
);
794 t_gen_illegal_insn(dc
);
799 tcg_gen_sar_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], t0
);
806 static void dec_sru(DisasContext
*dc
)
808 if (dc
->format
== OP_FMT_RI
) {
809 LOG_DIS("srui r%d, r%d, %d\n", dc
->r1
, dc
->r0
, dc
->imm5
);
811 LOG_DIS("sru r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
814 if (dc
->format
== OP_FMT_RI
) {
815 if (!(dc
->features
& LM32_FEATURE_SHIFT
) && (dc
->imm5
!= 1)) {
816 qemu_log_mask(LOG_GUEST_ERROR
,
817 "hardware shifter is not available\n");
818 t_gen_illegal_insn(dc
);
821 tcg_gen_shri_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
], dc
->imm5
);
823 TCGLabel
*l1
= gen_new_label();
824 TCGLabel
*l2
= gen_new_label();
825 TCGv t0
= tcg_temp_local_new();
826 tcg_gen_andi_tl(t0
, cpu_R
[dc
->r1
], 0x1f);
828 if (!(dc
->features
& LM32_FEATURE_SHIFT
)) {
829 tcg_gen_brcondi_tl(TCG_COND_EQ
, t0
, 1, l1
);
830 t_gen_illegal_insn(dc
);
835 tcg_gen_shr_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], t0
);
842 static void dec_sub(DisasContext
*dc
)
844 LOG_DIS("sub r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
846 tcg_gen_sub_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
849 static void dec_sw(DisasContext
*dc
)
853 LOG_DIS("sw (r%d+%d), r%d\n", dc
->r0
, sign_extend(dc
->imm16
, 16), dc
->r1
);
856 tcg_gen_addi_tl(t0
, cpu_R
[dc
->r0
], sign_extend(dc
->imm16
, 16));
857 tcg_gen_qemu_st32(cpu_R
[dc
->r1
], t0
, MEM_INDEX
);
861 static void dec_user(DisasContext
*dc
)
865 qemu_log_mask(LOG_GUEST_ERROR
, "user instruction undefined\n");
866 t_gen_illegal_insn(dc
);
869 static void dec_wcsr(DisasContext
*dc
)
873 LOG_DIS("wcsr %d, r%d\n", dc
->csr
, dc
->r1
);
877 tcg_gen_mov_tl(cpu_ie
, cpu_R
[dc
->r1
]);
878 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
879 dc
->is_jmp
= DISAS_UPDATE
;
882 /* mark as an io operation because it could cause an interrupt */
883 if (tb_cflags(dc
->tb
) & CF_USE_ICOUNT
) {
886 gen_helper_wcsr_im(cpu_env
, cpu_R
[dc
->r1
]);
887 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
888 if (tb_cflags(dc
->tb
) & CF_USE_ICOUNT
) {
891 dc
->is_jmp
= DISAS_UPDATE
;
894 /* mark as an io operation because it could cause an interrupt */
895 if (tb_cflags(dc
->tb
) & CF_USE_ICOUNT
) {
898 gen_helper_wcsr_ip(cpu_env
, cpu_R
[dc
->r1
]);
899 tcg_gen_movi_tl(cpu_pc
, dc
->pc
+ 4);
900 if (tb_cflags(dc
->tb
) & CF_USE_ICOUNT
) {
903 dc
->is_jmp
= DISAS_UPDATE
;
912 tcg_gen_mov_tl(cpu_eba
, cpu_R
[dc
->r1
]);
915 tcg_gen_mov_tl(cpu_deba
, cpu_R
[dc
->r1
]);
918 gen_helper_wcsr_jtx(cpu_env
, cpu_R
[dc
->r1
]);
921 gen_helper_wcsr_jrx(cpu_env
, cpu_R
[dc
->r1
]);
924 gen_helper_wcsr_dc(cpu_env
, cpu_R
[dc
->r1
]);
930 no
= dc
->csr
- CSR_BP0
;
931 if (dc
->num_breakpoints
<= no
) {
932 qemu_log_mask(LOG_GUEST_ERROR
,
933 "breakpoint #%i is not available\n", no
);
934 t_gen_illegal_insn(dc
);
937 gen_helper_wcsr_bp(cpu_env
, cpu_R
[dc
->r1
], tcg_const_i32(no
));
943 no
= dc
->csr
- CSR_WP0
;
944 if (dc
->num_watchpoints
<= no
) {
945 qemu_log_mask(LOG_GUEST_ERROR
,
946 "watchpoint #%i is not available\n", no
);
947 t_gen_illegal_insn(dc
);
950 gen_helper_wcsr_wp(cpu_env
, cpu_R
[dc
->r1
], tcg_const_i32(no
));
954 qemu_log_mask(LOG_GUEST_ERROR
, "invalid write access csr=%x\n",
958 qemu_log_mask(LOG_GUEST_ERROR
, "write_csr: unknown csr=%x\n",
964 static void dec_xnor(DisasContext
*dc
)
966 if (dc
->format
== OP_FMT_RI
) {
967 LOG_DIS("xnori r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
968 zero_extend(dc
->imm16
, 16));
970 if (dc
->r1
== R_R0
) {
971 LOG_DIS("not r%d, r%d\n", dc
->r2
, dc
->r0
);
973 LOG_DIS("xnor r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
977 if (dc
->format
== OP_FMT_RI
) {
978 tcg_gen_xori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
979 zero_extend(dc
->imm16
, 16));
980 tcg_gen_not_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r1
]);
982 tcg_gen_eqv_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
986 static void dec_xor(DisasContext
*dc
)
988 if (dc
->format
== OP_FMT_RI
) {
989 LOG_DIS("xori r%d, r%d, %d\n", dc
->r1
, dc
->r0
,
990 zero_extend(dc
->imm16
, 16));
992 LOG_DIS("xor r%d, r%d, r%d\n", dc
->r2
, dc
->r0
, dc
->r1
);
995 if (dc
->format
== OP_FMT_RI
) {
996 tcg_gen_xori_tl(cpu_R
[dc
->r1
], cpu_R
[dc
->r0
],
997 zero_extend(dc
->imm16
, 16));
999 tcg_gen_xor_tl(cpu_R
[dc
->r2
], cpu_R
[dc
->r0
], cpu_R
[dc
->r1
]);
1003 static void dec_ill(DisasContext
*dc
)
1005 qemu_log_mask(LOG_GUEST_ERROR
, "invalid opcode 0x%02x\n", dc
->opcode
);
1006 t_gen_illegal_insn(dc
);
1009 typedef void (*DecoderInfo
)(DisasContext
*dc
);
1010 static const DecoderInfo decinfo
[] = {
1011 dec_sru
, dec_nor
, dec_mul
, dec_sh
, dec_lb
, dec_sr
, dec_xor
, dec_lh
,
1012 dec_and
, dec_xnor
, dec_lw
, dec_lhu
, dec_sb
, dec_add
, dec_or
, dec_sl
,
1013 dec_lbu
, dec_be
, dec_bg
, dec_bge
, dec_bgeu
, dec_bgu
, dec_sw
, dec_bne
,
1014 dec_andhi
, dec_cmpe
, dec_cmpg
, dec_cmpge
, dec_cmpgeu
, dec_cmpgu
, dec_orhi
,
1016 dec_sru
, dec_nor
, dec_mul
, dec_divu
, dec_rcsr
, dec_sr
, dec_xor
, dec_ill
,
1017 dec_and
, dec_xnor
, dec_ill
, dec_scall
, dec_sextb
, dec_add
, dec_or
, dec_sl
,
1018 dec_b
, dec_modu
, dec_sub
, dec_user
, dec_wcsr
, dec_ill
, dec_call
, dec_sexth
,
1019 dec_bi
, dec_cmpe
, dec_cmpg
, dec_cmpge
, dec_cmpgeu
, dec_cmpgu
, dec_calli
,
1023 static inline void decode(DisasContext
*dc
, uint32_t ir
)
1026 LOG_DIS("%8.8x\t", dc
->ir
);
1028 dc
->opcode
= EXTRACT_FIELD(ir
, 26, 31);
1030 dc
->imm5
= EXTRACT_FIELD(ir
, 0, 4);
1031 dc
->imm16
= EXTRACT_FIELD(ir
, 0, 15);
1032 dc
->imm26
= EXTRACT_FIELD(ir
, 0, 25);
1034 dc
->csr
= EXTRACT_FIELD(ir
, 21, 25);
1035 dc
->r0
= EXTRACT_FIELD(ir
, 21, 25);
1036 dc
->r1
= EXTRACT_FIELD(ir
, 16, 20);
1037 dc
->r2
= EXTRACT_FIELD(ir
, 11, 15);
1039 /* bit 31 seems to indicate insn type. */
1040 if (ir
& (1 << 31)) {
1041 dc
->format
= OP_FMT_RR
;
1043 dc
->format
= OP_FMT_RI
;
1046 assert(ARRAY_SIZE(decinfo
) == 64);
1047 assert(dc
->opcode
< 64);
1049 decinfo
[dc
->opcode
](dc
);
1052 /* generate intermediate code for basic block 'tb'. */
1053 void gen_intermediate_code(CPUState
*cs
, struct TranslationBlock
*tb
)
1055 CPULM32State
*env
= cs
->env_ptr
;
1056 LM32CPU
*cpu
= lm32_env_get_cpu(env
);
1057 struct DisasContext ctx
, *dc
= &ctx
;
1059 uint32_t next_page_start
;
1064 dc
->features
= cpu
->features
;
1065 dc
->num_breakpoints
= cpu
->num_breakpoints
;
1066 dc
->num_watchpoints
= cpu
->num_watchpoints
;
1069 dc
->is_jmp
= DISAS_NEXT
;
1071 dc
->singlestep_enabled
= cs
->singlestep_enabled
;
1074 qemu_log_mask(LOG_GUEST_ERROR
,
1075 "unaligned PC=%x. Ignoring lowest bits.\n", pc_start
);
1079 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
1081 max_insns
= tb_cflags(tb
) & CF_COUNT_MASK
;
1082 if (max_insns
== 0) {
1083 max_insns
= CF_COUNT_MASK
;
1085 if (max_insns
> TCG_MAX_INSNS
) {
1086 max_insns
= TCG_MAX_INSNS
;
1091 tcg_gen_insn_start(dc
->pc
);
1094 if (unlikely(cpu_breakpoint_test(cs
, dc
->pc
, BP_ANY
))) {
1095 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1096 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1097 dc
->is_jmp
= DISAS_UPDATE
;
1098 /* The address covered by the breakpoint must be included in
1099 [tb->pc, tb->pc + tb->size) in order to for it to be
1100 properly cleared -- thus we increment the PC here so that
1101 the logic setting tb->size below does the right thing. */
1107 LOG_DIS("%8.8x:\t", dc
->pc
);
1109 if (num_insns
== max_insns
&& (tb_cflags(tb
) & CF_LAST_IO
)) {
1113 decode(dc
, cpu_ldl_code(env
, dc
->pc
));
1115 } while (!dc
->is_jmp
1116 && !tcg_op_buf_full()
1117 && !cs
->singlestep_enabled
1119 && (dc
->pc
< next_page_start
)
1120 && num_insns
< max_insns
);
1122 if (tb_cflags(tb
) & CF_LAST_IO
) {
1126 if (unlikely(cs
->singlestep_enabled
)) {
1127 if (dc
->is_jmp
== DISAS_NEXT
) {
1128 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1130 t_gen_raise_exception(dc
, EXCP_DEBUG
);
1132 switch (dc
->is_jmp
) {
1134 gen_goto_tb(dc
, 1, dc
->pc
);
1139 /* indicate that the hash table must be used
1140 to find the next TB */
1144 /* nothing more to generate */
1149 gen_tb_end(tb
, num_insns
);
1151 tb
->size
= dc
->pc
- pc_start
;
1152 tb
->icount
= num_insns
;
1155 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
1156 && qemu_log_in_addr_range(pc_start
)) {
1159 log_target_disas(cs
, pc_start
, dc
->pc
- pc_start
, 0);
1160 qemu_log("\nisize=%d osize=%d\n",
1161 dc
->pc
- pc_start
, tcg_op_buf_count());
1167 void lm32_cpu_dump_state(CPUState
*cs
, FILE *f
, fprintf_function cpu_fprintf
,
1170 LM32CPU
*cpu
= LM32_CPU(cs
);
1171 CPULM32State
*env
= &cpu
->env
;
1178 cpu_fprintf(f
, "IN: PC=%x %s\n",
1179 env
->pc
, lookup_symbol(env
->pc
));
1181 cpu_fprintf(f
, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
1183 (env
->ie
& IE_IE
) ? 1 : 0,
1184 (env
->ie
& IE_EIE
) ? 1 : 0,
1185 (env
->ie
& IE_BIE
) ? 1 : 0,
1186 lm32_pic_get_im(env
->pic_state
),
1187 lm32_pic_get_ip(env
->pic_state
));
1188 cpu_fprintf(f
, "eba=%8.8x deba=%8.8x\n",
1192 for (i
= 0; i
< 32; i
++) {
1193 cpu_fprintf(f
, "r%2.2d=%8.8x ", i
, env
->regs
[i
]);
1194 if ((i
+ 1) % 4 == 0) {
1195 cpu_fprintf(f
, "\n");
1198 cpu_fprintf(f
, "\n\n");
1201 void restore_state_to_opc(CPULM32State
*env
, TranslationBlock
*tb
,
1207 void lm32_translate_init(void)
1211 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
1212 tcg_ctx
->tcg_env
= cpu_env
;
1214 for (i
= 0; i
< ARRAY_SIZE(cpu_R
); i
++) {
1215 cpu_R
[i
] = tcg_global_mem_new(cpu_env
,
1216 offsetof(CPULM32State
, regs
[i
]),
1220 for (i
= 0; i
< ARRAY_SIZE(cpu_bp
); i
++) {
1221 cpu_bp
[i
] = tcg_global_mem_new(cpu_env
,
1222 offsetof(CPULM32State
, bp
[i
]),
1226 for (i
= 0; i
< ARRAY_SIZE(cpu_wp
); i
++) {
1227 cpu_wp
[i
] = tcg_global_mem_new(cpu_env
,
1228 offsetof(CPULM32State
, wp
[i
]),
1232 cpu_pc
= tcg_global_mem_new(cpu_env
,
1233 offsetof(CPULM32State
, pc
),
1235 cpu_ie
= tcg_global_mem_new(cpu_env
,
1236 offsetof(CPULM32State
, ie
),
1238 cpu_icc
= tcg_global_mem_new(cpu_env
,
1239 offsetof(CPULM32State
, icc
),
1241 cpu_dcc
= tcg_global_mem_new(cpu_env
,
1242 offsetof(CPULM32State
, dcc
),
1244 cpu_cc
= tcg_global_mem_new(cpu_env
,
1245 offsetof(CPULM32State
, cc
),
1247 cpu_cfg
= tcg_global_mem_new(cpu_env
,
1248 offsetof(CPULM32State
, cfg
),
1250 cpu_eba
= tcg_global_mem_new(cpu_env
,
1251 offsetof(CPULM32State
, eba
),
1253 cpu_dc
= tcg_global_mem_new(cpu_env
,
1254 offsetof(CPULM32State
, dc
),
1256 cpu_deba
= tcg_global_mem_new(cpu_env
,
1257 offsetof(CPULM32State
, deba
),