4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #define SH4_DEBUG_DISAS
22 //#define SH4_SINGLE_STEP
32 typedef struct DisasContext
{
33 struct TranslationBlock
*tb
;
42 int singlestep_enabled
;
/* In user-mode emulation the CPU is always in user mode; in system mode
   check the SR.MD (processor mode) bit of the translation-time SR. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif
/* Translation-stop states kept in DisasContext.bstate. */
enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                      exception condition */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};
62 /* global register indexes */
63 static TCGv_ptr cpu_env
;
64 static TCGv cpu_gregs
[24];
65 static TCGv cpu_pc
, cpu_sr
, cpu_ssr
, cpu_spc
, cpu_gbr
;
66 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
67 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
68 static TCGv cpu_fregs
[32];
70 /* internal register indexes */
71 static TCGv cpu_flags
, cpu_delayed_pc
;
73 static uint32_t gen_opc_hflags
[OPC_BUF_SIZE
];
75 #include "gen-icount.h"
77 static void sh4_translate_init(void)
80 static int done_init
= 0;
81 static const char * const gregnames
[24] = {
82 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
83 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
84 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
85 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
86 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 static const char * const fregnames
[32] = {
89 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
90 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
91 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
92 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
93 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
94 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
95 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
96 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
102 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
104 for (i
= 0; i
< 24; i
++)
105 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
106 offsetof(CPUSH4State
, gregs
[i
]),
109 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
110 offsetof(CPUSH4State
, pc
), "PC");
111 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
112 offsetof(CPUSH4State
, sr
), "SR");
113 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
114 offsetof(CPUSH4State
, ssr
), "SSR");
115 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
116 offsetof(CPUSH4State
, spc
), "SPC");
117 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
118 offsetof(CPUSH4State
, gbr
), "GBR");
119 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
120 offsetof(CPUSH4State
, vbr
), "VBR");
121 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
122 offsetof(CPUSH4State
, sgr
), "SGR");
123 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
124 offsetof(CPUSH4State
, dbr
), "DBR");
125 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
126 offsetof(CPUSH4State
, mach
), "MACH");
127 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
128 offsetof(CPUSH4State
, macl
), "MACL");
129 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
130 offsetof(CPUSH4State
, pr
), "PR");
131 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
132 offsetof(CPUSH4State
, fpscr
), "FPSCR");
133 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, fpul
), "FPUL");
136 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
137 offsetof(CPUSH4State
, flags
), "_flags_");
138 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, delayed_pc
),
141 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
142 offsetof(CPUSH4State
, ldst
), "_ldst_");
144 for (i
= 0; i
< 32; i
++)
145 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
146 offsetof(CPUSH4State
, fregs
[i
]),
149 /* register helpers */
156 void cpu_dump_state(CPUSH4State
* env
, FILE * f
,
157 int (*cpu_fprintf
) (FILE * f
, const char *fmt
, ...),
161 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
162 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
163 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
164 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
165 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
166 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
167 for (i
= 0; i
< 24; i
+= 4) {
168 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
169 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
170 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
172 if (env
->flags
& DELAY_SLOT
) {
173 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
175 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
176 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
190 static sh4_def_t sh4_defs
[] = {
193 .id
= SH_CPU_SH7750R
,
197 .features
= SH_FEATURE_BCR3_AND_BCR4
,
200 .id
= SH_CPU_SH7751R
,
203 .cvr
= 0x00110000, /* Neutered caches, should be 0x20480000 */
204 .features
= SH_FEATURE_BCR3_AND_BCR4
,
211 .features
= SH_FEATURE_SH4A
,
215 static const sh4_def_t
*cpu_sh4_find_by_name(const char *name
)
219 if (strcasecmp(name
, "any") == 0)
222 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
223 if (strcasecmp(name
, sh4_defs
[i
].name
) == 0)
229 void sh4_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
233 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
234 (*cpu_fprintf
)(f
, "%s\n", sh4_defs
[i
].name
);
237 static void cpu_register(CPUSH4State
*env
, const sh4_def_t
*def
)
245 SuperHCPU
*cpu_sh4_init(const char *cpu_model
)
249 const sh4_def_t
*def
;
251 def
= cpu_sh4_find_by_name(cpu_model
);
254 cpu
= SUPERH_CPU(object_new(TYPE_SUPERH_CPU
));
256 env
->features
= def
->features
;
257 sh4_translate_init();
258 env
->cpu_model_str
= cpu_model
;
260 cpu_register(env
, def
);
265 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
267 TranslationBlock
*tb
;
270 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
271 !ctx
->singlestep_enabled
) {
272 /* Use a direct jump if in same page and singlestep not enabled */
274 tcg_gen_movi_i32(cpu_pc
, dest
);
275 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
277 tcg_gen_movi_i32(cpu_pc
, dest
);
278 if (ctx
->singlestep_enabled
)
279 gen_helper_debug(cpu_env
);
284 static void gen_jump(DisasContext
* ctx
)
286 if (ctx
->delayed_pc
== (uint32_t) - 1) {
287 /* Target is not statically known, it comes necessarily from a
288 delayed jump as immediate jump are conditinal jumps */
289 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
290 if (ctx
->singlestep_enabled
)
291 gen_helper_debug(cpu_env
);
294 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
298 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
301 int label
= gen_new_label();
302 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
304 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
305 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
306 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
307 gen_set_label(label
);
310 /* Immediate conditional jump (bt or bf) */
311 static void gen_conditional_jump(DisasContext
* ctx
,
312 target_ulong ift
, target_ulong ifnott
)
317 l1
= gen_new_label();
319 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
320 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
321 gen_goto_tb(ctx
, 0, ifnott
);
323 gen_goto_tb(ctx
, 1, ift
);
326 /* Delayed conditional jump (bt or bf) */
327 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
332 l1
= gen_new_label();
334 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
335 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
336 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
338 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
342 static inline void gen_set_t(void)
344 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
347 static inline void gen_clr_t(void)
349 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
352 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
357 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
358 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
359 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
364 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
369 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
370 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
371 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
376 static inline void gen_store_flags(uint32_t flags
)
378 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
379 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
382 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
384 TCGv tmp
= tcg_temp_new();
389 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
390 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
392 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
394 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
395 tcg_gen_or_i32(t0
, t0
, tmp
);
400 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
402 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
405 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
407 TCGv_i32 tmp
= tcg_temp_new_i32();
408 tcg_gen_trunc_i64_i32(tmp
, t
);
409 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
410 tcg_gen_shri_i64(t
, t
, 32);
411 tcg_gen_trunc_i64_i32(tmp
, t
);
412 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
413 tcg_temp_free_i32(tmp
);
/* Instruction bit-field extraction helpers (operate on ctx->opcode). */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* R0-R7 are banked: pick BANK1 when both SR.MD and SR.RB are set. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
		(cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* The opposite bank of REG(x). */
#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
		   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection honouring the FPSCR.FR bank swap. */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise a slot-illegal exception when the current instruction sits in a
   delay slot where it is not permitted, and abandon translation. */
#define CHECK_NOT_DELAY_SLOT                                        \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))           \
  {                                                                 \
      gen_helper_raise_slot_illegal_instruction(cpu_env);           \
      ctx->bstate = BS_EXCP;                                        \
      return;                                                       \
  }

/* Raise an illegal-instruction exception (slot variant inside a delay
   slot) when executing a privileged instruction in user mode. */
#define CHECK_PRIVILEGED                                            \
  if (IS_USER(ctx)) {                                               \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
          gen_helper_raise_slot_illegal_instruction(cpu_env);       \
      } else {                                                      \
          gen_helper_raise_illegal_instruction(cpu_env);            \
      }                                                             \
      ctx->bstate = BS_EXCP;                                        \
      return;                                                       \
  }

/* Raise an FPU-disabled exception (slot variant inside a delay slot)
   when SR.FD is set. */
#define CHECK_FPU_ENABLED                                           \
  if (ctx->flags & SR_FD) {                                         \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {     \
          gen_helper_raise_slot_fpu_disable(cpu_env);               \
      } else {                                                      \
          gen_helper_raise_fpu_disable(cpu_env);                    \
      }                                                             \
      ctx->bstate = BS_EXCP;                                        \
      return;                                                       \
  }
467 static void _decode_opc(DisasContext
* ctx
)
469 /* This code tries to make movcal emulation sufficiently
470 accurate for Linux purposes. This instruction writes
471 memory, and prior to that, always allocates a cache line.
472 It is used in two contexts:
473 - in memcpy, where data is copied in blocks, the first write
474 of to a block uses movca.l for performance.
475 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
476 to flush the cache. Here, the data written by movcal.l is never
477 written to memory, and the data written is just bogus.
479 To simulate this, we simulate movcal.l, we store the value to memory,
480 but we also remember the previous content. If we see ocbi, we check
481 if movcal.l for that address was done previously. If so, the write should
482 not have hit the memory, so we restore the previous content.
483 When we see an instruction that is neither movca.l
484 nor ocbi, the previous content is discarded.
486 To optimize, we only try to flush stores when we're at the start of
487 TB, or if we already saw movca.l in this TB and did not flush stores
491 int opcode
= ctx
->opcode
& 0xf0ff;
492 if (opcode
!= 0x0093 /* ocbi */
493 && opcode
!= 0x00c3 /* movca.l */)
495 gen_helper_discard_movcal_backup(cpu_env
);
501 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
504 switch (ctx
->opcode
) {
505 case 0x0019: /* div0u */
506 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
508 case 0x000b: /* rts */
510 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
511 ctx
->flags
|= DELAY_SLOT
;
512 ctx
->delayed_pc
= (uint32_t) - 1;
514 case 0x0028: /* clrmac */
515 tcg_gen_movi_i32(cpu_mach
, 0);
516 tcg_gen_movi_i32(cpu_macl
, 0);
518 case 0x0048: /* clrs */
519 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
521 case 0x0008: /* clrt */
524 case 0x0038: /* ldtlb */
526 gen_helper_ldtlb(cpu_env
);
528 case 0x002b: /* rte */
531 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
532 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
533 ctx
->flags
|= DELAY_SLOT
;
534 ctx
->delayed_pc
= (uint32_t) - 1;
536 case 0x0058: /* sets */
537 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
539 case 0x0018: /* sett */
542 case 0xfbfd: /* frchg */
543 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
544 ctx
->bstate
= BS_STOP
;
546 case 0xf3fd: /* fschg */
547 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
548 ctx
->bstate
= BS_STOP
;
550 case 0x0009: /* nop */
552 case 0x001b: /* sleep */
554 gen_helper_sleep(cpu_env
, tcg_const_i32(ctx
->pc
+ 2));
558 switch (ctx
->opcode
& 0xf000) {
559 case 0x1000: /* mov.l Rm,@(disp,Rn) */
561 TCGv addr
= tcg_temp_new();
562 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
563 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
567 case 0x5000: /* mov.l @(disp,Rm),Rn */
569 TCGv addr
= tcg_temp_new();
570 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
571 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
575 case 0xe000: /* mov #imm,Rn */
576 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
578 case 0x9000: /* mov.w @(disp,PC),Rn */
580 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
581 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
585 case 0xd000: /* mov.l @(disp,PC),Rn */
587 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
588 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
592 case 0x7000: /* add #imm,Rn */
593 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
595 case 0xa000: /* bra disp */
597 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
598 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
599 ctx
->flags
|= DELAY_SLOT
;
601 case 0xb000: /* bsr disp */
603 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
604 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
605 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
606 ctx
->flags
|= DELAY_SLOT
;
610 switch (ctx
->opcode
& 0xf00f) {
611 case 0x6003: /* mov Rm,Rn */
612 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
614 case 0x2000: /* mov.b Rm,@Rn */
615 tcg_gen_qemu_st8(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
617 case 0x2001: /* mov.w Rm,@Rn */
618 tcg_gen_qemu_st16(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
620 case 0x2002: /* mov.l Rm,@Rn */
621 tcg_gen_qemu_st32(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
623 case 0x6000: /* mov.b @Rm,Rn */
624 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
626 case 0x6001: /* mov.w @Rm,Rn */
627 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
629 case 0x6002: /* mov.l @Rm,Rn */
630 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
632 case 0x2004: /* mov.b Rm,@-Rn */
634 TCGv addr
= tcg_temp_new();
635 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
636 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
); /* might cause re-execution */
637 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
641 case 0x2005: /* mov.w Rm,@-Rn */
643 TCGv addr
= tcg_temp_new();
644 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
645 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
646 tcg_gen_mov_i32(REG(B11_8
), addr
);
650 case 0x2006: /* mov.l Rm,@-Rn */
652 TCGv addr
= tcg_temp_new();
653 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
654 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
655 tcg_gen_mov_i32(REG(B11_8
), addr
);
658 case 0x6004: /* mov.b @Rm+,Rn */
659 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
661 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
663 case 0x6005: /* mov.w @Rm+,Rn */
664 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
666 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
668 case 0x6006: /* mov.l @Rm+,Rn */
669 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
671 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
673 case 0x0004: /* mov.b Rm,@(R0,Rn) */
675 TCGv addr
= tcg_temp_new();
676 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
677 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
);
681 case 0x0005: /* mov.w Rm,@(R0,Rn) */
683 TCGv addr
= tcg_temp_new();
684 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
685 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
689 case 0x0006: /* mov.l Rm,@(R0,Rn) */
691 TCGv addr
= tcg_temp_new();
692 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
693 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
697 case 0x000c: /* mov.b @(R0,Rm),Rn */
699 TCGv addr
= tcg_temp_new();
700 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
701 tcg_gen_qemu_ld8s(REG(B11_8
), addr
, ctx
->memidx
);
705 case 0x000d: /* mov.w @(R0,Rm),Rn */
707 TCGv addr
= tcg_temp_new();
708 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
709 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
713 case 0x000e: /* mov.l @(R0,Rm),Rn */
715 TCGv addr
= tcg_temp_new();
716 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
717 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
721 case 0x6008: /* swap.b Rm,Rn */
724 high
= tcg_temp_new();
725 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
726 low
= tcg_temp_new();
727 tcg_gen_ext16u_i32(low
, REG(B7_4
));
728 tcg_gen_bswap16_i32(low
, low
);
729 tcg_gen_or_i32(REG(B11_8
), high
, low
);
734 case 0x6009: /* swap.w Rm,Rn */
737 high
= tcg_temp_new();
738 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
739 low
= tcg_temp_new();
740 tcg_gen_shri_i32(low
, REG(B7_4
), 16);
741 tcg_gen_ext16u_i32(low
, low
);
742 tcg_gen_or_i32(REG(B11_8
), high
, low
);
747 case 0x200d: /* xtrct Rm,Rn */
750 high
= tcg_temp_new();
751 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
752 low
= tcg_temp_new();
753 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
754 tcg_gen_ext16u_i32(low
, low
);
755 tcg_gen_or_i32(REG(B11_8
), high
, low
);
760 case 0x300c: /* add Rm,Rn */
761 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
763 case 0x300e: /* addc Rm,Rn */
767 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
769 tcg_gen_add_i32(t1
, REG(B7_4
), REG(B11_8
));
770 tcg_gen_add_i32(t0
, t0
, t1
);
772 tcg_gen_setcond_i32(TCG_COND_GTU
, t2
, REG(B11_8
), t1
);
773 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, t1
, t0
);
774 tcg_gen_or_i32(t1
, t1
, t2
);
776 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
777 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
779 tcg_gen_mov_i32(REG(B11_8
), t0
);
783 case 0x300f: /* addv Rm,Rn */
787 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
789 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
791 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
792 tcg_gen_andc_i32(t1
, t1
, t2
);
794 tcg_gen_shri_i32(t1
, t1
, 31);
795 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
796 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
798 tcg_gen_mov_i32(REG(B7_4
), t0
);
802 case 0x2009: /* and Rm,Rn */
803 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
805 case 0x3000: /* cmp/eq Rm,Rn */
806 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
808 case 0x3003: /* cmp/ge Rm,Rn */
809 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
811 case 0x3007: /* cmp/gt Rm,Rn */
812 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
814 case 0x3006: /* cmp/hi Rm,Rn */
815 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
817 case 0x3002: /* cmp/hs Rm,Rn */
818 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
820 case 0x200c: /* cmp/str Rm,Rn */
822 TCGv cmp1
= tcg_temp_new();
823 TCGv cmp2
= tcg_temp_new();
824 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
825 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
826 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
827 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
828 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
829 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
830 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
831 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
832 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
833 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
834 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
835 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
836 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
837 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
842 case 0x2007: /* div0s Rm,Rn */
844 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
845 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
846 TCGv val
= tcg_temp_new();
847 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
848 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
852 case 0x3004: /* div1 Rm,Rn */
853 gen_helper_div1(REG(B11_8
), cpu_env
, REG(B7_4
), REG(B11_8
));
855 case 0x300d: /* dmuls.l Rm,Rn */
857 TCGv_i64 tmp1
= tcg_temp_new_i64();
858 TCGv_i64 tmp2
= tcg_temp_new_i64();
860 tcg_gen_ext_i32_i64(tmp1
, REG(B7_4
));
861 tcg_gen_ext_i32_i64(tmp2
, REG(B11_8
));
862 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
863 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
864 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
865 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
867 tcg_temp_free_i64(tmp2
);
868 tcg_temp_free_i64(tmp1
);
871 case 0x3005: /* dmulu.l Rm,Rn */
873 TCGv_i64 tmp1
= tcg_temp_new_i64();
874 TCGv_i64 tmp2
= tcg_temp_new_i64();
876 tcg_gen_extu_i32_i64(tmp1
, REG(B7_4
));
877 tcg_gen_extu_i32_i64(tmp2
, REG(B11_8
));
878 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
879 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
880 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
881 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
883 tcg_temp_free_i64(tmp2
);
884 tcg_temp_free_i64(tmp1
);
887 case 0x600e: /* exts.b Rm,Rn */
888 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
890 case 0x600f: /* exts.w Rm,Rn */
891 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
893 case 0x600c: /* extu.b Rm,Rn */
894 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
896 case 0x600d: /* extu.w Rm,Rn */
897 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
899 case 0x000f: /* mac.l @Rm+,@Rn+ */
902 arg0
= tcg_temp_new();
903 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
904 arg1
= tcg_temp_new();
905 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
906 gen_helper_macl(cpu_env
, arg0
, arg1
);
909 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
910 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
913 case 0x400f: /* mac.w @Rm+,@Rn+ */
916 arg0
= tcg_temp_new();
917 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
918 arg1
= tcg_temp_new();
919 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
920 gen_helper_macw(cpu_env
, arg0
, arg1
);
923 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
924 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
927 case 0x0007: /* mul.l Rm,Rn */
928 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
930 case 0x200f: /* muls.w Rm,Rn */
933 arg0
= tcg_temp_new();
934 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
935 arg1
= tcg_temp_new();
936 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
937 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
942 case 0x200e: /* mulu.w Rm,Rn */
945 arg0
= tcg_temp_new();
946 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
947 arg1
= tcg_temp_new();
948 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
949 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
954 case 0x600b: /* neg Rm,Rn */
955 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
957 case 0x600a: /* negc Rm,Rn */
961 tcg_gen_neg_i32(t0
, REG(B7_4
));
963 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
964 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
965 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
966 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
967 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
968 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
969 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
974 case 0x6007: /* not Rm,Rn */
975 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
977 case 0x200b: /* or Rm,Rn */
978 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
980 case 0x400c: /* shad Rm,Rn */
982 int label1
= gen_new_label();
983 int label2
= gen_new_label();
984 int label3
= gen_new_label();
985 int label4
= gen_new_label();
987 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
988 /* Rm positive, shift to the left */
989 shift
= tcg_temp_new();
990 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
991 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
992 tcg_temp_free(shift
);
994 /* Rm negative, shift to the right */
995 gen_set_label(label1
);
996 shift
= tcg_temp_new();
997 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
998 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
999 tcg_gen_not_i32(shift
, REG(B7_4
));
1000 tcg_gen_andi_i32(shift
, shift
, 0x1f);
1001 tcg_gen_addi_i32(shift
, shift
, 1);
1002 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
1003 tcg_temp_free(shift
);
1006 gen_set_label(label2
);
1007 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
1008 tcg_gen_movi_i32(REG(B11_8
), 0);
1010 gen_set_label(label3
);
1011 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
1012 gen_set_label(label4
);
1015 case 0x400d: /* shld Rm,Rn */
1017 int label1
= gen_new_label();
1018 int label2
= gen_new_label();
1019 int label3
= gen_new_label();
1021 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
1022 /* Rm positive, shift to the left */
1023 shift
= tcg_temp_new();
1024 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
1025 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
1026 tcg_temp_free(shift
);
1028 /* Rm negative, shift to the right */
1029 gen_set_label(label1
);
1030 shift
= tcg_temp_new();
1031 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
1032 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
1033 tcg_gen_not_i32(shift
, REG(B7_4
));
1034 tcg_gen_andi_i32(shift
, shift
, 0x1f);
1035 tcg_gen_addi_i32(shift
, shift
, 1);
1036 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
1037 tcg_temp_free(shift
);
1040 gen_set_label(label2
);
1041 tcg_gen_movi_i32(REG(B11_8
), 0);
1042 gen_set_label(label3
);
1045 case 0x3008: /* sub Rm,Rn */
1046 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1048 case 0x300a: /* subc Rm,Rn */
1051 t0
= tcg_temp_new();
1052 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
1053 t1
= tcg_temp_new();
1054 tcg_gen_sub_i32(t1
, REG(B11_8
), REG(B7_4
));
1055 tcg_gen_sub_i32(t0
, t1
, t0
);
1056 t2
= tcg_temp_new();
1057 tcg_gen_setcond_i32(TCG_COND_LTU
, t2
, REG(B11_8
), t1
);
1058 tcg_gen_setcond_i32(TCG_COND_LTU
, t1
, t1
, t0
);
1059 tcg_gen_or_i32(t1
, t1
, t2
);
1061 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1062 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
1064 tcg_gen_mov_i32(REG(B11_8
), t0
);
1068 case 0x300b: /* subv Rm,Rn */
1071 t0
= tcg_temp_new();
1072 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
1073 t1
= tcg_temp_new();
1074 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
1075 t2
= tcg_temp_new();
1076 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
1077 tcg_gen_and_i32(t1
, t1
, t2
);
1079 tcg_gen_shri_i32(t1
, t1
, 31);
1080 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1081 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
1083 tcg_gen_mov_i32(REG(B11_8
), t0
);
1087 case 0x2008: /* tst Rm,Rn */
1089 TCGv val
= tcg_temp_new();
1090 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
1091 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1095 case 0x200a: /* xor Rm,Rn */
1096 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1098 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1100 if (ctx
->fpscr
& FPSCR_SZ
) {
1101 TCGv_i64 fp
= tcg_temp_new_i64();
1102 gen_load_fpr64(fp
, XREG(B7_4
));
1103 gen_store_fpr64(fp
, XREG(B11_8
));
1104 tcg_temp_free_i64(fp
);
1106 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1109 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1111 if (ctx
->fpscr
& FPSCR_SZ
) {
1112 TCGv addr_hi
= tcg_temp_new();
1113 int fr
= XREG(B7_4
);
1114 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
1115 tcg_gen_qemu_st32(cpu_fregs
[fr
], REG(B11_8
), ctx
->memidx
);
1116 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1117 tcg_temp_free(addr_hi
);
1119 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
), ctx
->memidx
);
1122 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1124 if (ctx
->fpscr
& FPSCR_SZ
) {
1125 TCGv addr_hi
= tcg_temp_new();
1126 int fr
= XREG(B11_8
);
1127 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1128 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1129 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1130 tcg_temp_free(addr_hi
);
1132 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1135 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1137 if (ctx
->fpscr
& FPSCR_SZ
) {
1138 TCGv addr_hi
= tcg_temp_new();
1139 int fr
= XREG(B11_8
);
1140 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1141 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1142 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1143 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1144 tcg_temp_free(addr_hi
);
1146 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1147 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1150 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1152 if (ctx
->fpscr
& FPSCR_SZ
) {
1153 TCGv addr
= tcg_temp_new_i32();
1154 int fr
= XREG(B7_4
);
1155 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1156 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1157 tcg_gen_subi_i32(addr
, addr
, 4);
1158 tcg_gen_qemu_st32(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1159 tcg_gen_mov_i32(REG(B11_8
), addr
);
1160 tcg_temp_free(addr
);
1163 addr
= tcg_temp_new_i32();
1164 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1165 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1166 tcg_gen_mov_i32(REG(B11_8
), addr
);
1167 tcg_temp_free(addr
);
1170 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1173 TCGv addr
= tcg_temp_new_i32();
1174 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1175 if (ctx
->fpscr
& FPSCR_SZ
) {
1176 int fr
= XREG(B11_8
);
1177 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1178 tcg_gen_addi_i32(addr
, addr
, 4);
1179 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1181 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], addr
, ctx
->memidx
);
1183 tcg_temp_free(addr
);
1186 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1189 TCGv addr
= tcg_temp_new();
1190 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1191 if (ctx
->fpscr
& FPSCR_SZ
) {
1192 int fr
= XREG(B7_4
);
1193 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1194 tcg_gen_addi_i32(addr
, addr
, 4);
1195 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1197 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1199 tcg_temp_free(addr
);
1202 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1203 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1204 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1205 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1206 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1207 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1210 if (ctx
->fpscr
& FPSCR_PR
) {
1213 if (ctx
->opcode
& 0x0110)
1214 break; /* illegal instruction */
1215 fp0
= tcg_temp_new_i64();
1216 fp1
= tcg_temp_new_i64();
1217 gen_load_fpr64(fp0
, DREG(B11_8
));
1218 gen_load_fpr64(fp1
, DREG(B7_4
));
1219 switch (ctx
->opcode
& 0xf00f) {
1220 case 0xf000: /* fadd Rm,Rn */
1221 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1223 case 0xf001: /* fsub Rm,Rn */
1224 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1226 case 0xf002: /* fmul Rm,Rn */
1227 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1229 case 0xf003: /* fdiv Rm,Rn */
1230 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1232 case 0xf004: /* fcmp/eq Rm,Rn */
1233 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1235 case 0xf005: /* fcmp/gt Rm,Rn */
1236 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1239 gen_store_fpr64(fp0
, DREG(B11_8
));
1240 tcg_temp_free_i64(fp0
);
1241 tcg_temp_free_i64(fp1
);
1243 switch (ctx
->opcode
& 0xf00f) {
1244 case 0xf000: /* fadd Rm,Rn */
1245 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1246 cpu_fregs
[FREG(B11_8
)],
1247 cpu_fregs
[FREG(B7_4
)]);
1249 case 0xf001: /* fsub Rm,Rn */
1250 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1251 cpu_fregs
[FREG(B11_8
)],
1252 cpu_fregs
[FREG(B7_4
)]);
1254 case 0xf002: /* fmul Rm,Rn */
1255 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1256 cpu_fregs
[FREG(B11_8
)],
1257 cpu_fregs
[FREG(B7_4
)]);
1259 case 0xf003: /* fdiv Rm,Rn */
1260 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1261 cpu_fregs
[FREG(B11_8
)],
1262 cpu_fregs
[FREG(B7_4
)]);
1264 case 0xf004: /* fcmp/eq Rm,Rn */
1265 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1266 cpu_fregs
[FREG(B7_4
)]);
1268 case 0xf005: /* fcmp/gt Rm,Rn */
1269 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1270 cpu_fregs
[FREG(B7_4
)]);
1276 case 0xf00e: /* fmac FR0,RM,Rn */
1279 if (ctx
->fpscr
& FPSCR_PR
) {
1280 break; /* illegal instruction */
1282 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1283 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1284 cpu_fregs
[FREG(B11_8
)]);
1290 switch (ctx
->opcode
& 0xff00) {
1291 case 0xc900: /* and #imm,R0 */
1292 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1294 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1297 addr
= tcg_temp_new();
1298 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1299 val
= tcg_temp_new();
1300 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1301 tcg_gen_andi_i32(val
, val
, B7_0
);
1302 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1304 tcg_temp_free(addr
);
1307 case 0x8b00: /* bf label */
1308 CHECK_NOT_DELAY_SLOT
1309 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1310 ctx
->pc
+ 4 + B7_0s
* 2);
1311 ctx
->bstate
= BS_BRANCH
;
1313 case 0x8f00: /* bf/s label */
1314 CHECK_NOT_DELAY_SLOT
1315 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1316 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1318 case 0x8900: /* bt label */
1319 CHECK_NOT_DELAY_SLOT
1320 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1322 ctx
->bstate
= BS_BRANCH
;
1324 case 0x8d00: /* bt/s label */
1325 CHECK_NOT_DELAY_SLOT
1326 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1327 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1329 case 0x8800: /* cmp/eq #imm,R0 */
1330 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1332 case 0xc400: /* mov.b @(disp,GBR),R0 */
1334 TCGv addr
= tcg_temp_new();
1335 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1336 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1337 tcg_temp_free(addr
);
1340 case 0xc500: /* mov.w @(disp,GBR),R0 */
1342 TCGv addr
= tcg_temp_new();
1343 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1344 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1345 tcg_temp_free(addr
);
1348 case 0xc600: /* mov.l @(disp,GBR),R0 */
1350 TCGv addr
= tcg_temp_new();
1351 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1352 tcg_gen_qemu_ld32s(REG(0), addr
, ctx
->memidx
);
1353 tcg_temp_free(addr
);
1356 case 0xc000: /* mov.b R0,@(disp,GBR) */
1358 TCGv addr
= tcg_temp_new();
1359 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1360 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1361 tcg_temp_free(addr
);
1364 case 0xc100: /* mov.w R0,@(disp,GBR) */
1366 TCGv addr
= tcg_temp_new();
1367 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1368 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1369 tcg_temp_free(addr
);
1372 case 0xc200: /* mov.l R0,@(disp,GBR) */
1374 TCGv addr
= tcg_temp_new();
1375 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1376 tcg_gen_qemu_st32(REG(0), addr
, ctx
->memidx
);
1377 tcg_temp_free(addr
);
1380 case 0x8000: /* mov.b R0,@(disp,Rn) */
1382 TCGv addr
= tcg_temp_new();
1383 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1384 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1385 tcg_temp_free(addr
);
1388 case 0x8100: /* mov.w R0,@(disp,Rn) */
1390 TCGv addr
= tcg_temp_new();
1391 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1392 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1393 tcg_temp_free(addr
);
1396 case 0x8400: /* mov.b @(disp,Rn),R0 */
1398 TCGv addr
= tcg_temp_new();
1399 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1400 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1401 tcg_temp_free(addr
);
1404 case 0x8500: /* mov.w @(disp,Rn),R0 */
1406 TCGv addr
= tcg_temp_new();
1407 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1408 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1409 tcg_temp_free(addr
);
1412 case 0xc700: /* mova @(disp,PC),R0 */
1413 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1415 case 0xcb00: /* or #imm,R0 */
1416 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1418 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1421 addr
= tcg_temp_new();
1422 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1423 val
= tcg_temp_new();
1424 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1425 tcg_gen_ori_i32(val
, val
, B7_0
);
1426 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1428 tcg_temp_free(addr
);
1431 case 0xc300: /* trapa #imm */
1434 CHECK_NOT_DELAY_SLOT
1435 imm
= tcg_const_i32(B7_0
);
1436 gen_helper_trapa(cpu_env
, imm
);
1438 ctx
->bstate
= BS_BRANCH
;
1441 case 0xc800: /* tst #imm,R0 */
1443 TCGv val
= tcg_temp_new();
1444 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1445 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1449 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1451 TCGv val
= tcg_temp_new();
1452 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1453 tcg_gen_qemu_ld8u(val
, val
, ctx
->memidx
);
1454 tcg_gen_andi_i32(val
, val
, B7_0
);
1455 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1459 case 0xca00: /* xor #imm,R0 */
1460 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1462 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1465 addr
= tcg_temp_new();
1466 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1467 val
= tcg_temp_new();
1468 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1469 tcg_gen_xori_i32(val
, val
, B7_0
);
1470 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1472 tcg_temp_free(addr
);
1477 switch (ctx
->opcode
& 0xf08f) {
1478 case 0x408e: /* ldc Rm,Rn_BANK */
1480 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1482 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1484 tcg_gen_qemu_ld32s(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
);
1485 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1487 case 0x0082: /* stc Rm_BANK,Rn */
1489 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1491 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1494 TCGv addr
= tcg_temp_new();
1495 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1496 tcg_gen_qemu_st32(ALTREG(B6_4
), addr
, ctx
->memidx
);
1497 tcg_gen_mov_i32(REG(B11_8
), addr
);
1498 tcg_temp_free(addr
);
1503 switch (ctx
->opcode
& 0xf0ff) {
1504 case 0x0023: /* braf Rn */
1505 CHECK_NOT_DELAY_SLOT
1506 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1507 ctx
->flags
|= DELAY_SLOT
;
1508 ctx
->delayed_pc
= (uint32_t) - 1;
1510 case 0x0003: /* bsrf Rn */
1511 CHECK_NOT_DELAY_SLOT
1512 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1513 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1514 ctx
->flags
|= DELAY_SLOT
;
1515 ctx
->delayed_pc
= (uint32_t) - 1;
1517 case 0x4015: /* cmp/pl Rn */
1518 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1520 case 0x4011: /* cmp/pz Rn */
1521 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1523 case 0x4010: /* dt Rn */
1524 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1525 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1527 case 0x402b: /* jmp @Rn */
1528 CHECK_NOT_DELAY_SLOT
1529 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1530 ctx
->flags
|= DELAY_SLOT
;
1531 ctx
->delayed_pc
= (uint32_t) - 1;
1533 case 0x400b: /* jsr @Rn */
1534 CHECK_NOT_DELAY_SLOT
1535 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1536 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1537 ctx
->flags
|= DELAY_SLOT
;
1538 ctx
->delayed_pc
= (uint32_t) - 1;
1540 case 0x400e: /* ldc Rm,SR */
1542 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1543 ctx
->bstate
= BS_STOP
;
1545 case 0x4007: /* ldc.l @Rm+,SR */
1548 TCGv val
= tcg_temp_new();
1549 tcg_gen_qemu_ld32s(val
, REG(B11_8
), ctx
->memidx
);
1550 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1552 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1553 ctx
->bstate
= BS_STOP
;
1556 case 0x0002: /* stc SR,Rn */
1558 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1560 case 0x4003: /* stc SR,@-Rn */
1563 TCGv addr
= tcg_temp_new();
1564 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1565 tcg_gen_qemu_st32(cpu_sr
, addr
, ctx
->memidx
);
1566 tcg_gen_mov_i32(REG(B11_8
), addr
);
1567 tcg_temp_free(addr
);
1570 #define LD(reg,ldnum,ldpnum,prechk) \
1573 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1577 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1578 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1580 #define ST(reg,stnum,stpnum,prechk) \
1583 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1588 TCGv addr = tcg_temp_new(); \
1589 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1590 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1591 tcg_gen_mov_i32(REG(B11_8), addr); \
1592 tcg_temp_free(addr); \
1595 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1596 LD(reg,ldnum,ldpnum,prechk) \
1597 ST(reg,stnum,stpnum,prechk)
1598 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1599 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1600 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1601 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1602 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1603 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1604 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1605 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1606 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1607 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1608 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1609 case 0x406a: /* lds Rm,FPSCR */
1611 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1612 ctx
->bstate
= BS_STOP
;
1614 case 0x4066: /* lds.l @Rm+,FPSCR */
1617 TCGv addr
= tcg_temp_new();
1618 tcg_gen_qemu_ld32s(addr
, REG(B11_8
), ctx
->memidx
);
1619 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1620 gen_helper_ld_fpscr(cpu_env
, addr
);
1621 tcg_temp_free(addr
);
1622 ctx
->bstate
= BS_STOP
;
1625 case 0x006a: /* sts FPSCR,Rn */
1627 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1629 case 0x4062: /* sts FPSCR,@-Rn */
1633 val
= tcg_temp_new();
1634 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1635 addr
= tcg_temp_new();
1636 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1637 tcg_gen_qemu_st32(val
, addr
, ctx
->memidx
);
1638 tcg_gen_mov_i32(REG(B11_8
), addr
);
1639 tcg_temp_free(addr
);
1643 case 0x00c3: /* movca.l R0,@Rm */
1645 TCGv val
= tcg_temp_new();
1646 tcg_gen_qemu_ld32u(val
, REG(B11_8
), ctx
->memidx
);
1647 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1648 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1650 ctx
->has_movcal
= 1;
1653 /* MOVUA.L @Rm,R0 (Rm) -> R0
1654 Load non-boundary-aligned data */
1655 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1658 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1659 Load non-boundary-aligned data */
1660 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1661 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1663 case 0x0029: /* movt Rn */
1664 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1669 If (T == 1) R0 -> (Rn)
1672 if (ctx
->features
& SH_FEATURE_SH4A
) {
1673 int label
= gen_new_label();
1675 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1676 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1677 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1678 gen_set_label(label
);
1679 tcg_gen_movi_i32(cpu_ldst
, 0);
1687 When interrupt/exception
1690 if (ctx
->features
& SH_FEATURE_SH4A
) {
1691 tcg_gen_movi_i32(cpu_ldst
, 0);
1692 tcg_gen_qemu_ld32s(REG(0), REG(B11_8
), ctx
->memidx
);
1693 tcg_gen_movi_i32(cpu_ldst
, 1);
1697 case 0x0093: /* ocbi @Rn */
1699 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1702 case 0x00a3: /* ocbp @Rn */
1703 case 0x00b3: /* ocbwb @Rn */
1704 /* These instructions are supposed to do nothing in case of
1705 a cache miss. Given that we only partially emulate caches
1706 it is safe to simply ignore them. */
1708 case 0x0083: /* pref @Rn */
1710 case 0x00d3: /* prefi @Rn */
1711 if (ctx
->features
& SH_FEATURE_SH4A
)
1715 case 0x00e3: /* icbi @Rn */
1716 if (ctx
->features
& SH_FEATURE_SH4A
)
1720 case 0x00ab: /* synco */
1721 if (ctx
->features
& SH_FEATURE_SH4A
)
1725 case 0x4024: /* rotcl Rn */
1727 TCGv tmp
= tcg_temp_new();
1728 tcg_gen_mov_i32(tmp
, cpu_sr
);
1729 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1730 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1731 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1735 case 0x4025: /* rotcr Rn */
1737 TCGv tmp
= tcg_temp_new();
1738 tcg_gen_mov_i32(tmp
, cpu_sr
);
1739 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1740 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1741 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1745 case 0x4004: /* rotl Rn */
1746 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1747 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1749 case 0x4005: /* rotr Rn */
1750 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1751 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1753 case 0x4000: /* shll Rn */
1754 case 0x4020: /* shal Rn */
1755 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1756 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1758 case 0x4021: /* shar Rn */
1759 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1760 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1762 case 0x4001: /* shlr Rn */
1763 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1764 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1766 case 0x4008: /* shll2 Rn */
1767 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1769 case 0x4018: /* shll8 Rn */
1770 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1772 case 0x4028: /* shll16 Rn */
1773 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1775 case 0x4009: /* shlr2 Rn */
1776 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1778 case 0x4019: /* shlr8 Rn */
1779 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1781 case 0x4029: /* shlr16 Rn */
1782 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1784 case 0x401b: /* tas.b @Rn */
1787 addr
= tcg_temp_local_new();
1788 tcg_gen_mov_i32(addr
, REG(B11_8
));
1789 val
= tcg_temp_local_new();
1790 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1791 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1792 tcg_gen_ori_i32(val
, val
, 0x80);
1793 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1795 tcg_temp_free(addr
);
1798 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1800 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1802 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1804 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1806 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1808 if (ctx
->fpscr
& FPSCR_PR
) {
1810 if (ctx
->opcode
& 0x0100)
1811 break; /* illegal instruction */
1812 fp
= tcg_temp_new_i64();
1813 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1814 gen_store_fpr64(fp
, DREG(B11_8
));
1815 tcg_temp_free_i64(fp
);
1818 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1821 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1823 if (ctx
->fpscr
& FPSCR_PR
) {
1825 if (ctx
->opcode
& 0x0100)
1826 break; /* illegal instruction */
1827 fp
= tcg_temp_new_i64();
1828 gen_load_fpr64(fp
, DREG(B11_8
));
1829 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1830 tcg_temp_free_i64(fp
);
1833 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1836 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1839 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1842 case 0xf05d: /* fabs FRn/DRn */
1844 if (ctx
->fpscr
& FPSCR_PR
) {
1845 if (ctx
->opcode
& 0x0100)
1846 break; /* illegal instruction */
1847 TCGv_i64 fp
= tcg_temp_new_i64();
1848 gen_load_fpr64(fp
, DREG(B11_8
));
1849 gen_helper_fabs_DT(fp
, fp
);
1850 gen_store_fpr64(fp
, DREG(B11_8
));
1851 tcg_temp_free_i64(fp
);
1853 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1856 case 0xf06d: /* fsqrt FRn */
1858 if (ctx
->fpscr
& FPSCR_PR
) {
1859 if (ctx
->opcode
& 0x0100)
1860 break; /* illegal instruction */
1861 TCGv_i64 fp
= tcg_temp_new_i64();
1862 gen_load_fpr64(fp
, DREG(B11_8
));
1863 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1864 gen_store_fpr64(fp
, DREG(B11_8
));
1865 tcg_temp_free_i64(fp
);
1867 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1868 cpu_fregs
[FREG(B11_8
)]);
1871 case 0xf07d: /* fsrra FRn */
1874 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1876 if (!(ctx
->fpscr
& FPSCR_PR
)) {
1877 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1880 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1882 if (!(ctx
->fpscr
& FPSCR_PR
)) {
1883 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1886 case 0xf0ad: /* fcnvsd FPUL,DRn */
1889 TCGv_i64 fp
= tcg_temp_new_i64();
1890 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1891 gen_store_fpr64(fp
, DREG(B11_8
));
1892 tcg_temp_free_i64(fp
);
1895 case 0xf0bd: /* fcnvds DRn,FPUL */
1898 TCGv_i64 fp
= tcg_temp_new_i64();
1899 gen_load_fpr64(fp
, DREG(B11_8
));
1900 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1901 tcg_temp_free_i64(fp
);
1904 case 0xf0ed: /* fipr FVm,FVn */
1906 if ((ctx
->fpscr
& FPSCR_PR
) == 0) {
1908 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1909 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1910 gen_helper_fipr(cpu_env
, m
, n
);
1916 case 0xf0fd: /* ftrv XMTRX,FVn */
1918 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1919 (ctx
->fpscr
& FPSCR_PR
) == 0) {
1921 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1922 gen_helper_ftrv(cpu_env
, n
);
1929 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1930 ctx
->opcode
, ctx
->pc
);
1933 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1934 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1936 gen_helper_raise_illegal_instruction(cpu_env
);
1938 ctx
->bstate
= BS_EXCP
;
/* Translate the single guest instruction at ctx->pc, then perform the
   delay-slot bookkeeping that surrounds it.
   NOTE(review): the source text of this function was partially garbled by
   extraction; the call into the opcode decoder, the gen_jump() arm and the
   closing braces were reconstructed — verify against upstream QEMU.  */
static void decode_opc(DisasContext * ctx)
{
    /* Snapshot flags before decoding: _decode_opc() may set
       DELAY_SLOT / DELAY_SLOT_CONDITIONAL for the *next* instruction,
       while old_flags tells us whether *this* one sits in a delay slot.  */
    uint32_t old_flags = ctx->flags;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
        tcg_gen_debug_insn_start(ctx->pc);
    }

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        }
        ctx->flags = 0;
        /* The delayed branch resolves after its slot instruction, so the
           translation block ends here.  */
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
        }
    }

    /* go into a delay slot */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
}
/* Core translation loop: turn guest SH-4 instructions starting at tb->pc
   into a TCG op stream for this TranslationBlock.
   When search_pc is non-zero, additionally record per-op metadata
   (gen_opc_pc / gen_opc_hflags / gen_opc_instr_start / gen_opc_icount) so
   that CPU state can later be restored at an arbitrary guest instruction
   (see restore_state_to_opc).
   NOTE(review): the extracted source text of this function had many lines
   dropped (local declarations, loop braces, the search_pc bookkeeping and
   the instruction-fetch/decode calls); those were reconstructed from the
   visible tokens and the canonical upstream layout — verify against
   upstream QEMU before relying on the details.  */
static inline void
gen_intermediate_code_internal(CPUSH4State * env, TranslationBlock * tb,
                               int search_pc)
{
    DisasContext ctx;
    target_ulong pc_start;
    static uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int i, ii;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.flags = (uint32_t)tb->flags;
    ctx.bstate = BS_NONE;
    ctx.sr = env->sr;
    ctx.fpscr = env->fpscr;
    /* memidx selects the MMU index: user mode (SR.MD clear) uses index 1,
       privileged mode index 0.  */
    ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx.delayed_pc = -1; /* use delayed pc from env pointer */
    ctx.tb = tb;
    ctx.singlestep_enabled = env->singlestep_enabled;
    ctx.features = env->features;
    ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);

    ii = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
    gen_icount_start();
    while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (ctx.pc == bp->pc) {
                    /* We have hit a breakpoint - make sure PC is up-to-date */
                    tcg_gen_movi_i32(cpu_pc, ctx.pc);
                    gen_helper_debug(cpu_env);
                    ctx.bstate = BS_EXCP;
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record the mapping from TCG op index to guest pc/flags for
               this instruction; pad any skipped op slots with 0.  */
            i = gen_opc_ptr - gen_opc_buf;
            if (ii < i) {
                ii++;
                while (ii < i)
                    gen_opc_instr_start[ii++] = 0;
            }
            gen_opc_pc[ii] = ctx.pc;
            gen_opc_hflags[ii] = ctx.flags;
            gen_opc_instr_start[ii] = 1;
            gen_opc_icount[ii] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
#if 0
        fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
        fflush(stderr);
#endif
        ctx.opcode = cpu_lduw_code(env, ctx.pc);
        decode_opc(&ctx);
        num_insns++;
        ctx.pc += 2;
        /* Stop at a guest page boundary so the TB never spans two pages.  */
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (env->singlestep_enabled)
            break;
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    if (env->singlestep_enabled) {
        tcg_gen_movi_i32(cpu_pc, ctx.pc);
        gen_helper_debug(cpu_env);
    } else {
        switch (ctx.bstate) {
        case BS_STOP:
            /* gen_op_interrupt_restart(); */
            /* fall through */
        case BS_NONE:
            if (ctx.flags) {
                gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
            }
            gen_goto_tb(&ctx, 0, ctx.pc);
            break;
        case BS_EXCP:
            /* gen_op_interrupt_restart(); */
            tcg_gen_exit_tb(0);
            break;
        case BS_BRANCH:
        default:
            break;
        }
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        /* Zero-fill the remaining op slots of the metadata arrays.  */
        i = gen_opc_ptr - gen_opc_buf;
        ii++;
        while (ii <= i)
            gen_opc_instr_start[ii++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef SH4_DEBUG_DISAS
    qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
#endif
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN:\n");      /* , lookup_symbol(pc_start)); */
        log_target_disas(pc_start, ctx.pc - pc_start, 0);
        qemu_log("\n");
    }
}
/* Translate a TB for execution: emit the TCG op stream only, without the
   pc-search metadata (search_pc = 0).  */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
/* Retranslate a TB recording per-op guest pc/flags metadata (search_pc = 1),
   used when CPU state must be restored at a faulting instruction.  */
void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
/* Restore env->pc and env->flags from the per-instruction arrays filled in
   during the search_pc retranslation pass; pc_pos indexes gen_opc_pc and
   gen_opc_hflags for the instruction where execution stopped.  */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
    env->flags = gen_opc_hflags[pc_pos];
}