4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #define SH4_DEBUG_DISAS
22 //#define SH4_SINGLE_STEP
/* Per-translation-block decoder state threaded through _decode_opc.
   NOTE(review): this view of the file is fragmentary -- several struct
   fields between `tb` and `singlestep_enabled` are not visible here. */
32 typedef struct DisasContext
{
33 struct TranslationBlock
*tb
;
42 int singlestep_enabled
;
/* User-mode test: in user-only builds everything is user mode; otherwise
   user mode is the absence of the SR_MD bit in the saved status register. */
47 #if defined(CONFIG_USER_ONLY)
48 #define IS_USER(ctx) 1
50 #define IS_USER(ctx) (!(ctx->sr & SR_MD))
/* Translation-stop states stored in DisasContext (the enum wrapper lines
   are not visible in this fragment). */
54 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
57 BS_STOP
= 1, /* We want to stop translation for any reason */
58 BS_BRANCH
= 2, /* We reached a branch condition */
59 BS_EXCP
= 3, /* We reached an exception condition */
62 /* global register indexes */
/* TCG handles for the architectural SH-4 register file; initialized once
   by sh4_translate_init(). */
63 static TCGv_ptr cpu_env
;
64 static TCGv cpu_gregs
[24];
65 static TCGv cpu_pc
, cpu_sr
, cpu_ssr
, cpu_spc
, cpu_gbr
;
66 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
67 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
68 static TCGv cpu_fregs
[32];
70 /* internal register indexes */
/* Emulator-internal state (delay-slot flags, pending branch target). */
71 static TCGv cpu_flags
, cpu_delayed_pc
;
/* Per-op translator flags recorded for restore-state after an exception. */
73 static uint32_t gen_opc_hflags
[OPC_BUF_SIZE
];
75 #include "gen-icount.h"
77 static void sh4_translate_init(void)
80 static int done_init
= 0;
81 static const char * const gregnames
[24] = {
82 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
83 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
84 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
85 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
86 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
88 static const char * const fregnames
[32] = {
89 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
90 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
91 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
92 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
93 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
94 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
95 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
96 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
102 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
104 for (i
= 0; i
< 24; i
++)
105 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
106 offsetof(CPUSH4State
, gregs
[i
]),
109 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
110 offsetof(CPUSH4State
, pc
), "PC");
111 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
112 offsetof(CPUSH4State
, sr
), "SR");
113 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
114 offsetof(CPUSH4State
, ssr
), "SSR");
115 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
116 offsetof(CPUSH4State
, spc
), "SPC");
117 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
118 offsetof(CPUSH4State
, gbr
), "GBR");
119 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
120 offsetof(CPUSH4State
, vbr
), "VBR");
121 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
122 offsetof(CPUSH4State
, sgr
), "SGR");
123 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
124 offsetof(CPUSH4State
, dbr
), "DBR");
125 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
126 offsetof(CPUSH4State
, mach
), "MACH");
127 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
128 offsetof(CPUSH4State
, macl
), "MACL");
129 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
130 offsetof(CPUSH4State
, pr
), "PR");
131 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
132 offsetof(CPUSH4State
, fpscr
), "FPSCR");
133 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
134 offsetof(CPUSH4State
, fpul
), "FPUL");
136 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
137 offsetof(CPUSH4State
, flags
), "_flags_");
138 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
139 offsetof(CPUSH4State
, delayed_pc
),
141 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
142 offsetof(CPUSH4State
, ldst
), "_ldst_");
144 for (i
= 0; i
< 32; i
++)
145 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
146 offsetof(CPUSH4State
, fregs
[i
]),
149 /* register helpers */
156 void cpu_dump_state(CPUSH4State
* env
, FILE * f
,
157 int (*cpu_fprintf
) (FILE * f
, const char *fmt
, ...),
161 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
162 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
163 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
164 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
165 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
166 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
167 for (i
= 0; i
< 24; i
+= 4) {
168 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
169 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
170 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
172 if (env
->flags
& DELAY_SLOT
) {
173 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
175 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
176 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
181 void cpu_state_reset(CPUSH4State
*env
)
183 cpu_reset(ENV_GET_CPU(env
));
/* Table of supported SH-4 CPU models.
   NOTE(review): this view is fragmentary -- the .name/.pvr/.prr
   initializers of each entry (and the brace structure) are missing;
   only the id/cvr/features fields shown below are visible. */
195 static sh4_def_t sh4_defs
[] = {
198 .id
= SH_CPU_SH7750R
,
202 .features
= SH_FEATURE_BCR3_AND_BCR4
,
205 .id
= SH_CPU_SH7751R
,
208 .cvr
= 0x00110000, /* Neutered caches, should be 0x20480000 */
209 .features
= SH_FEATURE_BCR3_AND_BCR4
,
/* Third entry: an SH4A-family part (only the features field visible). */
216 .features
= SH_FEATURE_SH4A
,
220 static const sh4_def_t
*cpu_sh4_find_by_name(const char *name
)
224 if (strcasecmp(name
, "any") == 0)
227 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
228 if (strcasecmp(name
, sh4_defs
[i
].name
) == 0)
234 void sh4_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
238 for (i
= 0; i
< ARRAY_SIZE(sh4_defs
); i
++)
239 (*cpu_fprintf
)(f
, "%s\n", sh4_defs
[i
].name
);
/* Apply a model definition to a freshly created CPU.
   NOTE(review): the body of this function is not visible in this
   fragment -- only the signature survives. */
242 static void cpu_register(CPUSH4State
*env
, const sh4_def_t
*def
)
/* Create and initialize a CPU for the model named by cpu_model.
   Resolves the model definition, instantiates the QOM CPU object,
   copies the feature bits, sets up the movca.l backup list, registers
   the TCG globals, and records the model string.
   NOTE(review): fragmentary view -- local declarations, the failed-lookup
   path and the function tail (return value) are not visible here. */
250 CPUSH4State
*cpu_sh4_init(const char *cpu_model
)
254 const sh4_def_t
*def
;
/* Resolve the requested model name to a table entry. */
256 def
= cpu_sh4_find_by_name(cpu_model
);
/* Instantiate the QOM CPU object for this model. */
259 cpu
= SUPERH_CPU(object_new(TYPE_SUPERH_CPU
));
261 env
->features
= def
->features
;
/* Empty movca.l backup list: tail points at the list head. */
263 env
->movcal_backup_tail
= &(env
->movcal_backup
);
264 sh4_translate_init();
265 env
->cpu_model_str
= cpu_model
;
270 cpu_register(env
, def
);
272 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
274 TranslationBlock
*tb
;
277 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
278 !ctx
->singlestep_enabled
) {
279 /* Use a direct jump if in same page and singlestep not enabled */
281 tcg_gen_movi_i32(cpu_pc
, dest
);
282 tcg_gen_exit_tb((tcg_target_long
)tb
+ n
);
284 tcg_gen_movi_i32(cpu_pc
, dest
);
285 if (ctx
->singlestep_enabled
)
291 static void gen_jump(DisasContext
* ctx
)
293 if (ctx
->delayed_pc
== (uint32_t) - 1) {
294 /* Target is not statically known, it comes necessarily from a
295 delayed jump as immediate jump are conditinal jumps */
296 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
297 if (ctx
->singlestep_enabled
)
301 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
305 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
308 int label
= gen_new_label();
309 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
311 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
312 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
313 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
314 gen_set_label(label
);
317 /* Immediate conditional jump (bt or bf) */
318 static void gen_conditional_jump(DisasContext
* ctx
,
319 target_ulong ift
, target_ulong ifnott
)
324 l1
= gen_new_label();
326 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
327 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
328 gen_goto_tb(ctx
, 0, ifnott
);
330 gen_goto_tb(ctx
, 1, ift
);
333 /* Delayed conditional jump (bt or bf) */
334 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
339 l1
= gen_new_label();
341 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
342 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
343 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
345 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
349 static inline void gen_set_t(void)
351 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
354 static inline void gen_clr_t(void)
356 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
359 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
364 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
365 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
366 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
371 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
376 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
377 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
378 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
383 static inline void gen_store_flags(uint32_t flags
)
385 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
386 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
389 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
391 TCGv tmp
= tcg_temp_new();
396 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
397 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
399 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
401 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
402 tcg_gen_or_i32(t0
, t0
, tmp
);
407 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
409 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
412 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
414 TCGv_i32 tmp
= tcg_temp_new_i32();
415 tcg_gen_trunc_i64_i32(tmp
, t
);
416 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
417 tcg_gen_shri_i64(t
, t
, 32);
418 tcg_gen_trunc_i64_i32(tmp
, t
);
419 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
420 tcg_temp_free_i32(tmp
);
/* Instruction-field extraction helpers: BX_Y selects bits Y..X of the
   current 16-bit opcode; the `s` suffix means sign-extended. */
423 #define B3_0 (ctx->opcode & 0xf)
424 #define B6_4 ((ctx->opcode >> 4) & 0x7)
425 #define B7_4 ((ctx->opcode >> 4) & 0xf)
426 #define B7_0 (ctx->opcode & 0xff)
427 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit signed branch displacement (manual sign extension). */
428 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
429 (ctx->opcode & 0xfff))
430 #define B11_8 ((ctx->opcode >> 8) & 0xf)
431 #define B15_12 ((ctx->opcode >> 12) & 0xf)
/* REG: R0..R7 resolve to bank 1 when both SR_MD and SR_RB are set;
   ALTREG selects the opposite bank for those registers. */
433 #define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
434 (cpu_gregs[x + 16]) : (cpu_gregs[x]))
436 #define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
437 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
/* FP register selectors honoring the FPSCR.FR bank swap; XHACK interleaves
   the XD register numbering. */
439 #define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
440 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
441 #define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
442 #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise slot-illegal if the current instruction sits in a delay slot.
   NOTE(review): the brace/do-while lines of these three check macros are
   not visible in this fragment. */
444 #define CHECK_NOT_DELAY_SLOT \
445 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
447 gen_helper_raise_slot_illegal_instruction(); \
448 ctx->bstate = BS_EXCP; \
/* Raise illegal (or slot-illegal) when executed from user mode. */
452 #define CHECK_PRIVILEGED \
453 if (IS_USER(ctx)) { \
454 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
455 gen_helper_raise_slot_illegal_instruction(); \
457 gen_helper_raise_illegal_instruction(); \
459 ctx->bstate = BS_EXCP; \
/* Raise FPU-disable (or slot variant) when SR_FD blocks FP insns. */
463 #define CHECK_FPU_ENABLED \
464 if (ctx->flags & SR_FD) { \
465 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
466 gen_helper_raise_slot_fpu_disable(); \
468 gen_helper_raise_fpu_disable(); \
470 ctx->bstate = BS_EXCP; \
474 static void _decode_opc(DisasContext
* ctx
)
476 /* This code tries to make movcal emulation sufficiently
477 accurate for Linux purposes. This instruction writes
478 memory, and prior to that, always allocates a cache line.
479 It is used in two contexts:
480 - in memcpy, where data is copied in blocks, the first write
481 of to a block uses movca.l for performance.
482 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
483 to flush the cache. Here, the data written by movcal.l is never
484 written to memory, and the data written is just bogus.
486 To simulate this, we simulate movcal.l, we store the value to memory,
487 but we also remember the previous content. If we see ocbi, we check
488 if movcal.l for that address was done previously. If so, the write should
489 not have hit the memory, so we restore the previous content.
490 When we see an instruction that is neither movca.l
491 nor ocbi, the previous content is discarded.
493 To optimize, we only try to flush stores when we're at the start of
494 TB, or if we already saw movca.l in this TB and did not flush stores
498 int opcode
= ctx
->opcode
& 0xf0ff;
499 if (opcode
!= 0x0093 /* ocbi */
500 && opcode
!= 0x00c3 /* movca.l */)
502 gen_helper_discard_movcal_backup ();
508 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
511 switch (ctx
->opcode
) {
512 case 0x0019: /* div0u */
513 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
515 case 0x000b: /* rts */
517 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
518 ctx
->flags
|= DELAY_SLOT
;
519 ctx
->delayed_pc
= (uint32_t) - 1;
521 case 0x0028: /* clrmac */
522 tcg_gen_movi_i32(cpu_mach
, 0);
523 tcg_gen_movi_i32(cpu_macl
, 0);
525 case 0x0048: /* clrs */
526 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
528 case 0x0008: /* clrt */
531 case 0x0038: /* ldtlb */
535 case 0x002b: /* rte */
538 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
539 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
540 ctx
->flags
|= DELAY_SLOT
;
541 ctx
->delayed_pc
= (uint32_t) - 1;
543 case 0x0058: /* sets */
544 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
546 case 0x0018: /* sett */
549 case 0xfbfd: /* frchg */
550 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
551 ctx
->bstate
= BS_STOP
;
553 case 0xf3fd: /* fschg */
554 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
555 ctx
->bstate
= BS_STOP
;
557 case 0x0009: /* nop */
559 case 0x001b: /* sleep */
561 gen_helper_sleep(tcg_const_i32(ctx
->pc
+ 2));
565 switch (ctx
->opcode
& 0xf000) {
566 case 0x1000: /* mov.l Rm,@(disp,Rn) */
568 TCGv addr
= tcg_temp_new();
569 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
570 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
574 case 0x5000: /* mov.l @(disp,Rm),Rn */
576 TCGv addr
= tcg_temp_new();
577 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
578 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
582 case 0xe000: /* mov #imm,Rn */
583 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
585 case 0x9000: /* mov.w @(disp,PC),Rn */
587 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
588 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
592 case 0xd000: /* mov.l @(disp,PC),Rn */
594 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
595 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
599 case 0x7000: /* add #imm,Rn */
600 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
602 case 0xa000: /* bra disp */
604 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
605 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
606 ctx
->flags
|= DELAY_SLOT
;
608 case 0xb000: /* bsr disp */
610 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
611 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
612 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
613 ctx
->flags
|= DELAY_SLOT
;
617 switch (ctx
->opcode
& 0xf00f) {
618 case 0x6003: /* mov Rm,Rn */
619 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
621 case 0x2000: /* mov.b Rm,@Rn */
622 tcg_gen_qemu_st8(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
624 case 0x2001: /* mov.w Rm,@Rn */
625 tcg_gen_qemu_st16(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
627 case 0x2002: /* mov.l Rm,@Rn */
628 tcg_gen_qemu_st32(REG(B7_4
), REG(B11_8
), ctx
->memidx
);
630 case 0x6000: /* mov.b @Rm,Rn */
631 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
633 case 0x6001: /* mov.w @Rm,Rn */
634 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
636 case 0x6002: /* mov.l @Rm,Rn */
637 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
639 case 0x2004: /* mov.b Rm,@-Rn */
641 TCGv addr
= tcg_temp_new();
642 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
643 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
); /* might cause re-execution */
644 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
648 case 0x2005: /* mov.w Rm,@-Rn */
650 TCGv addr
= tcg_temp_new();
651 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
652 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
653 tcg_gen_mov_i32(REG(B11_8
), addr
);
657 case 0x2006: /* mov.l Rm,@-Rn */
659 TCGv addr
= tcg_temp_new();
660 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
661 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
662 tcg_gen_mov_i32(REG(B11_8
), addr
);
665 case 0x6004: /* mov.b @Rm+,Rn */
666 tcg_gen_qemu_ld8s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
668 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
670 case 0x6005: /* mov.w @Rm+,Rn */
671 tcg_gen_qemu_ld16s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
673 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
675 case 0x6006: /* mov.l @Rm+,Rn */
676 tcg_gen_qemu_ld32s(REG(B11_8
), REG(B7_4
), ctx
->memidx
);
678 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
680 case 0x0004: /* mov.b Rm,@(R0,Rn) */
682 TCGv addr
= tcg_temp_new();
683 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
684 tcg_gen_qemu_st8(REG(B7_4
), addr
, ctx
->memidx
);
688 case 0x0005: /* mov.w Rm,@(R0,Rn) */
690 TCGv addr
= tcg_temp_new();
691 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
692 tcg_gen_qemu_st16(REG(B7_4
), addr
, ctx
->memidx
);
696 case 0x0006: /* mov.l Rm,@(R0,Rn) */
698 TCGv addr
= tcg_temp_new();
699 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
700 tcg_gen_qemu_st32(REG(B7_4
), addr
, ctx
->memidx
);
704 case 0x000c: /* mov.b @(R0,Rm),Rn */
706 TCGv addr
= tcg_temp_new();
707 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
708 tcg_gen_qemu_ld8s(REG(B11_8
), addr
, ctx
->memidx
);
712 case 0x000d: /* mov.w @(R0,Rm),Rn */
714 TCGv addr
= tcg_temp_new();
715 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
716 tcg_gen_qemu_ld16s(REG(B11_8
), addr
, ctx
->memidx
);
720 case 0x000e: /* mov.l @(R0,Rm),Rn */
722 TCGv addr
= tcg_temp_new();
723 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
724 tcg_gen_qemu_ld32s(REG(B11_8
), addr
, ctx
->memidx
);
728 case 0x6008: /* swap.b Rm,Rn */
731 high
= tcg_temp_new();
732 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
733 low
= tcg_temp_new();
734 tcg_gen_ext16u_i32(low
, REG(B7_4
));
735 tcg_gen_bswap16_i32(low
, low
);
736 tcg_gen_or_i32(REG(B11_8
), high
, low
);
741 case 0x6009: /* swap.w Rm,Rn */
744 high
= tcg_temp_new();
745 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
746 low
= tcg_temp_new();
747 tcg_gen_shri_i32(low
, REG(B7_4
), 16);
748 tcg_gen_ext16u_i32(low
, low
);
749 tcg_gen_or_i32(REG(B11_8
), high
, low
);
754 case 0x200d: /* xtrct Rm,Rn */
757 high
= tcg_temp_new();
758 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
759 low
= tcg_temp_new();
760 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
761 tcg_gen_ext16u_i32(low
, low
);
762 tcg_gen_or_i32(REG(B11_8
), high
, low
);
767 case 0x300c: /* add Rm,Rn */
768 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
770 case 0x300e: /* addc Rm,Rn */
771 gen_helper_addc(REG(B11_8
), REG(B7_4
), REG(B11_8
));
773 case 0x300f: /* addv Rm,Rn */
774 gen_helper_addv(REG(B11_8
), REG(B7_4
), REG(B11_8
));
776 case 0x2009: /* and Rm,Rn */
777 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
779 case 0x3000: /* cmp/eq Rm,Rn */
780 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
782 case 0x3003: /* cmp/ge Rm,Rn */
783 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
785 case 0x3007: /* cmp/gt Rm,Rn */
786 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
788 case 0x3006: /* cmp/hi Rm,Rn */
789 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
791 case 0x3002: /* cmp/hs Rm,Rn */
792 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
794 case 0x200c: /* cmp/str Rm,Rn */
796 TCGv cmp1
= tcg_temp_new();
797 TCGv cmp2
= tcg_temp_new();
798 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
799 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
800 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
801 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
802 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
803 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
804 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
805 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
806 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
807 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
808 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
809 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
810 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
811 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
816 case 0x2007: /* div0s Rm,Rn */
818 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
819 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
820 TCGv val
= tcg_temp_new();
821 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
822 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
826 case 0x3004: /* div1 Rm,Rn */
827 gen_helper_div1(REG(B11_8
), REG(B7_4
), REG(B11_8
));
829 case 0x300d: /* dmuls.l Rm,Rn */
831 TCGv_i64 tmp1
= tcg_temp_new_i64();
832 TCGv_i64 tmp2
= tcg_temp_new_i64();
834 tcg_gen_ext_i32_i64(tmp1
, REG(B7_4
));
835 tcg_gen_ext_i32_i64(tmp2
, REG(B11_8
));
836 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
837 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
838 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
839 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
841 tcg_temp_free_i64(tmp2
);
842 tcg_temp_free_i64(tmp1
);
845 case 0x3005: /* dmulu.l Rm,Rn */
847 TCGv_i64 tmp1
= tcg_temp_new_i64();
848 TCGv_i64 tmp2
= tcg_temp_new_i64();
850 tcg_gen_extu_i32_i64(tmp1
, REG(B7_4
));
851 tcg_gen_extu_i32_i64(tmp2
, REG(B11_8
));
852 tcg_gen_mul_i64(tmp1
, tmp1
, tmp2
);
853 tcg_gen_trunc_i64_i32(cpu_macl
, tmp1
);
854 tcg_gen_shri_i64(tmp1
, tmp1
, 32);
855 tcg_gen_trunc_i64_i32(cpu_mach
, tmp1
);
857 tcg_temp_free_i64(tmp2
);
858 tcg_temp_free_i64(tmp1
);
861 case 0x600e: /* exts.b Rm,Rn */
862 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
864 case 0x600f: /* exts.w Rm,Rn */
865 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
867 case 0x600c: /* extu.b Rm,Rn */
868 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
870 case 0x600d: /* extu.w Rm,Rn */
871 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
873 case 0x000f: /* mac.l @Rm+,@Rn+ */
876 arg0
= tcg_temp_new();
877 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
878 arg1
= tcg_temp_new();
879 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
880 gen_helper_macl(arg0
, arg1
);
883 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
884 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
887 case 0x400f: /* mac.w @Rm+,@Rn+ */
890 arg0
= tcg_temp_new();
891 tcg_gen_qemu_ld32s(arg0
, REG(B7_4
), ctx
->memidx
);
892 arg1
= tcg_temp_new();
893 tcg_gen_qemu_ld32s(arg1
, REG(B11_8
), ctx
->memidx
);
894 gen_helper_macw(arg0
, arg1
);
897 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
898 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
901 case 0x0007: /* mul.l Rm,Rn */
902 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
904 case 0x200f: /* muls.w Rm,Rn */
907 arg0
= tcg_temp_new();
908 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
909 arg1
= tcg_temp_new();
910 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
911 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
916 case 0x200e: /* mulu.w Rm,Rn */
919 arg0
= tcg_temp_new();
920 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
921 arg1
= tcg_temp_new();
922 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
923 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
928 case 0x600b: /* neg Rm,Rn */
929 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
931 case 0x600a: /* negc Rm,Rn */
935 tcg_gen_neg_i32(t0
, REG(B7_4
));
937 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
938 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
939 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
940 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
941 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
942 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
943 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
948 case 0x6007: /* not Rm,Rn */
949 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
951 case 0x200b: /* or Rm,Rn */
952 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
954 case 0x400c: /* shad Rm,Rn */
956 int label1
= gen_new_label();
957 int label2
= gen_new_label();
958 int label3
= gen_new_label();
959 int label4
= gen_new_label();
961 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
962 /* Rm positive, shift to the left */
963 shift
= tcg_temp_new();
964 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
965 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
966 tcg_temp_free(shift
);
968 /* Rm negative, shift to the right */
969 gen_set_label(label1
);
970 shift
= tcg_temp_new();
971 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
972 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
973 tcg_gen_not_i32(shift
, REG(B7_4
));
974 tcg_gen_andi_i32(shift
, shift
, 0x1f);
975 tcg_gen_addi_i32(shift
, shift
, 1);
976 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
977 tcg_temp_free(shift
);
980 gen_set_label(label2
);
981 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
982 tcg_gen_movi_i32(REG(B11_8
), 0);
984 gen_set_label(label3
);
985 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
986 gen_set_label(label4
);
989 case 0x400d: /* shld Rm,Rn */
991 int label1
= gen_new_label();
992 int label2
= gen_new_label();
993 int label3
= gen_new_label();
995 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
996 /* Rm positive, shift to the left */
997 shift
= tcg_temp_new();
998 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
999 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
1000 tcg_temp_free(shift
);
1002 /* Rm negative, shift to the right */
1003 gen_set_label(label1
);
1004 shift
= tcg_temp_new();
1005 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
1006 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
1007 tcg_gen_not_i32(shift
, REG(B7_4
));
1008 tcg_gen_andi_i32(shift
, shift
, 0x1f);
1009 tcg_gen_addi_i32(shift
, shift
, 1);
1010 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
1011 tcg_temp_free(shift
);
1014 gen_set_label(label2
);
1015 tcg_gen_movi_i32(REG(B11_8
), 0);
1016 gen_set_label(label3
);
1019 case 0x3008: /* sub Rm,Rn */
1020 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1022 case 0x300a: /* subc Rm,Rn */
1023 gen_helper_subc(REG(B11_8
), REG(B7_4
), REG(B11_8
));
1025 case 0x300b: /* subv Rm,Rn */
1026 gen_helper_subv(REG(B11_8
), REG(B7_4
), REG(B11_8
));
1028 case 0x2008: /* tst Rm,Rn */
1030 TCGv val
= tcg_temp_new();
1031 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
1032 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1036 case 0x200a: /* xor Rm,Rn */
1037 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
1039 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1041 if (ctx
->fpscr
& FPSCR_SZ
) {
1042 TCGv_i64 fp
= tcg_temp_new_i64();
1043 gen_load_fpr64(fp
, XREG(B7_4
));
1044 gen_store_fpr64(fp
, XREG(B11_8
));
1045 tcg_temp_free_i64(fp
);
1047 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1050 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1052 if (ctx
->fpscr
& FPSCR_SZ
) {
1053 TCGv addr_hi
= tcg_temp_new();
1054 int fr
= XREG(B7_4
);
1055 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
1056 tcg_gen_qemu_st32(cpu_fregs
[fr
], REG(B11_8
), ctx
->memidx
);
1057 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1058 tcg_temp_free(addr_hi
);
1060 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
), ctx
->memidx
);
1063 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1065 if (ctx
->fpscr
& FPSCR_SZ
) {
1066 TCGv addr_hi
= tcg_temp_new();
1067 int fr
= XREG(B11_8
);
1068 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1069 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1070 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1071 tcg_temp_free(addr_hi
);
1073 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1076 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1078 if (ctx
->fpscr
& FPSCR_SZ
) {
1079 TCGv addr_hi
= tcg_temp_new();
1080 int fr
= XREG(B11_8
);
1081 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1082 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
);
1083 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
);
1084 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1085 tcg_temp_free(addr_hi
);
1087 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], REG(B7_4
), ctx
->memidx
);
1088 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1091 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1093 if (ctx
->fpscr
& FPSCR_SZ
) {
1094 TCGv addr
= tcg_temp_new_i32();
1095 int fr
= XREG(B7_4
);
1096 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1097 tcg_gen_qemu_st32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1098 tcg_gen_subi_i32(addr
, addr
, 4);
1099 tcg_gen_qemu_st32(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1100 tcg_gen_mov_i32(REG(B11_8
), addr
);
1101 tcg_temp_free(addr
);
1104 addr
= tcg_temp_new_i32();
1105 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1106 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1107 tcg_gen_mov_i32(REG(B11_8
), addr
);
1108 tcg_temp_free(addr
);
1111 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1114 TCGv addr
= tcg_temp_new_i32();
1115 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1116 if (ctx
->fpscr
& FPSCR_SZ
) {
1117 int fr
= XREG(B11_8
);
1118 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1119 tcg_gen_addi_i32(addr
, addr
, 4);
1120 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1122 tcg_gen_qemu_ld32u(cpu_fregs
[FREG(B11_8
)], addr
, ctx
->memidx
);
1124 tcg_temp_free(addr
);
1127 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1130 TCGv addr
= tcg_temp_new();
1131 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1132 if (ctx
->fpscr
& FPSCR_SZ
) {
1133 int fr
= XREG(B7_4
);
1134 tcg_gen_qemu_ld32u(cpu_fregs
[fr
], addr
, ctx
->memidx
);
1135 tcg_gen_addi_i32(addr
, addr
, 4);
1136 tcg_gen_qemu_ld32u(cpu_fregs
[fr
+1], addr
, ctx
->memidx
);
1138 tcg_gen_qemu_st32(cpu_fregs
[FREG(B7_4
)], addr
, ctx
->memidx
);
1140 tcg_temp_free(addr
);
1143 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1144 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1145 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1146 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1147 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1148 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1151 if (ctx
->fpscr
& FPSCR_PR
) {
1154 if (ctx
->opcode
& 0x0110)
1155 break; /* illegal instruction */
1156 fp0
= tcg_temp_new_i64();
1157 fp1
= tcg_temp_new_i64();
1158 gen_load_fpr64(fp0
, DREG(B11_8
));
1159 gen_load_fpr64(fp1
, DREG(B7_4
));
1160 switch (ctx
->opcode
& 0xf00f) {
1161 case 0xf000: /* fadd Rm,Rn */
1162 gen_helper_fadd_DT(fp0
, fp0
, fp1
);
1164 case 0xf001: /* fsub Rm,Rn */
1165 gen_helper_fsub_DT(fp0
, fp0
, fp1
);
1167 case 0xf002: /* fmul Rm,Rn */
1168 gen_helper_fmul_DT(fp0
, fp0
, fp1
);
1170 case 0xf003: /* fdiv Rm,Rn */
1171 gen_helper_fdiv_DT(fp0
, fp0
, fp1
);
1173 case 0xf004: /* fcmp/eq Rm,Rn */
1174 gen_helper_fcmp_eq_DT(fp0
, fp1
);
1176 case 0xf005: /* fcmp/gt Rm,Rn */
1177 gen_helper_fcmp_gt_DT(fp0
, fp1
);
1180 gen_store_fpr64(fp0
, DREG(B11_8
));
1181 tcg_temp_free_i64(fp0
);
1182 tcg_temp_free_i64(fp1
);
1184 switch (ctx
->opcode
& 0xf00f) {
1185 case 0xf000: /* fadd Rm,Rn */
1186 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1188 case 0xf001: /* fsub Rm,Rn */
1189 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1191 case 0xf002: /* fmul Rm,Rn */
1192 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1194 case 0xf003: /* fdiv Rm,Rn */
1195 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1197 case 0xf004: /* fcmp/eq Rm,Rn */
1198 gen_helper_fcmp_eq_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1200 case 0xf005: /* fcmp/gt Rm,Rn */
1201 gen_helper_fcmp_gt_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
1207 case 0xf00e: /* fmac FR0,RM,Rn */
1210 if (ctx
->fpscr
& FPSCR_PR
) {
1211 break; /* illegal instruction */
1213 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)],
1214 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)], cpu_fregs
[FREG(B11_8
)]);
1220 switch (ctx
->opcode
& 0xff00) {
1221 case 0xc900: /* and #imm,R0 */
1222 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1224 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1227 addr
= tcg_temp_new();
1228 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1229 val
= tcg_temp_new();
1230 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1231 tcg_gen_andi_i32(val
, val
, B7_0
);
1232 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1234 tcg_temp_free(addr
);
1237 case 0x8b00: /* bf label */
1238 CHECK_NOT_DELAY_SLOT
1239 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1240 ctx
->pc
+ 4 + B7_0s
* 2);
1241 ctx
->bstate
= BS_BRANCH
;
1243 case 0x8f00: /* bf/s label */
1244 CHECK_NOT_DELAY_SLOT
1245 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1246 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1248 case 0x8900: /* bt label */
1249 CHECK_NOT_DELAY_SLOT
1250 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1252 ctx
->bstate
= BS_BRANCH
;
1254 case 0x8d00: /* bt/s label */
1255 CHECK_NOT_DELAY_SLOT
1256 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1257 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1259 case 0x8800: /* cmp/eq #imm,R0 */
1260 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1262 case 0xc400: /* mov.b @(disp,GBR),R0 */
1264 TCGv addr
= tcg_temp_new();
1265 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1266 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1267 tcg_temp_free(addr
);
1270 case 0xc500: /* mov.w @(disp,GBR),R0 */
1272 TCGv addr
= tcg_temp_new();
1273 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1274 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1275 tcg_temp_free(addr
);
1278 case 0xc600: /* mov.l @(disp,GBR),R0 */
1280 TCGv addr
= tcg_temp_new();
1281 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1282 tcg_gen_qemu_ld32s(REG(0), addr
, ctx
->memidx
);
1283 tcg_temp_free(addr
);
1286 case 0xc000: /* mov.b R0,@(disp,GBR) */
1288 TCGv addr
= tcg_temp_new();
1289 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1290 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1291 tcg_temp_free(addr
);
1294 case 0xc100: /* mov.w R0,@(disp,GBR) */
1296 TCGv addr
= tcg_temp_new();
1297 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1298 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1299 tcg_temp_free(addr
);
1302 case 0xc200: /* mov.l R0,@(disp,GBR) */
1304 TCGv addr
= tcg_temp_new();
1305 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1306 tcg_gen_qemu_st32(REG(0), addr
, ctx
->memidx
);
1307 tcg_temp_free(addr
);
1310 case 0x8000: /* mov.b R0,@(disp,Rn) */
1312 TCGv addr
= tcg_temp_new();
1313 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1314 tcg_gen_qemu_st8(REG(0), addr
, ctx
->memidx
);
1315 tcg_temp_free(addr
);
1318 case 0x8100: /* mov.w R0,@(disp,Rn) */
1320 TCGv addr
= tcg_temp_new();
1321 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1322 tcg_gen_qemu_st16(REG(0), addr
, ctx
->memidx
);
1323 tcg_temp_free(addr
);
1326 case 0x8400: /* mov.b @(disp,Rn),R0 */
1328 TCGv addr
= tcg_temp_new();
1329 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1330 tcg_gen_qemu_ld8s(REG(0), addr
, ctx
->memidx
);
1331 tcg_temp_free(addr
);
1334 case 0x8500: /* mov.w @(disp,Rn),R0 */
1336 TCGv addr
= tcg_temp_new();
1337 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1338 tcg_gen_qemu_ld16s(REG(0), addr
, ctx
->memidx
);
1339 tcg_temp_free(addr
);
1342 case 0xc700: /* mova @(disp,PC),R0 */
1343 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1345 case 0xcb00: /* or #imm,R0 */
1346 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1348 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1351 addr
= tcg_temp_new();
1352 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1353 val
= tcg_temp_new();
1354 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1355 tcg_gen_ori_i32(val
, val
, B7_0
);
1356 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1358 tcg_temp_free(addr
);
1361 case 0xc300: /* trapa #imm */
1364 CHECK_NOT_DELAY_SLOT
1365 imm
= tcg_const_i32(B7_0
);
1366 gen_helper_trapa(imm
);
1368 ctx
->bstate
= BS_BRANCH
;
1371 case 0xc800: /* tst #imm,R0 */
1373 TCGv val
= tcg_temp_new();
1374 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1375 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1379 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1381 TCGv val
= tcg_temp_new();
1382 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1383 tcg_gen_qemu_ld8u(val
, val
, ctx
->memidx
);
1384 tcg_gen_andi_i32(val
, val
, B7_0
);
1385 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1389 case 0xca00: /* xor #imm,R0 */
1390 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1392 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1395 addr
= tcg_temp_new();
1396 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1397 val
= tcg_temp_new();
1398 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1399 tcg_gen_xori_i32(val
, val
, B7_0
);
1400 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1402 tcg_temp_free(addr
);
1407 switch (ctx
->opcode
& 0xf08f) {
1408 case 0x408e: /* ldc Rm,Rn_BANK */
1410 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1412 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1414 tcg_gen_qemu_ld32s(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
);
1415 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1417 case 0x0082: /* stc Rm_BANK,Rn */
1419 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1421 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1424 TCGv addr
= tcg_temp_new();
1425 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1426 tcg_gen_qemu_st32(ALTREG(B6_4
), addr
, ctx
->memidx
);
1427 tcg_gen_mov_i32(REG(B11_8
), addr
);
1428 tcg_temp_free(addr
);
1433 switch (ctx
->opcode
& 0xf0ff) {
1434 case 0x0023: /* braf Rn */
1435 CHECK_NOT_DELAY_SLOT
1436 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1437 ctx
->flags
|= DELAY_SLOT
;
1438 ctx
->delayed_pc
= (uint32_t) - 1;
1440 case 0x0003: /* bsrf Rn */
1441 CHECK_NOT_DELAY_SLOT
1442 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1443 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1444 ctx
->flags
|= DELAY_SLOT
;
1445 ctx
->delayed_pc
= (uint32_t) - 1;
1447 case 0x4015: /* cmp/pl Rn */
1448 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1450 case 0x4011: /* cmp/pz Rn */
1451 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1453 case 0x4010: /* dt Rn */
1454 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1455 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1457 case 0x402b: /* jmp @Rn */
1458 CHECK_NOT_DELAY_SLOT
1459 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1460 ctx
->flags
|= DELAY_SLOT
;
1461 ctx
->delayed_pc
= (uint32_t) - 1;
1463 case 0x400b: /* jsr @Rn */
1464 CHECK_NOT_DELAY_SLOT
1465 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1466 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1467 ctx
->flags
|= DELAY_SLOT
;
1468 ctx
->delayed_pc
= (uint32_t) - 1;
1470 case 0x400e: /* ldc Rm,SR */
1472 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1473 ctx
->bstate
= BS_STOP
;
1475 case 0x4007: /* ldc.l @Rm+,SR */
1478 TCGv val
= tcg_temp_new();
1479 tcg_gen_qemu_ld32s(val
, REG(B11_8
), ctx
->memidx
);
1480 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1482 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1483 ctx
->bstate
= BS_STOP
;
1486 case 0x0002: /* stc SR,Rn */
1488 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1490 case 0x4003: /* stc SR,@-Rn */
1493 TCGv addr
= tcg_temp_new();
1494 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1495 tcg_gen_qemu_st32(cpu_sr
, addr
, ctx
->memidx
);
1496 tcg_gen_mov_i32(REG(B11_8
), addr
);
1497 tcg_temp_free(addr
);
1500 #define LD(reg,ldnum,ldpnum,prechk) \
1503 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1507 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1508 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1510 #define ST(reg,stnum,stpnum,prechk) \
1513 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1518 TCGv addr = tcg_temp_new(); \
1519 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1520 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1521 tcg_gen_mov_i32(REG(B11_8), addr); \
1522 tcg_temp_free(addr); \
1525 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1526 LD(reg,ldnum,ldpnum,prechk) \
1527 ST(reg,stnum,stpnum,prechk)
1528 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1529 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1530 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1531 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1532 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1533 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1534 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1535 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1536 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1537 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1538 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1539 case 0x406a: /* lds Rm,FPSCR */
1541 gen_helper_ld_fpscr(REG(B11_8
));
1542 ctx
->bstate
= BS_STOP
;
1544 case 0x4066: /* lds.l @Rm+,FPSCR */
1547 TCGv addr
= tcg_temp_new();
1548 tcg_gen_qemu_ld32s(addr
, REG(B11_8
), ctx
->memidx
);
1549 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1550 gen_helper_ld_fpscr(addr
);
1551 tcg_temp_free(addr
);
1552 ctx
->bstate
= BS_STOP
;
1555 case 0x006a: /* sts FPSCR,Rn */
1557 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1559 case 0x4062: /* sts FPSCR,@-Rn */
1563 val
= tcg_temp_new();
1564 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1565 addr
= tcg_temp_new();
1566 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1567 tcg_gen_qemu_st32(val
, addr
, ctx
->memidx
);
1568 tcg_gen_mov_i32(REG(B11_8
), addr
);
1569 tcg_temp_free(addr
);
1573 case 0x00c3: /* movca.l R0,@Rm */
1575 TCGv val
= tcg_temp_new();
1576 tcg_gen_qemu_ld32u(val
, REG(B11_8
), ctx
->memidx
);
1577 gen_helper_movcal (REG(B11_8
), val
);
1578 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1580 ctx
->has_movcal
= 1;
1583 /* MOVUA.L @Rm,R0 (Rm) -> R0
1584 Load non-boundary-aligned data */
1585 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1588 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1589 Load non-boundary-aligned data */
1590 tcg_gen_qemu_ld32u(REG(0), REG(B11_8
), ctx
->memidx
);
1591 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1593 case 0x0029: /* movt Rn */
1594 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1599 If (T == 1) R0 -> (Rn)
1602 if (ctx
->features
& SH_FEATURE_SH4A
) {
1603 int label
= gen_new_label();
1605 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1606 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1607 tcg_gen_qemu_st32(REG(0), REG(B11_8
), ctx
->memidx
);
1608 gen_set_label(label
);
1609 tcg_gen_movi_i32(cpu_ldst
, 0);
1617 When interrupt/exception
1620 if (ctx
->features
& SH_FEATURE_SH4A
) {
1621 tcg_gen_movi_i32(cpu_ldst
, 0);
1622 tcg_gen_qemu_ld32s(REG(0), REG(B11_8
), ctx
->memidx
);
1623 tcg_gen_movi_i32(cpu_ldst
, 1);
1627 case 0x0093: /* ocbi @Rn */
1629 gen_helper_ocbi (REG(B11_8
));
1632 case 0x00a3: /* ocbp @Rn */
1633 case 0x00b3: /* ocbwb @Rn */
1634 /* These instructions are supposed to do nothing in case of
1635 a cache miss. Given that we only partially emulate caches
1636 it is safe to simply ignore them. */
1638 case 0x0083: /* pref @Rn */
1640 case 0x00d3: /* prefi @Rn */
1641 if (ctx
->features
& SH_FEATURE_SH4A
)
1645 case 0x00e3: /* icbi @Rn */
1646 if (ctx
->features
& SH_FEATURE_SH4A
)
1650 case 0x00ab: /* synco */
1651 if (ctx
->features
& SH_FEATURE_SH4A
)
1655 case 0x4024: /* rotcl Rn */
1657 TCGv tmp
= tcg_temp_new();
1658 tcg_gen_mov_i32(tmp
, cpu_sr
);
1659 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1660 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1661 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1665 case 0x4025: /* rotcr Rn */
1667 TCGv tmp
= tcg_temp_new();
1668 tcg_gen_mov_i32(tmp
, cpu_sr
);
1669 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1670 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1671 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1675 case 0x4004: /* rotl Rn */
1676 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1677 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1679 case 0x4005: /* rotr Rn */
1680 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1681 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1683 case 0x4000: /* shll Rn */
1684 case 0x4020: /* shal Rn */
1685 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1686 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1688 case 0x4021: /* shar Rn */
1689 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1690 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1692 case 0x4001: /* shlr Rn */
1693 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1694 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1696 case 0x4008: /* shll2 Rn */
1697 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1699 case 0x4018: /* shll8 Rn */
1700 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1702 case 0x4028: /* shll16 Rn */
1703 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1705 case 0x4009: /* shlr2 Rn */
1706 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1708 case 0x4019: /* shlr8 Rn */
1709 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1711 case 0x4029: /* shlr16 Rn */
1712 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1714 case 0x401b: /* tas.b @Rn */
1717 addr
= tcg_temp_local_new();
1718 tcg_gen_mov_i32(addr
, REG(B11_8
));
1719 val
= tcg_temp_local_new();
1720 tcg_gen_qemu_ld8u(val
, addr
, ctx
->memidx
);
1721 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1722 tcg_gen_ori_i32(val
, val
, 0x80);
1723 tcg_gen_qemu_st8(val
, addr
, ctx
->memidx
);
1725 tcg_temp_free(addr
);
1728 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1730 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1732 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1734 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1736 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1738 if (ctx
->fpscr
& FPSCR_PR
) {
1740 if (ctx
->opcode
& 0x0100)
1741 break; /* illegal instruction */
1742 fp
= tcg_temp_new_i64();
1743 gen_helper_float_DT(fp
, cpu_fpul
);
1744 gen_store_fpr64(fp
, DREG(B11_8
));
1745 tcg_temp_free_i64(fp
);
1748 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1751 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1753 if (ctx
->fpscr
& FPSCR_PR
) {
1755 if (ctx
->opcode
& 0x0100)
1756 break; /* illegal instruction */
1757 fp
= tcg_temp_new_i64();
1758 gen_load_fpr64(fp
, DREG(B11_8
));
1759 gen_helper_ftrc_DT(cpu_fpul
, fp
);
1760 tcg_temp_free_i64(fp
);
1763 gen_helper_ftrc_FT(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1766 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1769 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1772 case 0xf05d: /* fabs FRn/DRn */
1774 if (ctx
->fpscr
& FPSCR_PR
) {
1775 if (ctx
->opcode
& 0x0100)
1776 break; /* illegal instruction */
1777 TCGv_i64 fp
= tcg_temp_new_i64();
1778 gen_load_fpr64(fp
, DREG(B11_8
));
1779 gen_helper_fabs_DT(fp
, fp
);
1780 gen_store_fpr64(fp
, DREG(B11_8
));
1781 tcg_temp_free_i64(fp
);
1783 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1786 case 0xf06d: /* fsqrt FRn */
1788 if (ctx
->fpscr
& FPSCR_PR
) {
1789 if (ctx
->opcode
& 0x0100)
1790 break; /* illegal instruction */
1791 TCGv_i64 fp
= tcg_temp_new_i64();
1792 gen_load_fpr64(fp
, DREG(B11_8
));
1793 gen_helper_fsqrt_DT(fp
, fp
);
1794 gen_store_fpr64(fp
, DREG(B11_8
));
1795 tcg_temp_free_i64(fp
);
1797 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1800 case 0xf07d: /* fsrra FRn */
1803 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1805 if (!(ctx
->fpscr
& FPSCR_PR
)) {
1806 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1809 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1811 if (!(ctx
->fpscr
& FPSCR_PR
)) {
1812 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1815 case 0xf0ad: /* fcnvsd FPUL,DRn */
1818 TCGv_i64 fp
= tcg_temp_new_i64();
1819 gen_helper_fcnvsd_FT_DT(fp
, cpu_fpul
);
1820 gen_store_fpr64(fp
, DREG(B11_8
));
1821 tcg_temp_free_i64(fp
);
1824 case 0xf0bd: /* fcnvds DRn,FPUL */
1827 TCGv_i64 fp
= tcg_temp_new_i64();
1828 gen_load_fpr64(fp
, DREG(B11_8
));
1829 gen_helper_fcnvds_DT_FT(cpu_fpul
, fp
);
1830 tcg_temp_free_i64(fp
);
1833 case 0xf0ed: /* fipr FVm,FVn */
1835 if ((ctx
->fpscr
& FPSCR_PR
) == 0) {
1837 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1838 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1839 gen_helper_fipr(m
, n
);
1845 case 0xf0fd: /* ftrv XMTRX,FVn */
1847 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1848 (ctx
->fpscr
& FPSCR_PR
) == 0) {
1850 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1858 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1859 ctx
->opcode
, ctx
->pc
);
1862 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1863 gen_helper_raise_slot_illegal_instruction();
1865 gen_helper_raise_illegal_instruction();
1867 ctx
->bstate
= BS_EXCP
;
/*
 * decode_opc: translate the single guest instruction at ctx->pc and
 * maintain the SH-4 delay-slot bookkeeping around it.
 *
 * NOTE(review): this extraction is word-wrapped and elided — several of
 * the original statements (including the actual call into the opcode
 * decoder between the fragments labelled 1875 and 1880, and the bodies of
 * some branches) are missing from this chunk.  The comments below
 * describe only what is visible here.
 */
1870 static void decode_opc(DisasContext
* ctx
)
/* Snapshot the delay-slot flags in force BEFORE this instruction is
   decoded, so that afterwards we can tell whether the instruction we just
   translated was itself sitting in a delay slot. */
1872 uint32_t old_flags
= ctx
->flags
;
/* When TCG op logging is enabled, tag the op stream with the guest PC. */
1874 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
1875 tcg_gen_debug_insn_start(ctx
->pc
);
/* The instruction just translated was in a delay slot: now emit the
   branch that owns the slot and terminate the TB. */
1880 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1881 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1884 /* go out of the delay slot */
/* Clear the delay-slot bits from the stored translation flags so an
   exception taken later does not re-enter the slot. */
1885 uint32_t new_flags
= ctx
->flags
;
1886 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1887 gen_store_flags(new_flags
);
/* A delay-slot instruction always ends the TB with a branch. */
1890 ctx
->bstate
= BS_BRANCH
;
1891 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1892 gen_delayed_conditional_jump(ctx
);
1893 } else if (old_flags
& DELAY_SLOT
) {
1899 /* go into a delay slot */
/* If the decoded instruction set up a delay slot, persist the flags so
   an exception taken inside the slot can restart correctly. */
1900 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1901 gen_store_flags(ctx
->flags
);
/*
 * gen_intermediate_code_internal: core translation loop — turn the SH-4
 * instruction stream of one TranslationBlock into TCG ops, optionally
 * recording per-op guest-PC information (the elided trailing parameter
 * selects search_pc mode; see the gen_opc_* bookkeeping below).
 *
 * NOTE(review): this extraction is word-wrapped and heavily elided — the
 * function's return-type line, several local declarations (ctx, i, ii,
 * num_insns, bp, ...) and the loop-exit `break` statements are missing
 * from this chunk.  Comments annotate only the visible fragments.
 */
1905 gen_intermediate_code_internal(CPUSH4State
* env
, TranslationBlock
* tb
,
1909 target_ulong pc_start
;
/* End of the TCG opcode buffer; the translation loop stops before
   overrunning it. */
1910 static uint16_t *gen_opc_end
;
1917 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
/* Seed the DisasContext from the TB flags and current CPU state. */
1919 ctx
.flags
= (uint32_t)tb
->flags
;
1920 ctx
.bstate
= BS_NONE
;
1922 ctx
.fpscr
= env
->fpscr
;
/* memidx 1 = user mode (SR.MD clear), 0 = privileged — matches the
   IS_USER() definition at the top of the file. */
1923 ctx
.memidx
= (env
->sr
& SR_MD
) == 0 ? 1 : 0;
1924 /* We don't know if the delayed pc came from a dynamic or static branch,
1925 so assume it is a dynamic branch. */
1926 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1928 ctx
.singlestep_enabled
= env
->singlestep_enabled
;
1929 ctx
.features
= env
->features
;
1930 ctx
.has_movcal
= (tb
->flags
& TB_FLAG_PENDING_MOVCA
);
/* Honour an icount instruction budget if the TB requests one; otherwise
   allow the maximum. */
1934 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1936 max_insns
= CF_COUNT_MASK
;
/* Main loop: translate until a branch/exception/stop ends the TB or the
   opcode buffer is about to fill. */
1938 while (ctx
.bstate
== BS_NONE
&& gen_opc_ptr
< gen_opc_end
) {
/* Debug breakpoint at this PC: sync cpu_pc and end the TB with an
   exception state so the debugger observes a precise PC. */
1939 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
1940 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1941 if (ctx
.pc
== bp
->pc
) {
1942 /* We have hit a breakpoint - make sure PC is up-to-date */
1943 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1945 ctx
.bstate
= BS_EXCP
;
/* search_pc bookkeeping: for every generated op, record the guest PC,
   translation flags and instruction count so CPU state can be restored
   from an op index later (see restore_state_to_opc). */
1951 i
= gen_opc_ptr
- gen_opc_buf
;
1955 gen_opc_instr_start
[ii
++] = 0;
1957 gen_opc_pc
[ii
] = ctx
.pc
;
1958 gen_opc_hflags
[ii
] = ctx
.flags
;
1959 gen_opc_instr_start
[ii
] = 1;
1960 gen_opc_icount
[ii
] = num_insns
;
/* Special handling before the last instruction of an icount TB that is
   allowed to perform I/O. */
1962 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
1965 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
/* Fetch the 16-bit SH-4 opcode and (elided here) decode it. */
1968 ctx
.opcode
= lduw_code(ctx
.pc
);
/* Stop conditions: page boundary crossed, single-stepping, or the
   instruction budget is spent (the `break`s are elided). */
1972 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
1974 if (env
->singlestep_enabled
)
1976 if (num_insns
>= max_insns
)
1981 if (tb
->cflags
& CF_LAST_IO
)
/* Single-stepping: sync PC so the debug exception reports it exactly. */
1983 if (env
->singlestep_enabled
) {
1984 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
/* Close the TB according to how translation ended (bstate). */
1987 switch (ctx
.bstate
) {
1989 /* gen_op_interrupt_restart(); */
/* Fell off the end without a branch: store the flags (marking the delay
   slot cleared) and chain to the TB at the next PC. */
1993 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1995 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1998 /* gen_op_interrupt_restart(); */
/* Finalize icount accounting and terminate the TCG op stream. */
2007 gen_icount_end(tb
, num_insns
);
2008 *gen_opc_ptr
= INDEX_op_end
;
/* search_pc: zero out the remaining instr_start slots. */
2010 i
= gen_opc_ptr
- gen_opc_buf
;
2013 gen_opc_instr_start
[ii
++] = 0;
/* Record the TB's guest-code size and instruction count. */
2015 tb
->size
= ctx
.pc
- pc_start
;
2016 tb
->icount
= num_insns
;
/* Optional disassembly trace of the guest code just translated. */
2020 #ifdef SH4_DEBUG_DISAS
2021 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "\n");
2023 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
2024 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2025 log_target_disas(pc_start
, ctx
.pc
- pc_start
, 0);
2031 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2033 gen_intermediate_code_internal(env
, tb
, 0);
2036 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
2038 gen_intermediate_code_internal(env
, tb
, 1);
2041 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
2043 env
->pc
= gen_opc_pc
[pc_pos
];
2044 env
->flags
= gen_opc_hflags
[pc_pos
];