4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 //#define SH4_SINGLE_STEP
24 #include "disas/disas.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
/* Per-translation-block state for the SH4 instruction decoder.
 * NOTE(review): the extraction has dropped lines in this region — the struct
 * body is visibly truncated (fields such as pc, flags, opcode, bstate,
 * delayed_pc and memidx are referenced by later code but not shown here),
 * and the #else/#endif for IS_USER and the enum braces are missing.
 * Only comments were added; all code text is untouched. */
30 typedef struct DisasContext
{
/* TB being translated; used by gen_goto_tb for direct-jump chaining */
31 struct TranslationBlock
*tb
;
/* non-zero when gdbstub single-stepping is active; forces helper_debug exits */
38 int singlestep_enabled
;
43 #if defined(CONFIG_USER_ONLY)
/* user-mode emulation always runs unprivileged */
44 #define IS_USER(ctx) 1
/* system emulation: user mode iff SR.MD is clear in the cached flags */
46 #define IS_USER(ctx) (!(ctx->flags & SR_MD))
/* Translation-stop reasons stored in ctx->bstate. */
50 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
53 BS_STOP
= 1, /* We want to stop translation for any reason */
54 BS_BRANCH
= 2, /* We reached a branch condition */
55 BS_EXCP
= 3, /* We reached an exception condition */
58 /* global register indexes */
/* TCG handles mapping architectural state to host; created in
 * sh4_translate_init() below. */
59 static TCGv_ptr cpu_env
;
/* 16 general registers plus the 8 banked low registers (R0..R7 bank1) */
60 static TCGv cpu_gregs
[24];
61 static TCGv cpu_pc
, cpu_sr
, cpu_ssr
, cpu_spc
, cpu_gbr
;
62 static TCGv cpu_vbr
, cpu_sgr
, cpu_dbr
, cpu_mach
, cpu_macl
;
63 static TCGv cpu_pr
, cpu_fpscr
, cpu_fpul
, cpu_ldst
;
/* 16 FP registers in each of the two FPSCR.FR banks */
64 static TCGv cpu_fregs
[32];
66 /* internal register indexes */
/* non-architectural helpers: delay-slot flags and the pending branch target */
67 static TCGv cpu_flags
, cpu_delayed_pc
;
/* per-op flags recorded for restoring decoder state on exceptions */
69 static uint32_t gen_opc_hflags
[OPC_BUF_SIZE
];
71 #include "exec/gen-icount.h"
/* One-time registration of all SH4 architectural state as TCG globals.
 * Each cpu_* TCGv above is bound to its slot in CPUSH4State so generated
 * code reads/writes the CPU structure directly.
 * NOTE(review): extraction losses here — the early-return check on
 * done_init, the closing "};" of both name tables, the loop-body name
 * arguments (gregnames[i]/fregnames[i]) and the closing brace of the
 * function are not visible. Only comments were added. */
73 void sh4_translate_init(void)
/* guards against double registration across multiple CPU instances */
76 static int done_init
= 0;
/* debugger-visible names for the 24 general-register slots */
77 static const char * const gregnames
[24] = {
78 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
79 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
80 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
81 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
82 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
/* debugger-visible names for the 32 FP-register slots (two banks) */
84 static const char * const fregnames
[32] = {
85 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
86 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
87 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
88 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
89 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
90 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
91 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
92 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
/* env pointer lives in the fixed host register TCG_AREG0 */
98 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
100 for (i
= 0; i
< 24; i
++)
101 cpu_gregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
102 offsetof(CPUSH4State
, gregs
[i
]),
/* control/system registers, each mapped to its CPUSH4State field */
105 cpu_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
106 offsetof(CPUSH4State
, pc
), "PC");
107 cpu_sr
= tcg_global_mem_new_i32(TCG_AREG0
,
108 offsetof(CPUSH4State
, sr
), "SR");
109 cpu_ssr
= tcg_global_mem_new_i32(TCG_AREG0
,
110 offsetof(CPUSH4State
, ssr
), "SSR");
111 cpu_spc
= tcg_global_mem_new_i32(TCG_AREG0
,
112 offsetof(CPUSH4State
, spc
), "SPC");
113 cpu_gbr
= tcg_global_mem_new_i32(TCG_AREG0
,
114 offsetof(CPUSH4State
, gbr
), "GBR");
115 cpu_vbr
= tcg_global_mem_new_i32(TCG_AREG0
,
116 offsetof(CPUSH4State
, vbr
), "VBR");
117 cpu_sgr
= tcg_global_mem_new_i32(TCG_AREG0
,
118 offsetof(CPUSH4State
, sgr
), "SGR");
119 cpu_dbr
= tcg_global_mem_new_i32(TCG_AREG0
,
120 offsetof(CPUSH4State
, dbr
), "DBR");
121 cpu_mach
= tcg_global_mem_new_i32(TCG_AREG0
,
122 offsetof(CPUSH4State
, mach
), "MACH");
123 cpu_macl
= tcg_global_mem_new_i32(TCG_AREG0
,
124 offsetof(CPUSH4State
, macl
), "MACL");
125 cpu_pr
= tcg_global_mem_new_i32(TCG_AREG0
,
126 offsetof(CPUSH4State
, pr
), "PR");
127 cpu_fpscr
= tcg_global_mem_new_i32(TCG_AREG0
,
128 offsetof(CPUSH4State
, fpscr
), "FPSCR");
129 cpu_fpul
= tcg_global_mem_new_i32(TCG_AREG0
,
130 offsetof(CPUSH4State
, fpul
), "FPUL");
/* internal (non-architectural) globals; underscores mark them as such */
132 cpu_flags
= tcg_global_mem_new_i32(TCG_AREG0
,
133 offsetof(CPUSH4State
, flags
), "_flags_");
134 cpu_delayed_pc
= tcg_global_mem_new_i32(TCG_AREG0
,
135 offsetof(CPUSH4State
, delayed_pc
),
137 cpu_ldst
= tcg_global_mem_new_i32(TCG_AREG0
,
138 offsetof(CPUSH4State
, ldst
), "_ldst_");
140 for (i
= 0; i
< 32; i
++)
141 cpu_fregs
[i
] = tcg_global_mem_new_i32(TCG_AREG0
,
142 offsetof(CPUSH4State
, fregs
[i
]),
/* Dump the architectural CPU state (control regs, general regs, delay-slot
 * status) to stream f for the monitor / -d cpu logging.
 * NOTE(review): extraction losses — the delayed_pc argument of the last two
 * cpu_fprintf calls, the loop's closing brace and the function's closing
 * brace are not visible. Only comments were added. */
148 void superh_cpu_dump_state(CPUState
*cs
, FILE *f
,
149 fprintf_function cpu_fprintf
, int flags
)
151 SuperHCPU
*cpu
= SUPERH_CPU(cs
);
152 CPUSH4State
*env
= &cpu
->env
;
154 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
155 env
->pc
, env
->sr
, env
->pr
, env
->fpscr
);
156 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
157 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
158 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
159 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
/* print the 24 general-register slots, four per line */
160 for (i
= 0; i
< 24; i
+= 4) {
161 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
162 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
163 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
/* report pending delay-slot state so a mid-branch stop is visible */
165 if (env
->flags
& DELAY_SLOT
) {
166 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
168 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
169 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
/* Emit a jump to 'dest': a chainable direct jump when dest is in the same
 * guest page as this TB and single-stepping is off, otherwise an indirect
 * exit (with a debug trap when single-stepping).
 * NOTE(review): extraction losses — the initialization "tb = ctx->tb;"
 * (without which 'tb' would be read uninitialized), the tcg_gen_goto_tb(n)
 * call, the "} else {" line and the final exit_tb/closing braces are not
 * visible here. Only comments were added. */
174 static void gen_goto_tb(DisasContext
* ctx
, int n
, target_ulong dest
)
176 TranslationBlock
*tb
;
179 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) &&
180 !ctx
->singlestep_enabled
) {
181 /* Use a direct jump if in same page and singlestep not enabled */
183 tcg_gen_movi_i32(cpu_pc
, dest
);
/* return (tb | n) so the chaining slot n of this TB gets patched */
184 tcg_gen_exit_tb((uintptr_t)tb
+ n
);
186 tcg_gen_movi_i32(cpu_pc
, dest
);
187 if (ctx
->singlestep_enabled
)
188 gen_helper_debug(cpu_env
);
/* Emit the jump ending a TB: indirect through cpu_delayed_pc when the
 * target is dynamic (marked by delayed_pc == 0xffffffff), otherwise a
 * chainable goto_tb to the statically-known target.
 * NOTE(review): the tcg_gen_exit_tb(0) for the dynamic case, the "} else {"
 * and closing braces appear to be lost by extraction. Comments only. */
193 static void gen_jump(DisasContext
* ctx
)
195 if (ctx
->delayed_pc
== (uint32_t) - 1) {
196 /* Target is not statically known, it comes necessarily from a
197 delayed jump as immediate jumps are conditional jumps */
198 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
199 if (ctx
->singlestep_enabled
)
200 gen_helper_debug(cpu_env
);
203 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
/* For bt/s and bf/s: record the branch target in cpu_delayed_pc and set
 * DELAY_SLOT_TRUE in cpu_flags iff SR.T matches the sense 't'
 * (t != 0 means "branch when T set", t == 0 means "branch when T clear").
 * NOTE(review): the declaration/allocation of the temporary 'sr' is not
 * visible — presumably lost by extraction. Comments only. */
207 static inline void gen_branch_slot(uint32_t delayed_pc
, int t
)
210 int label
= gen_new_label();
211 tcg_gen_movi_i32(cpu_delayed_pc
, delayed_pc
);
/* isolate SR.T, then skip the flag-set when the condition is not met */
213 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
214 tcg_gen_brcondi_i32(t
? TCG_COND_EQ
:TCG_COND_NE
, sr
, 0, label
);
215 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
);
216 gen_set_label(label
);
219 /* Immediate conditional jump (bt or bf): fall through to 'ifnott' when
   SR.T is clear, jump to 'ift' when it is set. Each arm ends the TB via
   gen_goto_tb with a distinct chaining slot (0 / 1).
   NOTE(review): the 'sr' temporary declaration, the gen_set_label(l1) and
   closing brace are not visible — presumably lost by extraction. */
220 static void gen_conditional_jump(DisasContext
* ctx
,
221 target_ulong ift
, target_ulong ifnott
)
226 l1
= gen_new_label();
228 tcg_gen_andi_i32(sr
, cpu_sr
, SR_T
);
/* SR.T set -> skip the not-taken exit */
229 tcg_gen_brcondi_i32(TCG_COND_NE
, sr
, 0, l1
);
230 gen_goto_tb(ctx
, 0, ifnott
);
232 gen_goto_tb(ctx
, 1, ift
);
235 /* Delayed conditional jump (bt/s or bf/s), emitted after the delay slot:
   if DELAY_SLOT_TRUE was recorded by gen_branch_slot, clear it and take
   the branch (via gen_jump); otherwise continue at pc + 2.
   NOTE(review): the 'ds' temporary declaration, gen_set_label(l1) and the
   trailing gen_jump(ctx)/closing brace are not visible — presumably lost
   by extraction. */
236 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
241 l1
= gen_new_label();
243 tcg_gen_andi_i32(ds
, cpu_flags
, DELAY_SLOT_TRUE
);
244 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
/* condition false: resume at the instruction after the delay slot */
245 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
/* condition true: consume the flag before jumping */
247 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, ~DELAY_SLOT_TRUE
);
/* Compare two registers and deposit the boolean result into SR.T.
 * Note the operand order: the result is (t1 cond t0), matching the SH4
 * "cmp/xx Rm,Rn" encoding where Rn is the left operand.
 * NOTE(review): the temporary 't' allocation and free are not visible —
 * presumably lost by extraction. Comments only. */
251 static inline void gen_cmp(int cond
, TCGv t0
, TCGv t1
)
256 tcg_gen_setcond_i32(cond
, t
, t1
, t0
);
/* clear then OR in the new T bit (setcond yields 0/1, and SR_T is bit 0) */
257 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
258 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
/* Compare a register against an immediate and deposit the boolean result
 * into SR.T; immediate counterpart of gen_cmp above.
 * NOTE(review): the temporary 't' allocation and free are not visible —
 * presumably lost by extraction. Comments only. */
263 static inline void gen_cmp_imm(int cond
, TCGv t0
, int32_t imm
)
268 tcg_gen_setcondi_i32(cond
, t
, t0
, imm
);
/* clear then OR in the new T bit (setcondi yields 0/1, SR_T is bit 0) */
269 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
270 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t
);
/* Replace the decoder flags in cpu_flags with 'flags' while preserving the
 * DELAY_SLOT_TRUE bit, which records the outcome of a pending bt/s / bf/s
 * condition across the delay slot. */
275 static inline void gen_store_flags(uint32_t flags
)
277 tcg_gen_andi_i32(cpu_flags
, cpu_flags
, DELAY_SLOT_TRUE
)
278 tcg_gen_ori_i32(cpu_flags
, cpu_flags
, flags
);
/* Copy bit p1 of t1 into bit p0 of t0, leaving t0's other bits intact.
 * Used e.g. by div0s to move sign bits into SR.Q/SR.M/SR.T.
 * NOTE(review): the if/else selecting between the shri (p1 > p0) and shli
 * (p0 > p1) paths, and the tcg_temp_free(tmp), are not visible —
 * presumably lost by extraction. Comments only. */
281 static inline void gen_copy_bit_i32(TCGv t0
, int p0
, TCGv t1
, int p1
)
283 TCGv tmp
= tcg_temp_new();
/* isolate the source bit and clear the destination bit */
288 tcg_gen_andi_i32(tmp
, t1
, (1 << p1
));
289 tcg_gen_andi_i32(t0
, t0
, ~(1 << p0
));
/* shift the isolated bit from position p1 to position p0 */
291 tcg_gen_shri_i32(tmp
, tmp
, p1
- p0
);
293 tcg_gen_shli_i32(tmp
, tmp
, p0
- p1
);
294 tcg_gen_or_i32(t0
, t0
, tmp
);
/* Assemble a 64-bit FP value from the register pair (reg, reg+1):
 * fregs[reg] supplies the high half, fregs[reg+1] the low half
 * (concat_i32_i64 places its first operand in the low 32 bits). */
299 static inline void gen_load_fpr64(TCGv_i64 t
, int reg
)
301 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
/* Split a 64-bit FP value back into the register pair (reg, reg+1):
 * low 32 bits -> fregs[reg+1], high 32 bits -> fregs[reg] — the inverse
 * of gen_load_fpr64 above. Clobbers its 64-bit argument (shifted). */
304 static inline void gen_store_fpr64 (TCGv_i64 t
, int reg
)
306 TCGv_i32 tmp
= tcg_temp_new_i32();
/* low half first */
307 tcg_gen_trunc_i64_i32(tmp
, t
);
308 tcg_gen_mov_i32(cpu_fregs
[reg
+ 1], tmp
);
/* then expose and store the high half */
309 tcg_gen_shri_i64(t
, t
, 32);
310 tcg_gen_trunc_i64_i32(tmp
, t
);
311 tcg_gen_mov_i32(cpu_fregs
[reg
], tmp
);
312 tcg_temp_free_i32(tmp
);
/* Opcode bit-field extractors: BHI_LO takes bits [HI:LO] of the 16-bit
 * instruction word; the 's' suffix marks a sign-extended immediate. */
315 #define B3_0 (ctx->opcode & 0xf)
316 #define B6_4 ((ctx->opcode >> 4) & 0x7)
317 #define B7_4 ((ctx->opcode >> 4) & 0xf)
318 #define B7_0 (ctx->opcode & 0xff)
319 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit signed displacement (bra/bsr) — manual sign extension */
320 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
321 (ctx->opcode & 0xfff))
322 #define B11_8 ((ctx->opcode >> 8) & 0xf)
323 #define B15_12 ((ctx->opcode >> 12) & 0xf)
/* REG selects the active bank for R0..R7 (bank 1 when privileged with
 * SR.RB set); ALTREG selects the inactive bank (ldc/stc Rn_BANK). */
325 #define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
326 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
328 #define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
329 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
/* FP register selection honouring the FPSCR.FR bank swap; XHACK remaps the
 * XD register numbering, and DREG relies on even-aligned double regs. */
331 #define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
332 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
333 #define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
334 #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Raise slot-illegal if the current instruction sits in a delay slot
 * (most control-flow instructions are forbidden there). */
336 #define CHECK_NOT_DELAY_SLOT \
337 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
339 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
340 gen_helper_raise_slot_illegal_instruction(cpu_env); \
341 ctx->bstate = BS_BRANCH; \
/* Raise illegal-instruction (or its slot variant) when executed in
 * user mode. */
345 #define CHECK_PRIVILEGED \
346 if (IS_USER(ctx)) { \
347 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
348 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
349 gen_helper_raise_slot_illegal_instruction(cpu_env); \
351 gen_helper_raise_illegal_instruction(cpu_env); \
353 ctx->bstate = BS_BRANCH; \
/* Raise FPU-disable (or its slot variant) when SR.FD is set. */
357 #define CHECK_FPU_ENABLED \
358 if (ctx->flags & SR_FD) { \
359 tcg_gen_movi_i32(cpu_pc, ctx->pc); \
360 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
361 gen_helper_raise_slot_fpu_disable(cpu_env); \
363 gen_helper_raise_fpu_disable(cpu_env); \
365 ctx->bstate = BS_BRANCH; \
369 static void _decode_opc(DisasContext
* ctx
)
371 /* This code tries to make movcal emulation sufficiently
372 accurate for Linux purposes. This instruction writes
373 memory, and prior to that, always allocates a cache line.
374 It is used in two contexts:
375 - in memcpy, where data is copied in blocks, the first write
376 of to a block uses movca.l for performance.
377 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
378 to flush the cache. Here, the data written by movcal.l is never
379 written to memory, and the data written is just bogus.
381 To simulate this, we simulate movcal.l, we store the value to memory,
382 but we also remember the previous content. If we see ocbi, we check
383 if movcal.l for that address was done previously. If so, the write should
384 not have hit the memory, so we restore the previous content.
385 When we see an instruction that is neither movca.l
386 nor ocbi, the previous content is discarded.
388 To optimize, we only try to flush stores when we're at the start of
389 TB, or if we already saw movca.l in this TB and did not flush stores
393 int opcode
= ctx
->opcode
& 0xf0ff;
394 if (opcode
!= 0x0093 /* ocbi */
395 && opcode
!= 0x00c3 /* movca.l */)
397 gen_helper_discard_movcal_backup(cpu_env
);
403 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
406 switch (ctx
->opcode
) {
407 case 0x0019: /* div0u */
408 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(SR_M
| SR_Q
| SR_T
));
410 case 0x000b: /* rts */
412 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
413 ctx
->flags
|= DELAY_SLOT
;
414 ctx
->delayed_pc
= (uint32_t) - 1;
416 case 0x0028: /* clrmac */
417 tcg_gen_movi_i32(cpu_mach
, 0);
418 tcg_gen_movi_i32(cpu_macl
, 0);
420 case 0x0048: /* clrs */
421 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_S
);
423 case 0x0008: /* clrt */
424 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
426 case 0x0038: /* ldtlb */
428 gen_helper_ldtlb(cpu_env
);
430 case 0x002b: /* rte */
433 tcg_gen_mov_i32(cpu_sr
, cpu_ssr
);
434 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
435 ctx
->flags
|= DELAY_SLOT
;
436 ctx
->delayed_pc
= (uint32_t) - 1;
438 case 0x0058: /* sets */
439 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_S
);
441 case 0x0018: /* sett */
442 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, SR_T
);
444 case 0xfbfd: /* frchg */
445 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
446 ctx
->bstate
= BS_STOP
;
448 case 0xf3fd: /* fschg */
449 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
450 ctx
->bstate
= BS_STOP
;
452 case 0x0009: /* nop */
454 case 0x001b: /* sleep */
456 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
457 gen_helper_sleep(cpu_env
);
461 switch (ctx
->opcode
& 0xf000) {
462 case 0x1000: /* mov.l Rm,@(disp,Rn) */
464 TCGv addr
= tcg_temp_new();
465 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
466 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
470 case 0x5000: /* mov.l @(disp,Rm),Rn */
472 TCGv addr
= tcg_temp_new();
473 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
474 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
478 case 0xe000: /* mov #imm,Rn */
479 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
481 case 0x9000: /* mov.w @(disp,PC),Rn */
483 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
484 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
488 case 0xd000: /* mov.l @(disp,PC),Rn */
490 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
491 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
495 case 0x7000: /* add #imm,Rn */
496 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
498 case 0xa000: /* bra disp */
500 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
501 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
502 ctx
->flags
|= DELAY_SLOT
;
504 case 0xb000: /* bsr disp */
506 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
507 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
508 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
509 ctx
->flags
|= DELAY_SLOT
;
513 switch (ctx
->opcode
& 0xf00f) {
514 case 0x6003: /* mov Rm,Rn */
515 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
517 case 0x2000: /* mov.b Rm,@Rn */
518 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_UB
);
520 case 0x2001: /* mov.w Rm,@Rn */
521 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUW
);
523 case 0x2002: /* mov.l Rm,@Rn */
524 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
526 case 0x6000: /* mov.b @Rm,Rn */
527 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
529 case 0x6001: /* mov.w @Rm,Rn */
530 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
532 case 0x6002: /* mov.l @Rm,Rn */
533 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
535 case 0x2004: /* mov.b Rm,@-Rn */
537 TCGv addr
= tcg_temp_new();
538 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
539 /* might cause re-execution */
540 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
541 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
545 case 0x2005: /* mov.w Rm,@-Rn */
547 TCGv addr
= tcg_temp_new();
548 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
549 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
550 tcg_gen_mov_i32(REG(B11_8
), addr
);
554 case 0x2006: /* mov.l Rm,@-Rn */
556 TCGv addr
= tcg_temp_new();
557 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
558 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
559 tcg_gen_mov_i32(REG(B11_8
), addr
);
562 case 0x6004: /* mov.b @Rm+,Rn */
563 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
565 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
567 case 0x6005: /* mov.w @Rm+,Rn */
568 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
570 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
572 case 0x6006: /* mov.l @Rm+,Rn */
573 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
575 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
577 case 0x0004: /* mov.b Rm,@(R0,Rn) */
579 TCGv addr
= tcg_temp_new();
580 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
581 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
585 case 0x0005: /* mov.w Rm,@(R0,Rn) */
587 TCGv addr
= tcg_temp_new();
588 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
589 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
593 case 0x0006: /* mov.l Rm,@(R0,Rn) */
595 TCGv addr
= tcg_temp_new();
596 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
597 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
601 case 0x000c: /* mov.b @(R0,Rm),Rn */
603 TCGv addr
= tcg_temp_new();
604 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
605 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_SB
);
609 case 0x000d: /* mov.w @(R0,Rm),Rn */
611 TCGv addr
= tcg_temp_new();
612 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
613 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
617 case 0x000e: /* mov.l @(R0,Rm),Rn */
619 TCGv addr
= tcg_temp_new();
620 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
621 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
625 case 0x6008: /* swap.b Rm,Rn */
628 high
= tcg_temp_new();
629 tcg_gen_andi_i32(high
, REG(B7_4
), 0xffff0000);
630 low
= tcg_temp_new();
631 tcg_gen_ext16u_i32(low
, REG(B7_4
));
632 tcg_gen_bswap16_i32(low
, low
);
633 tcg_gen_or_i32(REG(B11_8
), high
, low
);
638 case 0x6009: /* swap.w Rm,Rn */
639 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
641 case 0x200d: /* xtrct Rm,Rn */
644 high
= tcg_temp_new();
645 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
646 low
= tcg_temp_new();
647 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
648 tcg_gen_or_i32(REG(B11_8
), high
, low
);
653 case 0x300c: /* add Rm,Rn */
654 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
656 case 0x300e: /* addc Rm,Rn */
660 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
662 tcg_gen_add_i32(t1
, REG(B7_4
), REG(B11_8
));
663 tcg_gen_add_i32(t0
, t0
, t1
);
665 tcg_gen_setcond_i32(TCG_COND_GTU
, t2
, REG(B11_8
), t1
);
666 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, t1
, t0
);
667 tcg_gen_or_i32(t1
, t1
, t2
);
669 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
670 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
672 tcg_gen_mov_i32(REG(B11_8
), t0
);
676 case 0x300f: /* addv Rm,Rn */
680 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
682 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
684 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
685 tcg_gen_andc_i32(t1
, t1
, t2
);
687 tcg_gen_shri_i32(t1
, t1
, 31);
688 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
689 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
691 tcg_gen_mov_i32(REG(B7_4
), t0
);
695 case 0x2009: /* and Rm,Rn */
696 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
698 case 0x3000: /* cmp/eq Rm,Rn */
699 gen_cmp(TCG_COND_EQ
, REG(B7_4
), REG(B11_8
));
701 case 0x3003: /* cmp/ge Rm,Rn */
702 gen_cmp(TCG_COND_GE
, REG(B7_4
), REG(B11_8
));
704 case 0x3007: /* cmp/gt Rm,Rn */
705 gen_cmp(TCG_COND_GT
, REG(B7_4
), REG(B11_8
));
707 case 0x3006: /* cmp/hi Rm,Rn */
708 gen_cmp(TCG_COND_GTU
, REG(B7_4
), REG(B11_8
));
710 case 0x3002: /* cmp/hs Rm,Rn */
711 gen_cmp(TCG_COND_GEU
, REG(B7_4
), REG(B11_8
));
713 case 0x200c: /* cmp/str Rm,Rn */
715 TCGv cmp1
= tcg_temp_new();
716 TCGv cmp2
= tcg_temp_new();
717 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
718 tcg_gen_xor_i32(cmp1
, REG(B7_4
), REG(B11_8
));
719 tcg_gen_andi_i32(cmp2
, cmp1
, 0xff000000);
720 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
721 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
722 tcg_gen_andi_i32(cmp2
, cmp1
, 0x00ff0000);
723 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
724 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
725 tcg_gen_andi_i32(cmp2
, cmp1
, 0x0000ff00);
726 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
727 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
728 tcg_gen_andi_i32(cmp2
, cmp1
, 0x000000ff);
729 tcg_gen_setcondi_i32(TCG_COND_EQ
, cmp2
, cmp2
, 0);
730 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cmp2
);
735 case 0x2007: /* div0s Rm,Rn */
737 gen_copy_bit_i32(cpu_sr
, 8, REG(B11_8
), 31); /* SR_Q */
738 gen_copy_bit_i32(cpu_sr
, 9, REG(B7_4
), 31); /* SR_M */
739 TCGv val
= tcg_temp_new();
740 tcg_gen_xor_i32(val
, REG(B7_4
), REG(B11_8
));
741 gen_copy_bit_i32(cpu_sr
, 0, val
, 31); /* SR_T */
745 case 0x3004: /* div1 Rm,Rn */
746 gen_helper_div1(REG(B11_8
), cpu_env
, REG(B7_4
), REG(B11_8
));
748 case 0x300d: /* dmuls.l Rm,Rn */
749 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
751 case 0x3005: /* dmulu.l Rm,Rn */
752 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
754 case 0x600e: /* exts.b Rm,Rn */
755 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
757 case 0x600f: /* exts.w Rm,Rn */
758 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
760 case 0x600c: /* extu.b Rm,Rn */
761 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
763 case 0x600d: /* extu.w Rm,Rn */
764 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
766 case 0x000f: /* mac.l @Rm+,@Rn+ */
769 arg0
= tcg_temp_new();
770 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
771 arg1
= tcg_temp_new();
772 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
773 gen_helper_macl(cpu_env
, arg0
, arg1
);
776 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
777 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
780 case 0x400f: /* mac.w @Rm+,@Rn+ */
783 arg0
= tcg_temp_new();
784 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
785 arg1
= tcg_temp_new();
786 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
787 gen_helper_macw(cpu_env
, arg0
, arg1
);
790 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
791 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
794 case 0x0007: /* mul.l Rm,Rn */
795 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
797 case 0x200f: /* muls.w Rm,Rn */
800 arg0
= tcg_temp_new();
801 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
802 arg1
= tcg_temp_new();
803 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
804 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
809 case 0x200e: /* mulu.w Rm,Rn */
812 arg0
= tcg_temp_new();
813 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
814 arg1
= tcg_temp_new();
815 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
816 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
821 case 0x600b: /* neg Rm,Rn */
822 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
824 case 0x600a: /* negc Rm,Rn */
828 tcg_gen_neg_i32(t0
, REG(B7_4
));
830 tcg_gen_andi_i32(t1
, cpu_sr
, SR_T
);
831 tcg_gen_sub_i32(REG(B11_8
), t0
, t1
);
832 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
833 tcg_gen_setcondi_i32(TCG_COND_GTU
, t1
, t0
, 0);
834 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
835 tcg_gen_setcond_i32(TCG_COND_GTU
, t1
, REG(B11_8
), t0
);
836 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
841 case 0x6007: /* not Rm,Rn */
842 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
844 case 0x200b: /* or Rm,Rn */
845 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
847 case 0x400c: /* shad Rm,Rn */
849 int label1
= gen_new_label();
850 int label2
= gen_new_label();
851 int label3
= gen_new_label();
852 int label4
= gen_new_label();
854 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
855 /* Rm positive, shift to the left */
856 shift
= tcg_temp_new();
857 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
858 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
859 tcg_temp_free(shift
);
861 /* Rm negative, shift to the right */
862 gen_set_label(label1
);
863 shift
= tcg_temp_new();
864 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
865 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
866 tcg_gen_not_i32(shift
, REG(B7_4
));
867 tcg_gen_andi_i32(shift
, shift
, 0x1f);
868 tcg_gen_addi_i32(shift
, shift
, 1);
869 tcg_gen_sar_i32(REG(B11_8
), REG(B11_8
), shift
);
870 tcg_temp_free(shift
);
873 gen_set_label(label2
);
874 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B11_8
), 0, label3
);
875 tcg_gen_movi_i32(REG(B11_8
), 0);
877 gen_set_label(label3
);
878 tcg_gen_movi_i32(REG(B11_8
), 0xffffffff);
879 gen_set_label(label4
);
882 case 0x400d: /* shld Rm,Rn */
884 int label1
= gen_new_label();
885 int label2
= gen_new_label();
886 int label3
= gen_new_label();
888 tcg_gen_brcondi_i32(TCG_COND_LT
, REG(B7_4
), 0, label1
);
889 /* Rm positive, shift to the left */
890 shift
= tcg_temp_new();
891 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
892 tcg_gen_shl_i32(REG(B11_8
), REG(B11_8
), shift
);
893 tcg_temp_free(shift
);
895 /* Rm negative, shift to the right */
896 gen_set_label(label1
);
897 shift
= tcg_temp_new();
898 tcg_gen_andi_i32(shift
, REG(B7_4
), 0x1f);
899 tcg_gen_brcondi_i32(TCG_COND_EQ
, shift
, 0, label2
);
900 tcg_gen_not_i32(shift
, REG(B7_4
));
901 tcg_gen_andi_i32(shift
, shift
, 0x1f);
902 tcg_gen_addi_i32(shift
, shift
, 1);
903 tcg_gen_shr_i32(REG(B11_8
), REG(B11_8
), shift
);
904 tcg_temp_free(shift
);
907 gen_set_label(label2
);
908 tcg_gen_movi_i32(REG(B11_8
), 0);
909 gen_set_label(label3
);
912 case 0x3008: /* sub Rm,Rn */
913 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
915 case 0x300a: /* subc Rm,Rn */
919 tcg_gen_andi_i32(t0
, cpu_sr
, SR_T
);
921 tcg_gen_sub_i32(t1
, REG(B11_8
), REG(B7_4
));
922 tcg_gen_sub_i32(t0
, t1
, t0
);
924 tcg_gen_setcond_i32(TCG_COND_LTU
, t2
, REG(B11_8
), t1
);
925 tcg_gen_setcond_i32(TCG_COND_LTU
, t1
, t1
, t0
);
926 tcg_gen_or_i32(t1
, t1
, t2
);
928 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
929 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
931 tcg_gen_mov_i32(REG(B11_8
), t0
);
935 case 0x300b: /* subv Rm,Rn */
939 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
941 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
943 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
944 tcg_gen_and_i32(t1
, t1
, t2
);
946 tcg_gen_shri_i32(t1
, t1
, 31);
947 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
948 tcg_gen_or_i32(cpu_sr
, cpu_sr
, t1
);
950 tcg_gen_mov_i32(REG(B11_8
), t0
);
954 case 0x2008: /* tst Rm,Rn */
956 TCGv val
= tcg_temp_new();
957 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
958 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
962 case 0x200a: /* xor Rm,Rn */
963 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
965 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
967 if (ctx
->flags
& FPSCR_SZ
) {
968 TCGv_i64 fp
= tcg_temp_new_i64();
969 gen_load_fpr64(fp
, XREG(B7_4
));
970 gen_store_fpr64(fp
, XREG(B11_8
));
971 tcg_temp_free_i64(fp
);
973 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B7_4
)]);
976 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
978 if (ctx
->flags
& FPSCR_SZ
) {
979 TCGv addr_hi
= tcg_temp_new();
981 tcg_gen_addi_i32(addr_hi
, REG(B11_8
), 4);
982 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], REG(B11_8
),
983 ctx
->memidx
, MO_TEUL
);
984 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr_hi
,
985 ctx
->memidx
, MO_TEUL
);
986 tcg_temp_free(addr_hi
);
988 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], REG(B11_8
),
989 ctx
->memidx
, MO_TEUL
);
992 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
994 if (ctx
->flags
& FPSCR_SZ
) {
995 TCGv addr_hi
= tcg_temp_new();
996 int fr
= XREG(B11_8
);
997 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
998 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
999 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
1000 tcg_temp_free(addr_hi
);
1002 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
1003 ctx
->memidx
, MO_TEUL
);
1006 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1008 if (ctx
->flags
& FPSCR_SZ
) {
1009 TCGv addr_hi
= tcg_temp_new();
1010 int fr
= XREG(B11_8
);
1011 tcg_gen_addi_i32(addr_hi
, REG(B7_4
), 4);
1012 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], REG(B7_4
), ctx
->memidx
, MO_TEUL
);
1013 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr_hi
, ctx
->memidx
, MO_TEUL
);
1014 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1015 tcg_temp_free(addr_hi
);
1017 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], REG(B7_4
),
1018 ctx
->memidx
, MO_TEUL
);
1019 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1022 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1024 if (ctx
->flags
& FPSCR_SZ
) {
1025 TCGv addr
= tcg_temp_new_i32();
1026 int fr
= XREG(B7_4
);
1027 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1028 tcg_gen_qemu_st_i32(cpu_fregs
[fr
+1], addr
, ctx
->memidx
, MO_TEUL
);
1029 tcg_gen_subi_i32(addr
, addr
, 4);
1030 tcg_gen_qemu_st_i32(cpu_fregs
[fr
], addr
, ctx
->memidx
, MO_TEUL
);
1031 tcg_gen_mov_i32(REG(B11_8
), addr
);
1032 tcg_temp_free(addr
);
1035 addr
= tcg_temp_new_i32();
1036 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1037 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
1038 ctx
->memidx
, MO_TEUL
);
1039 tcg_gen_mov_i32(REG(B11_8
), addr
);
1040 tcg_temp_free(addr
);
1043 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1046 TCGv addr
= tcg_temp_new_i32();
1047 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1048 if (ctx
->flags
& FPSCR_SZ
) {
1049 int fr
= XREG(B11_8
);
1050 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1051 ctx
->memidx
, MO_TEUL
);
1052 tcg_gen_addi_i32(addr
, addr
, 4);
1053 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1054 ctx
->memidx
, MO_TEUL
);
1056 tcg_gen_qemu_ld_i32(cpu_fregs
[FREG(B11_8
)], addr
,
1057 ctx
->memidx
, MO_TEUL
);
1059 tcg_temp_free(addr
);
1062 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1065 TCGv addr
= tcg_temp_new();
1066 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1067 if (ctx
->flags
& FPSCR_SZ
) {
1068 int fr
= XREG(B7_4
);
1069 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
], addr
,
1070 ctx
->memidx
, MO_TEUL
);
1071 tcg_gen_addi_i32(addr
, addr
, 4);
1072 tcg_gen_qemu_ld_i32(cpu_fregs
[fr
+1], addr
,
1073 ctx
->memidx
, MO_TEUL
);
1075 tcg_gen_qemu_st_i32(cpu_fregs
[FREG(B7_4
)], addr
,
1076 ctx
->memidx
, MO_TEUL
);
1078 tcg_temp_free(addr
);
1081 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1082 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1083 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1084 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1085 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1086 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1089 if (ctx
->flags
& FPSCR_PR
) {
1092 if (ctx
->opcode
& 0x0110)
1093 break; /* illegal instruction */
1094 fp0
= tcg_temp_new_i64();
1095 fp1
= tcg_temp_new_i64();
1096 gen_load_fpr64(fp0
, DREG(B11_8
));
1097 gen_load_fpr64(fp1
, DREG(B7_4
));
1098 switch (ctx
->opcode
& 0xf00f) {
1099 case 0xf000: /* fadd Rm,Rn */
1100 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1102 case 0xf001: /* fsub Rm,Rn */
1103 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1105 case 0xf002: /* fmul Rm,Rn */
1106 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1108 case 0xf003: /* fdiv Rm,Rn */
1109 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1111 case 0xf004: /* fcmp/eq Rm,Rn */
1112 gen_helper_fcmp_eq_DT(cpu_env
, fp0
, fp1
);
1114 case 0xf005: /* fcmp/gt Rm,Rn */
1115 gen_helper_fcmp_gt_DT(cpu_env
, fp0
, fp1
);
1118 gen_store_fpr64(fp0
, DREG(B11_8
));
1119 tcg_temp_free_i64(fp0
);
1120 tcg_temp_free_i64(fp1
);
1122 switch (ctx
->opcode
& 0xf00f) {
1123 case 0xf000: /* fadd Rm,Rn */
1124 gen_helper_fadd_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1125 cpu_fregs
[FREG(B11_8
)],
1126 cpu_fregs
[FREG(B7_4
)]);
1128 case 0xf001: /* fsub Rm,Rn */
1129 gen_helper_fsub_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1130 cpu_fregs
[FREG(B11_8
)],
1131 cpu_fregs
[FREG(B7_4
)]);
1133 case 0xf002: /* fmul Rm,Rn */
1134 gen_helper_fmul_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1135 cpu_fregs
[FREG(B11_8
)],
1136 cpu_fregs
[FREG(B7_4
)]);
1138 case 0xf003: /* fdiv Rm,Rn */
1139 gen_helper_fdiv_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1140 cpu_fregs
[FREG(B11_8
)],
1141 cpu_fregs
[FREG(B7_4
)]);
1143 case 0xf004: /* fcmp/eq Rm,Rn */
1144 gen_helper_fcmp_eq_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1145 cpu_fregs
[FREG(B7_4
)]);
1147 case 0xf005: /* fcmp/gt Rm,Rn */
1148 gen_helper_fcmp_gt_FT(cpu_env
, cpu_fregs
[FREG(B11_8
)],
1149 cpu_fregs
[FREG(B7_4
)]);
1155 case 0xf00e: /* fmac FR0,RM,Rn */
1158 if (ctx
->flags
& FPSCR_PR
) {
1159 break; /* illegal instruction */
1161 gen_helper_fmac_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1162 cpu_fregs
[FREG(0)], cpu_fregs
[FREG(B7_4
)],
1163 cpu_fregs
[FREG(B11_8
)]);
1169 switch (ctx
->opcode
& 0xff00) {
1170 case 0xc900: /* and #imm,R0 */
1171 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1173 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1176 addr
= tcg_temp_new();
1177 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1178 val
= tcg_temp_new();
1179 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1180 tcg_gen_andi_i32(val
, val
, B7_0
);
1181 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1183 tcg_temp_free(addr
);
1186 case 0x8b00: /* bf label */
1187 CHECK_NOT_DELAY_SLOT
1188 gen_conditional_jump(ctx
, ctx
->pc
+ 2,
1189 ctx
->pc
+ 4 + B7_0s
* 2);
1190 ctx
->bstate
= BS_BRANCH
;
1192 case 0x8f00: /* bf/s label */
1193 CHECK_NOT_DELAY_SLOT
1194 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 0);
1195 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1197 case 0x8900: /* bt label */
1198 CHECK_NOT_DELAY_SLOT
1199 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2,
1201 ctx
->bstate
= BS_BRANCH
;
1203 case 0x8d00: /* bt/s label */
1204 CHECK_NOT_DELAY_SLOT
1205 gen_branch_slot(ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2, 1);
1206 ctx
->flags
|= DELAY_SLOT_CONDITIONAL
;
1208 case 0x8800: /* cmp/eq #imm,R0 */
1209 gen_cmp_imm(TCG_COND_EQ
, REG(0), B7_0s
);
1211 case 0xc400: /* mov.b @(disp,GBR),R0 */
1213 TCGv addr
= tcg_temp_new();
1214 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1215 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1216 tcg_temp_free(addr
);
1219 case 0xc500: /* mov.w @(disp,GBR),R0 */
1221 TCGv addr
= tcg_temp_new();
1222 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1223 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1224 tcg_temp_free(addr
);
1227 case 0xc600: /* mov.l @(disp,GBR),R0 */
1229 TCGv addr
= tcg_temp_new();
1230 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1231 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESL
);
1232 tcg_temp_free(addr
);
1235 case 0xc000: /* mov.b R0,@(disp,GBR) */
1237 TCGv addr
= tcg_temp_new();
1238 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1239 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1240 tcg_temp_free(addr
);
1243 case 0xc100: /* mov.w R0,@(disp,GBR) */
1245 TCGv addr
= tcg_temp_new();
1246 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1247 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1248 tcg_temp_free(addr
);
1251 case 0xc200: /* mov.l R0,@(disp,GBR) */
1253 TCGv addr
= tcg_temp_new();
1254 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1255 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUL
);
1256 tcg_temp_free(addr
);
1259 case 0x8000: /* mov.b R0,@(disp,Rn) */
1261 TCGv addr
= tcg_temp_new();
1262 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1263 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1264 tcg_temp_free(addr
);
1267 case 0x8100: /* mov.w R0,@(disp,Rn) */
1269 TCGv addr
= tcg_temp_new();
1270 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1271 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1272 tcg_temp_free(addr
);
1275 case 0x8400: /* mov.b @(disp,Rn),R0 */
1277 TCGv addr
= tcg_temp_new();
1278 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1279 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1280 tcg_temp_free(addr
);
1283 case 0x8500: /* mov.w @(disp,Rn),R0 */
1285 TCGv addr
= tcg_temp_new();
1286 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1287 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1288 tcg_temp_free(addr
);
1291 case 0xc700: /* mova @(disp,PC),R0 */
1292 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1294 case 0xcb00: /* or #imm,R0 */
1295 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1297 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1300 addr
= tcg_temp_new();
1301 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1302 val
= tcg_temp_new();
1303 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1304 tcg_gen_ori_i32(val
, val
, B7_0
);
1305 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1307 tcg_temp_free(addr
);
1310 case 0xc300: /* trapa #imm */
1313 CHECK_NOT_DELAY_SLOT
1314 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1315 imm
= tcg_const_i32(B7_0
);
1316 gen_helper_trapa(cpu_env
, imm
);
1318 ctx
->bstate
= BS_BRANCH
;
1321 case 0xc800: /* tst #imm,R0 */
1323 TCGv val
= tcg_temp_new();
1324 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1325 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1329 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1331 TCGv val
= tcg_temp_new();
1332 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1333 tcg_gen_qemu_ld_i32(val
, val
, ctx
->memidx
, MO_UB
);
1334 tcg_gen_andi_i32(val
, val
, B7_0
);
1335 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1339 case 0xca00: /* xor #imm,R0 */
1340 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1342 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1345 addr
= tcg_temp_new();
1346 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1347 val
= tcg_temp_new();
1348 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1349 tcg_gen_xori_i32(val
, val
, B7_0
);
1350 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1352 tcg_temp_free(addr
);
1357 switch (ctx
->opcode
& 0xf08f) {
1358 case 0x408e: /* ldc Rm,Rn_BANK */
1360 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1362 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1364 tcg_gen_qemu_ld_i32(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1365 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1367 case 0x0082: /* stc Rm_BANK,Rn */
1369 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1371 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1374 TCGv addr
= tcg_temp_new();
1375 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1376 tcg_gen_qemu_st_i32(ALTREG(B6_4
), addr
, ctx
->memidx
, MO_TEUL
);
1377 tcg_gen_mov_i32(REG(B11_8
), addr
);
1378 tcg_temp_free(addr
);
1383 switch (ctx
->opcode
& 0xf0ff) {
1384 case 0x0023: /* braf Rn */
1385 CHECK_NOT_DELAY_SLOT
1386 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1387 ctx
->flags
|= DELAY_SLOT
;
1388 ctx
->delayed_pc
= (uint32_t) - 1;
1390 case 0x0003: /* bsrf Rn */
1391 CHECK_NOT_DELAY_SLOT
1392 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1393 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1394 ctx
->flags
|= DELAY_SLOT
;
1395 ctx
->delayed_pc
= (uint32_t) - 1;
1397 case 0x4015: /* cmp/pl Rn */
1398 gen_cmp_imm(TCG_COND_GT
, REG(B11_8
), 0);
1400 case 0x4011: /* cmp/pz Rn */
1401 gen_cmp_imm(TCG_COND_GE
, REG(B11_8
), 0);
1403 case 0x4010: /* dt Rn */
1404 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1405 gen_cmp_imm(TCG_COND_EQ
, REG(B11_8
), 0);
1407 case 0x402b: /* jmp @Rn */
1408 CHECK_NOT_DELAY_SLOT
1409 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1410 ctx
->flags
|= DELAY_SLOT
;
1411 ctx
->delayed_pc
= (uint32_t) - 1;
1413 case 0x400b: /* jsr @Rn */
1414 CHECK_NOT_DELAY_SLOT
1415 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1416 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1417 ctx
->flags
|= DELAY_SLOT
;
1418 ctx
->delayed_pc
= (uint32_t) - 1;
1420 case 0x400e: /* ldc Rm,SR */
1422 tcg_gen_andi_i32(cpu_sr
, REG(B11_8
), 0x700083f3);
1423 ctx
->bstate
= BS_STOP
;
1425 case 0x4007: /* ldc.l @Rm+,SR */
1428 TCGv val
= tcg_temp_new();
1429 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1430 tcg_gen_andi_i32(cpu_sr
, val
, 0x700083f3);
1432 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1433 ctx
->bstate
= BS_STOP
;
1436 case 0x0002: /* stc SR,Rn */
1438 tcg_gen_mov_i32(REG(B11_8
), cpu_sr
);
1440 case 0x4003: /* stc SR,@-Rn */
1443 TCGv addr
= tcg_temp_new();
1444 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1445 tcg_gen_qemu_st_i32(cpu_sr
, addr
, ctx
->memidx
, MO_TEUL
);
1446 tcg_gen_mov_i32(REG(B11_8
), addr
);
1447 tcg_temp_free(addr
);
1450 #define LD(reg,ldnum,ldpnum,prechk) \
1453 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1457 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1458 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1460 #define ST(reg,stnum,stpnum,prechk) \
1463 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1468 TCGv addr = tcg_temp_new(); \
1469 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1470 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1471 tcg_gen_mov_i32(REG(B11_8), addr); \
1472 tcg_temp_free(addr); \
1475 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1476 LD(reg,ldnum,ldpnum,prechk) \
1477 ST(reg,stnum,stpnum,prechk)
1478 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1479 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1480 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1481 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1482 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1483 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED
if (!(ctx
->features
& SH_FEATURE_SH4A
)) break;)
1484 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1485 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1486 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1487 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1488 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1489 case 0x406a: /* lds Rm,FPSCR */
1491 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1492 ctx
->bstate
= BS_STOP
;
1494 case 0x4066: /* lds.l @Rm+,FPSCR */
1497 TCGv addr
= tcg_temp_new();
1498 tcg_gen_qemu_ld_i32(addr
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1499 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1500 gen_helper_ld_fpscr(cpu_env
, addr
);
1501 tcg_temp_free(addr
);
1502 ctx
->bstate
= BS_STOP
;
1505 case 0x006a: /* sts FPSCR,Rn */
1507 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1509 case 0x4062: /* sts FPSCR,@-Rn */
1513 val
= tcg_temp_new();
1514 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1515 addr
= tcg_temp_new();
1516 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1517 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1518 tcg_gen_mov_i32(REG(B11_8
), addr
);
1519 tcg_temp_free(addr
);
1523 case 0x00c3: /* movca.l R0,@Rm */
1525 TCGv val
= tcg_temp_new();
1526 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1527 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1528 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1530 ctx
->has_movcal
= 1;
1533 /* MOVUA.L @Rm,R0 (Rm) -> R0
1534 Load non-boundary-aligned data */
1535 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1538 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1539 Load non-boundary-aligned data */
1540 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1541 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1543 case 0x0029: /* movt Rn */
1544 tcg_gen_andi_i32(REG(B11_8
), cpu_sr
, SR_T
);
1549 If (T == 1) R0 -> (Rn)
1552 if (ctx
->features
& SH_FEATURE_SH4A
) {
1553 int label
= gen_new_label();
1554 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~SR_T
);
1555 tcg_gen_or_i32(cpu_sr
, cpu_sr
, cpu_ldst
);
1556 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1557 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1558 gen_set_label(label
);
1559 tcg_gen_movi_i32(cpu_ldst
, 0);
1567 When interrupt/exception
1570 if (ctx
->features
& SH_FEATURE_SH4A
) {
1571 tcg_gen_movi_i32(cpu_ldst
, 0);
1572 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1573 tcg_gen_movi_i32(cpu_ldst
, 1);
1577 case 0x0093: /* ocbi @Rn */
1579 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1582 case 0x00a3: /* ocbp @Rn */
1583 case 0x00b3: /* ocbwb @Rn */
1584 /* These instructions are supposed to do nothing in case of
1585 a cache miss. Given that we only partially emulate caches
1586 it is safe to simply ignore them. */
1588 case 0x0083: /* pref @Rn */
1590 case 0x00d3: /* prefi @Rn */
1591 if (ctx
->features
& SH_FEATURE_SH4A
)
1595 case 0x00e3: /* icbi @Rn */
1596 if (ctx
->features
& SH_FEATURE_SH4A
)
1600 case 0x00ab: /* synco */
1601 if (ctx
->features
& SH_FEATURE_SH4A
)
1605 case 0x4024: /* rotcl Rn */
1607 TCGv tmp
= tcg_temp_new();
1608 tcg_gen_mov_i32(tmp
, cpu_sr
);
1609 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1610 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1611 gen_copy_bit_i32(REG(B11_8
), 0, tmp
, 0);
1615 case 0x4025: /* rotcr Rn */
1617 TCGv tmp
= tcg_temp_new();
1618 tcg_gen_mov_i32(tmp
, cpu_sr
);
1619 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1620 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1621 gen_copy_bit_i32(REG(B11_8
), 31, tmp
, 0);
1625 case 0x4004: /* rotl Rn */
1626 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1627 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1629 case 0x4005: /* rotr Rn */
1630 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1631 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1633 case 0x4000: /* shll Rn */
1634 case 0x4020: /* shal Rn */
1635 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 31);
1636 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1638 case 0x4021: /* shar Rn */
1639 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1640 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1642 case 0x4001: /* shlr Rn */
1643 gen_copy_bit_i32(cpu_sr
, 0, REG(B11_8
), 0);
1644 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1646 case 0x4008: /* shll2 Rn */
1647 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1649 case 0x4018: /* shll8 Rn */
1650 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1652 case 0x4028: /* shll16 Rn */
1653 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1655 case 0x4009: /* shlr2 Rn */
1656 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1658 case 0x4019: /* shlr8 Rn */
1659 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1661 case 0x4029: /* shlr16 Rn */
1662 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1664 case 0x401b: /* tas.b @Rn */
1667 addr
= tcg_temp_local_new();
1668 tcg_gen_mov_i32(addr
, REG(B11_8
));
1669 val
= tcg_temp_local_new();
1670 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1671 gen_cmp_imm(TCG_COND_EQ
, val
, 0);
1672 tcg_gen_ori_i32(val
, val
, 0x80);
1673 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1675 tcg_temp_free(addr
);
1678 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1680 tcg_gen_mov_i32(cpu_fregs
[FREG(B11_8
)], cpu_fpul
);
1682 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1684 tcg_gen_mov_i32(cpu_fpul
, cpu_fregs
[FREG(B11_8
)]);
1686 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1688 if (ctx
->flags
& FPSCR_PR
) {
1690 if (ctx
->opcode
& 0x0100)
1691 break; /* illegal instruction */
1692 fp
= tcg_temp_new_i64();
1693 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1694 gen_store_fpr64(fp
, DREG(B11_8
));
1695 tcg_temp_free_i64(fp
);
1698 gen_helper_float_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
, cpu_fpul
);
1701 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1703 if (ctx
->flags
& FPSCR_PR
) {
1705 if (ctx
->opcode
& 0x0100)
1706 break; /* illegal instruction */
1707 fp
= tcg_temp_new_i64();
1708 gen_load_fpr64(fp
, DREG(B11_8
));
1709 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1710 tcg_temp_free_i64(fp
);
1713 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, cpu_fregs
[FREG(B11_8
)]);
1716 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1719 gen_helper_fneg_T(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1722 case 0xf05d: /* fabs FRn/DRn */
1724 if (ctx
->flags
& FPSCR_PR
) {
1725 if (ctx
->opcode
& 0x0100)
1726 break; /* illegal instruction */
1727 TCGv_i64 fp
= tcg_temp_new_i64();
1728 gen_load_fpr64(fp
, DREG(B11_8
));
1729 gen_helper_fabs_DT(fp
, fp
);
1730 gen_store_fpr64(fp
, DREG(B11_8
));
1731 tcg_temp_free_i64(fp
);
1733 gen_helper_fabs_FT(cpu_fregs
[FREG(B11_8
)], cpu_fregs
[FREG(B11_8
)]);
1736 case 0xf06d: /* fsqrt FRn */
1738 if (ctx
->flags
& FPSCR_PR
) {
1739 if (ctx
->opcode
& 0x0100)
1740 break; /* illegal instruction */
1741 TCGv_i64 fp
= tcg_temp_new_i64();
1742 gen_load_fpr64(fp
, DREG(B11_8
));
1743 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1744 gen_store_fpr64(fp
, DREG(B11_8
));
1745 tcg_temp_free_i64(fp
);
1747 gen_helper_fsqrt_FT(cpu_fregs
[FREG(B11_8
)], cpu_env
,
1748 cpu_fregs
[FREG(B11_8
)]);
1751 case 0xf07d: /* fsrra FRn */
1754 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1756 if (!(ctx
->flags
& FPSCR_PR
)) {
1757 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0);
1760 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1762 if (!(ctx
->flags
& FPSCR_PR
)) {
1763 tcg_gen_movi_i32(cpu_fregs
[FREG(B11_8
)], 0x3f800000);
1766 case 0xf0ad: /* fcnvsd FPUL,DRn */
1769 TCGv_i64 fp
= tcg_temp_new_i64();
1770 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1771 gen_store_fpr64(fp
, DREG(B11_8
));
1772 tcg_temp_free_i64(fp
);
1775 case 0xf0bd: /* fcnvds DRn,FPUL */
1778 TCGv_i64 fp
= tcg_temp_new_i64();
1779 gen_load_fpr64(fp
, DREG(B11_8
));
1780 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1781 tcg_temp_free_i64(fp
);
1784 case 0xf0ed: /* fipr FVm,FVn */
1786 if ((ctx
->flags
& FPSCR_PR
) == 0) {
1788 m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1789 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1790 gen_helper_fipr(cpu_env
, m
, n
);
1796 case 0xf0fd: /* ftrv XMTRX,FVn */
1798 if ((ctx
->opcode
& 0x0300) == 0x0100 &&
1799 (ctx
->flags
& FPSCR_PR
) == 0) {
1801 n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1802 gen_helper_ftrv(cpu_env
, n
);
1809 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1810 ctx
->opcode
, ctx
->pc
);
1813 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
1814 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
1815 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1817 gen_helper_raise_illegal_instruction(cpu_env
);
1819 ctx
->bstate
= BS_BRANCH
;
/*
 * decode_opc: translate one SH-4 instruction and perform the delay-slot
 * bookkeeping around it.
 *
 * NOTE(review): this chunk is a mangled extraction; several original source
 * lines are missing from this block (including the call that dispatches to
 * the per-opcode decoder between the log hook and the delay-slot epilogue,
 * and the body of the DELAY_SLOT_CLEARME arm).  The surviving tokens are
 * kept byte-identical below — confirm against the upstream file before
 * editing.
 */
1822 static void decode_opc(DisasContext
* ctx
)
/* Snapshot the flags as they were BEFORE this instruction; used below to
 * detect whether the instruction just translated sat in a delay slot. */
1824 uint32_t old_flags
= ctx
->flags
;
/* Emit a per-insn TCG debug marker when op logging is enabled. */
1826 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
1827 tcg_gen_debug_insn_start(ctx
->pc
);
/* If the PREVIOUS instruction was a (conditional) branch, the instruction
 * just translated was its delay slot: clear the delay-slot flags and emit
 * the pending jump now. */
1832 if (old_flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
)) {
/* DELAY_SLOT_CLEARME: the stored flags can simply be reset.
 * NOTE(review): the body of this arm was dropped by the extraction. */
1833 if (ctx
->flags
& DELAY_SLOT_CLEARME
) {
1836 /* go out of the delay slot */
1837 uint32_t new_flags
= ctx
->flags
;
1838 new_flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
1839 gen_store_flags(new_flags
);
/* Leaving a delay slot always ends the TB. */
1842 ctx
->bstate
= BS_BRANCH
;
/* Emit the delayed jump: conditional form for bt/s / bf/s, otherwise the
 * unconditional form (NOTE(review): the unconditional arm's body is
 * missing from this extraction). */
1843 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1844 gen_delayed_conditional_jump(ctx
);
1845 } else if (old_flags
& DELAY_SLOT
) {
1851 /* go into a delay slot */
/* If THIS instruction was a delayed branch, persist the delay-slot flags
 * so the next instruction knows it is executing in the slot. */
1852 if (ctx
->flags
& (DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))
1853 gen_store_flags(ctx
->flags
);
/*
 * gen_intermediate_code_internal: main SH-4 translation loop for one
 * TranslationBlock.  Seeds a DisasContext from tb->flags, then decodes
 * instructions until the block state leaves BS_NONE (or the opcode buffer /
 * instruction budget runs out), and finally emits the TB epilogue.
 *
 * NOTE(review): mangled extraction — the return type and the 'bool
 * search_pc' parameter line, plus a number of interior statements
 * (pc_start / ctx.pc initialisation, gen_tb_start(), the decode_opc(&ctx)
 * call, several if/switch bodies) are missing from this view.  The
 * surviving tokens are kept byte-identical; confirm against upstream
 * before editing.
 */
1857 gen_intermediate_code_internal(SuperHCPU
*cpu
, TranslationBlock
*tb
,
/* Derive the generic CPUState and the SH-4 architectural state. */
1860 CPUState
*cs
= CPU(cpu
);
1861 CPUSH4State
*env
= &cpu
->env
;
1863 target_ulong pc_start
;
1864 static uint16_t *gen_opc_end
;
/* One past the end of the TCG opcode buffer; bounds the main loop. */
1871 gen_opc_end
= tcg_ctx
.gen_opc_buf
+ OPC_MAX_SIZE
;
/* Seed the disassembly context from the TB's compile-time flags. */
1873 ctx
.flags
= (uint32_t)tb
->flags
;
1874 ctx
.bstate
= BS_NONE
;
/* memidx: 1 for user mode (SR_MD clear), 0 for privileged mode. */
1875 ctx
.memidx
= (ctx
.flags
& SR_MD
) == 0 ? 1 : 0;
1876 /* We don't know if the delayed pc came from a dynamic or static branch,
1877 so assume it is a dynamic branch. */
1878 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
1880 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
1881 ctx
.features
= env
->features
;
/* Propagate a movca.l still pending from a previous TB. */
1882 ctx
.has_movcal
= (ctx
.flags
& TB_FLAG_PENDING_MOVCA
);
/* Instruction budget: CF_COUNT_MASK bits of cflags, or the maximum when
 * unset (NOTE(review): the if/else lines around these two assignments are
 * elided). */
1886 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
1888 max_insns
= CF_COUNT_MASK
;
/* Main loop: one SH-4 instruction per iteration until the block ends. */
1890 while (ctx
.bstate
== BS_NONE
&& tcg_ctx
.gen_opc_ptr
< gen_opc_end
) {
/* Stop translation on a debugger breakpoint at the current pc. */
1891 if (unlikely(!QTAILQ_EMPTY(&cs
->breakpoints
))) {
1892 QTAILQ_FOREACH(bp
, &cs
->breakpoints
, entry
) {
1893 if (ctx
.pc
== bp
->pc
) {
1894 /* We have hit a breakpoint - make sure PC is up-to-date */
1895 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1896 gen_helper_debug(cpu_env
);
1897 ctx
.bstate
= BS_BRANCH
;
/* Per-op metadata (pc/flags/icount) recorded for each instruction —
 * presumably guarded by the elided 'if (search_pc)' test; confirm. */
1903 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
1907 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
1909 tcg_ctx
.gen_opc_pc
[ii
] = ctx
.pc
;
1910 gen_opc_hflags
[ii
] = ctx
.flags
;
1911 tcg_ctx
.gen_opc_instr_start
[ii
] = 1;
1912 tcg_ctx
.gen_opc_icount
[ii
] = num_insns
;
/* Last instruction of an I/O-terminated TB — body elided; presumably
 * gen_io_start(), confirm against upstream. */
1914 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
/* Debug trace of each fetch (guard line elided by the extraction). */
1917 fprintf(stderr
, "Loading opcode at address 0x%08x\n", ctx
.pc
);
/* Fetch the 16-bit opcode at the current guest pc. */
1920 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
/* Stop at a guest page boundary (body elided). */
1924 if ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
/* Stop after a single instruction when single-stepping (body elided). */
1926 if (cs
->singlestep_enabled
) {
1929 if (num_insns
>= max_insns
)
/* Epilogue — presumably gen_io_end() here; the body is elided. */
1934 if (tb
->cflags
& CF_LAST_IO
)
/* Single-step: sync pc and trap to the debugger helper. */
1936 if (cs
->singlestep_enabled
) {
1937 tcg_gen_movi_i32(cpu_pc
, ctx
.pc
);
1938 gen_helper_debug(cpu_env
);
/* Close the TB according to how translation stopped (case labels and
 * some bodies elided by the extraction). */
1940 switch (ctx
.bstate
) {
1942 /* gen_op_interrupt_restart(); */
1946 gen_store_flags(ctx
.flags
| DELAY_SLOT_CLEARME
);
1948 gen_goto_tb(&ctx
, 0, ctx
.pc
);
1951 /* gen_op_interrupt_restart(); */
1960 gen_tb_end(tb
, num_insns
);
1961 *tcg_ctx
.gen_opc_ptr
= INDEX_op_end
;
/* search_pc tail: pad the remaining instr_start slots (guard elided). */
1963 i
= tcg_ctx
.gen_opc_ptr
- tcg_ctx
.gen_opc_buf
;
1966 tcg_ctx
.gen_opc_instr_start
[ii
++] = 0;
/* Record the guest-code size and instruction count of the TB. */
1968 tb
->size
= ctx
.pc
- pc_start
;
1969 tb
->icount
= num_insns
;
/* Optional disassembly dump of the translated guest code. */
1973 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
1974 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1975 log_target_disas(env
, pc_start
, ctx
.pc
- pc_start
, 0);
1981 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1983 gen_intermediate_code_internal(sh_env_get_cpu(env
), tb
, false);
1986 void gen_intermediate_code_pc(CPUSH4State
* env
, struct TranslationBlock
*tb
)
1988 gen_intermediate_code_internal(sh_env_get_cpu(env
), tb
, true);
/*
 * restore_state_to_opc: after a fault in the middle of a TB, restore the
 * architectural pc and the translation flags from the per-op metadata that
 * the search-pc pass recorded (tcg_ctx.gen_opc_pc / gen_opc_hflags).
 * NOTE(review): the function's closing brace lies outside this extracted
 * view; tokens below are kept byte-identical.
 */
1991 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
, int pc_pos
)
/* pc_pos indexes the per-op arrays at the faulting instruction. */
1993 env
->pc
= tcg_ctx
.gen_opc_pc
[pc_pos
];
1994 env
->flags
= gen_opc_hflags
[pc_pos
];