4 * Copyright (c) 2005 Samuel Tardieu
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
32 #include "trace-tcg.h"
36 typedef struct DisasContext
{
37 struct TranslationBlock
*tb
;
40 uint32_t tbflags
; /* should stay unmodified during the TB translation */
41 uint32_t envflags
; /* should stay in sync with env->flags using TCG ops */
47 int singlestep_enabled
;
52 #if defined(CONFIG_USER_ONLY)
53 #define IS_USER(ctx) 1
55 #define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
59 BS_NONE
= 0, /* We go out of the TB without reaching a branch or an
62 BS_STOP
= 1, /* We want to stop translation for any reason */
63 BS_BRANCH
= 2, /* We reached a branch condition */
64 BS_EXCP
= 3, /* We reached an exception condition */
/* global register indexes */
static TCGv_env cpu_env;
/* 32 slots but only 24 distinct registers: sh4_translate_init copies
   slots 8..15 (R8..R15) into slots 24..31 so that the bank-xor
   indexing used by REG()/ALTREG() always lands on a valid global.  */
static TCGv cpu_gregs[32];
/* SR is kept split: cpu_sr holds every bit except M/Q/T, which live in
   their own 0/1-valued globals (see gen_write_sr / gen_read_sr).  */
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
/* 32 single-precision FP registers, both banks (see fregnames).  */
static TCGv cpu_fregs[32];

/* internal register indexes */
/* Translation-internal state mirrored in CPUSH4State: env->flags, the
   delay-slot target and the delayed branch condition.  */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
79 #include "exec/gen-icount.h"
81 void sh4_translate_init(void)
84 static int done_init
= 0;
85 static const char * const gregnames
[24] = {
86 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
87 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
88 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
89 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
90 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
92 static const char * const fregnames
[32] = {
93 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
94 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
95 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
96 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
97 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
98 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
99 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
100 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
107 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
108 tcg_ctx
.tcg_env
= cpu_env
;
110 for (i
= 0; i
< 24; i
++) {
111 cpu_gregs
[i
] = tcg_global_mem_new_i32(cpu_env
,
112 offsetof(CPUSH4State
, gregs
[i
]),
115 memcpy(cpu_gregs
+ 24, cpu_gregs
+ 8, 8 * sizeof(TCGv
));
117 cpu_pc
= tcg_global_mem_new_i32(cpu_env
,
118 offsetof(CPUSH4State
, pc
), "PC");
119 cpu_sr
= tcg_global_mem_new_i32(cpu_env
,
120 offsetof(CPUSH4State
, sr
), "SR");
121 cpu_sr_m
= tcg_global_mem_new_i32(cpu_env
,
122 offsetof(CPUSH4State
, sr_m
), "SR_M");
123 cpu_sr_q
= tcg_global_mem_new_i32(cpu_env
,
124 offsetof(CPUSH4State
, sr_q
), "SR_Q");
125 cpu_sr_t
= tcg_global_mem_new_i32(cpu_env
,
126 offsetof(CPUSH4State
, sr_t
), "SR_T");
127 cpu_ssr
= tcg_global_mem_new_i32(cpu_env
,
128 offsetof(CPUSH4State
, ssr
), "SSR");
129 cpu_spc
= tcg_global_mem_new_i32(cpu_env
,
130 offsetof(CPUSH4State
, spc
), "SPC");
131 cpu_gbr
= tcg_global_mem_new_i32(cpu_env
,
132 offsetof(CPUSH4State
, gbr
), "GBR");
133 cpu_vbr
= tcg_global_mem_new_i32(cpu_env
,
134 offsetof(CPUSH4State
, vbr
), "VBR");
135 cpu_sgr
= tcg_global_mem_new_i32(cpu_env
,
136 offsetof(CPUSH4State
, sgr
), "SGR");
137 cpu_dbr
= tcg_global_mem_new_i32(cpu_env
,
138 offsetof(CPUSH4State
, dbr
), "DBR");
139 cpu_mach
= tcg_global_mem_new_i32(cpu_env
,
140 offsetof(CPUSH4State
, mach
), "MACH");
141 cpu_macl
= tcg_global_mem_new_i32(cpu_env
,
142 offsetof(CPUSH4State
, macl
), "MACL");
143 cpu_pr
= tcg_global_mem_new_i32(cpu_env
,
144 offsetof(CPUSH4State
, pr
), "PR");
145 cpu_fpscr
= tcg_global_mem_new_i32(cpu_env
,
146 offsetof(CPUSH4State
, fpscr
), "FPSCR");
147 cpu_fpul
= tcg_global_mem_new_i32(cpu_env
,
148 offsetof(CPUSH4State
, fpul
), "FPUL");
150 cpu_flags
= tcg_global_mem_new_i32(cpu_env
,
151 offsetof(CPUSH4State
, flags
), "_flags_");
152 cpu_delayed_pc
= tcg_global_mem_new_i32(cpu_env
,
153 offsetof(CPUSH4State
, delayed_pc
),
155 cpu_delayed_cond
= tcg_global_mem_new_i32(cpu_env
,
156 offsetof(CPUSH4State
,
159 cpu_ldst
= tcg_global_mem_new_i32(cpu_env
,
160 offsetof(CPUSH4State
, ldst
), "_ldst_");
162 for (i
= 0; i
< 32; i
++)
163 cpu_fregs
[i
] = tcg_global_mem_new_i32(cpu_env
,
164 offsetof(CPUSH4State
, fregs
[i
]),
170 void superh_cpu_dump_state(CPUState
*cs
, FILE *f
,
171 fprintf_function cpu_fprintf
, int flags
)
173 SuperHCPU
*cpu
= SUPERH_CPU(cs
);
174 CPUSH4State
*env
= &cpu
->env
;
176 cpu_fprintf(f
, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
177 env
->pc
, cpu_read_sr(env
), env
->pr
, env
->fpscr
);
178 cpu_fprintf(f
, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
179 env
->spc
, env
->ssr
, env
->gbr
, env
->vbr
);
180 cpu_fprintf(f
, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
181 env
->sgr
, env
->dbr
, env
->delayed_pc
, env
->fpul
);
182 for (i
= 0; i
< 24; i
+= 4) {
183 cpu_fprintf(f
, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
184 i
, env
->gregs
[i
], i
+ 1, env
->gregs
[i
+ 1],
185 i
+ 2, env
->gregs
[i
+ 2], i
+ 3, env
->gregs
[i
+ 3]);
187 if (env
->flags
& DELAY_SLOT
) {
188 cpu_fprintf(f
, "in delay slot (delayed_pc=0x%08x)\n",
190 } else if (env
->flags
& DELAY_SLOT_CONDITIONAL
) {
191 cpu_fprintf(f
, "in conditional delay slot (delayed_pc=0x%08x)\n",
193 } else if (env
->flags
& DELAY_SLOT_RTE
) {
194 cpu_fprintf(f
, "in rte delay slot (delayed_pc=0x%08x)\n",
199 static void gen_read_sr(TCGv dst
)
201 TCGv t0
= tcg_temp_new();
202 tcg_gen_shli_i32(t0
, cpu_sr_q
, SR_Q
);
203 tcg_gen_or_i32(dst
, dst
, t0
);
204 tcg_gen_shli_i32(t0
, cpu_sr_m
, SR_M
);
205 tcg_gen_or_i32(dst
, dst
, t0
);
206 tcg_gen_shli_i32(t0
, cpu_sr_t
, SR_T
);
207 tcg_gen_or_i32(dst
, cpu_sr
, t0
);
208 tcg_temp_free_i32(t0
);
211 static void gen_write_sr(TCGv src
)
213 tcg_gen_andi_i32(cpu_sr
, src
,
214 ~((1u << SR_Q
) | (1u << SR_M
) | (1u << SR_T
)));
215 tcg_gen_extract_i32(cpu_sr_q
, src
, SR_Q
, 1);
216 tcg_gen_extract_i32(cpu_sr_m
, src
, SR_M
, 1);
217 tcg_gen_extract_i32(cpu_sr_t
, src
, SR_T
, 1);
220 static inline void gen_save_cpu_state(DisasContext
*ctx
, bool save_pc
)
223 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
);
225 if (ctx
->delayed_pc
!= (uint32_t) -1) {
226 tcg_gen_movi_i32(cpu_delayed_pc
, ctx
->delayed_pc
);
228 if ((ctx
->tbflags
& TB_FLAG_ENVFLAGS_MASK
) != ctx
->envflags
) {
229 tcg_gen_movi_i32(cpu_flags
, ctx
->envflags
);
233 static inline bool use_goto_tb(DisasContext
*ctx
, target_ulong dest
)
235 if (unlikely(ctx
->singlestep_enabled
)) {
238 if (ctx
->tbflags
& GUSA_EXCLUSIVE
) {
241 #ifndef CONFIG_USER_ONLY
242 return (ctx
->tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
);
248 static void gen_goto_tb(DisasContext
*ctx
, int n
, target_ulong dest
)
250 if (use_goto_tb(ctx
, dest
)) {
251 /* Use a direct jump if in same page and singlestep not enabled */
253 tcg_gen_movi_i32(cpu_pc
, dest
);
254 tcg_gen_exit_tb((uintptr_t)ctx
->tb
+ n
);
256 tcg_gen_movi_i32(cpu_pc
, dest
);
257 if (ctx
->singlestep_enabled
)
258 gen_helper_debug(cpu_env
);
263 static void gen_jump(DisasContext
* ctx
)
265 if (ctx
->delayed_pc
== (uint32_t) - 1) {
266 /* Target is not statically known, it comes necessarily from a
267 delayed jump as immediate jump are conditinal jumps */
268 tcg_gen_mov_i32(cpu_pc
, cpu_delayed_pc
);
269 tcg_gen_discard_i32(cpu_delayed_pc
);
270 if (ctx
->singlestep_enabled
)
271 gen_helper_debug(cpu_env
);
274 gen_goto_tb(ctx
, 0, ctx
->delayed_pc
);
278 /* Immediate conditional jump (bt or bf) */
279 static void gen_conditional_jump(DisasContext
*ctx
, target_ulong dest
,
282 TCGLabel
*l1
= gen_new_label();
283 TCGCond cond_not_taken
= jump_if_true
? TCG_COND_EQ
: TCG_COND_NE
;
285 if (ctx
->tbflags
& GUSA_EXCLUSIVE
) {
286 /* When in an exclusive region, we must continue to the end.
287 Therefore, exit the region on a taken branch, but otherwise
288 fall through to the next instruction. */
289 tcg_gen_brcondi_i32(cond_not_taken
, cpu_sr_t
, 0, l1
);
290 tcg_gen_movi_i32(cpu_flags
, ctx
->envflags
& ~GUSA_MASK
);
291 /* Note that this won't actually use a goto_tb opcode because we
292 disallow it in use_goto_tb, but it handles exit + singlestep. */
293 gen_goto_tb(ctx
, 0, dest
);
298 gen_save_cpu_state(ctx
, false);
299 tcg_gen_brcondi_i32(cond_not_taken
, cpu_sr_t
, 0, l1
);
300 gen_goto_tb(ctx
, 0, dest
);
302 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
303 ctx
->bstate
= BS_BRANCH
;
306 /* Delayed conditional jump (bt or bf) */
307 static void gen_delayed_conditional_jump(DisasContext
* ctx
)
309 TCGLabel
*l1
= gen_new_label();
310 TCGv ds
= tcg_temp_new();
312 tcg_gen_mov_i32(ds
, cpu_delayed_cond
);
313 tcg_gen_discard_i32(cpu_delayed_cond
);
315 if (ctx
->tbflags
& GUSA_EXCLUSIVE
) {
316 /* When in an exclusive region, we must continue to the end.
317 Therefore, exit the region on a taken branch, but otherwise
318 fall through to the next instruction. */
319 tcg_gen_brcondi_i32(TCG_COND_EQ
, ds
, 0, l1
);
321 /* Leave the gUSA region. */
322 tcg_gen_movi_i32(cpu_flags
, ctx
->envflags
& ~GUSA_MASK
);
329 tcg_gen_brcondi_i32(TCG_COND_NE
, ds
, 0, l1
);
330 gen_goto_tb(ctx
, 1, ctx
->pc
+ 2);
335 static inline void gen_load_fpr64(DisasContext
*ctx
, TCGv_i64 t
, int reg
)
337 /* We have already signaled illegal instruction for odd Dr. */
338 tcg_debug_assert((reg
& 1) == 0);
340 tcg_gen_concat_i32_i64(t
, cpu_fregs
[reg
+ 1], cpu_fregs
[reg
]);
343 static inline void gen_store_fpr64(DisasContext
*ctx
, TCGv_i64 t
, int reg
)
345 /* We have already signaled illegal instruction for odd Dr. */
346 tcg_debug_assert((reg
& 1) == 0);
348 tcg_gen_extr_i64_i32(cpu_fregs
[reg
+ 1], cpu_fregs
[reg
], t
);
/* Instruction field extraction: ctx->opcode is the raw 16-bit SH-4
   instruction word; Bx_y extracts bits x..y ("s" suffix = sign-extended).  */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* Register accessors: the xor with gbank/fbank picks the active bank;
   ALTREG addresses the opposite general-register bank.  */
#define REG(x)     cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x)  cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)    cpu_fregs[(x) ^ ctx->fbank]

/* Remap a register number for the XD/DR interleaved layout used when
   FPSCR.SZ is set: move bit 0 up to bit 4, keep the even part.  */
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))

/* Decoder guard macros: each bails out of _decode_opc via goto on a
   condition that makes the instruction illegal in the current state.
   NOTE(review): closing braces and some goto bodies were lost in
   extraction and restored; CHECK_SH4A's name is reconstructed from its
   body — confirm against the decoder's call sites.  */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & DELAY_SLOT_MASK) { \
        goto do_illegal_slot;              \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {  \
        goto do_illegal; \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) { \
        goto do_fpu_disabled;           \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) { \
        goto do_illegal;           \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) { \
        goto do_illegal;              \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }
397 static void _decode_opc(DisasContext
* ctx
)
399 /* This code tries to make movcal emulation sufficiently
400 accurate for Linux purposes. This instruction writes
401 memory, and prior to that, always allocates a cache line.
402 It is used in two contexts:
403 - in memcpy, where data is copied in blocks, the first write
404 of to a block uses movca.l for performance.
405 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
406 to flush the cache. Here, the data written by movcal.l is never
407 written to memory, and the data written is just bogus.
409 To simulate this, we simulate movcal.l, we store the value to memory,
410 but we also remember the previous content. If we see ocbi, we check
411 if movcal.l for that address was done previously. If so, the write should
412 not have hit the memory, so we restore the previous content.
413 When we see an instruction that is neither movca.l
414 nor ocbi, the previous content is discarded.
416 To optimize, we only try to flush stores when we're at the start of
417 TB, or if we already saw movca.l in this TB and did not flush stores
421 int opcode
= ctx
->opcode
& 0xf0ff;
422 if (opcode
!= 0x0093 /* ocbi */
423 && opcode
!= 0x00c3 /* movca.l */)
425 gen_helper_discard_movcal_backup(cpu_env
);
431 fprintf(stderr
, "Translating opcode 0x%04x\n", ctx
->opcode
);
434 switch (ctx
->opcode
) {
435 case 0x0019: /* div0u */
436 tcg_gen_movi_i32(cpu_sr_m
, 0);
437 tcg_gen_movi_i32(cpu_sr_q
, 0);
438 tcg_gen_movi_i32(cpu_sr_t
, 0);
440 case 0x000b: /* rts */
442 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_pr
);
443 ctx
->envflags
|= DELAY_SLOT
;
444 ctx
->delayed_pc
= (uint32_t) - 1;
446 case 0x0028: /* clrmac */
447 tcg_gen_movi_i32(cpu_mach
, 0);
448 tcg_gen_movi_i32(cpu_macl
, 0);
450 case 0x0048: /* clrs */
451 tcg_gen_andi_i32(cpu_sr
, cpu_sr
, ~(1u << SR_S
));
453 case 0x0008: /* clrt */
454 tcg_gen_movi_i32(cpu_sr_t
, 0);
456 case 0x0038: /* ldtlb */
458 gen_helper_ldtlb(cpu_env
);
460 case 0x002b: /* rte */
463 gen_write_sr(cpu_ssr
);
464 tcg_gen_mov_i32(cpu_delayed_pc
, cpu_spc
);
465 ctx
->envflags
|= DELAY_SLOT_RTE
;
466 ctx
->delayed_pc
= (uint32_t) - 1;
467 ctx
->bstate
= BS_STOP
;
469 case 0x0058: /* sets */
470 tcg_gen_ori_i32(cpu_sr
, cpu_sr
, (1u << SR_S
));
472 case 0x0018: /* sett */
473 tcg_gen_movi_i32(cpu_sr_t
, 1);
475 case 0xfbfd: /* frchg */
476 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_FR
);
477 ctx
->bstate
= BS_STOP
;
479 case 0xf3fd: /* fschg */
480 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_SZ
);
481 ctx
->bstate
= BS_STOP
;
483 case 0xf7fd: /* fpchg */
485 tcg_gen_xori_i32(cpu_fpscr
, cpu_fpscr
, FPSCR_PR
);
486 ctx
->bstate
= BS_STOP
;
488 case 0x0009: /* nop */
490 case 0x001b: /* sleep */
492 tcg_gen_movi_i32(cpu_pc
, ctx
->pc
+ 2);
493 gen_helper_sleep(cpu_env
);
497 switch (ctx
->opcode
& 0xf000) {
498 case 0x1000: /* mov.l Rm,@(disp,Rn) */
500 TCGv addr
= tcg_temp_new();
501 tcg_gen_addi_i32(addr
, REG(B11_8
), B3_0
* 4);
502 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
506 case 0x5000: /* mov.l @(disp,Rm),Rn */
508 TCGv addr
= tcg_temp_new();
509 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 4);
510 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
514 case 0xe000: /* mov #imm,Rn */
515 #ifdef CONFIG_USER_ONLY
516 /* Detect the start of a gUSA region. If so, update envflags
517 and end the TB. This will allow us to see the end of the
518 region (stored in R0) in the next TB. */
519 if (B11_8
== 15 && B7_0s
< 0 && parallel_cpus
) {
520 ctx
->envflags
= deposit32(ctx
->envflags
, GUSA_SHIFT
, 8, B7_0s
);
521 ctx
->bstate
= BS_STOP
;
524 tcg_gen_movi_i32(REG(B11_8
), B7_0s
);
526 case 0x9000: /* mov.w @(disp,PC),Rn */
528 TCGv addr
= tcg_const_i32(ctx
->pc
+ 4 + B7_0
* 2);
529 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
533 case 0xd000: /* mov.l @(disp,PC),Rn */
535 TCGv addr
= tcg_const_i32((ctx
->pc
+ 4 + B7_0
* 4) & ~3);
536 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
540 case 0x7000: /* add #imm,Rn */
541 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), B7_0s
);
543 case 0xa000: /* bra disp */
545 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
546 ctx
->envflags
|= DELAY_SLOT
;
548 case 0xb000: /* bsr disp */
550 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
551 ctx
->delayed_pc
= ctx
->pc
+ 4 + B11_0s
* 2;
552 ctx
->envflags
|= DELAY_SLOT
;
556 switch (ctx
->opcode
& 0xf00f) {
557 case 0x6003: /* mov Rm,Rn */
558 tcg_gen_mov_i32(REG(B11_8
), REG(B7_4
));
560 case 0x2000: /* mov.b Rm,@Rn */
561 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_UB
);
563 case 0x2001: /* mov.w Rm,@Rn */
564 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUW
);
566 case 0x2002: /* mov.l Rm,@Rn */
567 tcg_gen_qemu_st_i32(REG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
569 case 0x6000: /* mov.b @Rm,Rn */
570 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
572 case 0x6001: /* mov.w @Rm,Rn */
573 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
575 case 0x6002: /* mov.l @Rm,Rn */
576 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
578 case 0x2004: /* mov.b Rm,@-Rn */
580 TCGv addr
= tcg_temp_new();
581 tcg_gen_subi_i32(addr
, REG(B11_8
), 1);
582 /* might cause re-execution */
583 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
584 tcg_gen_mov_i32(REG(B11_8
), addr
); /* modify register status */
588 case 0x2005: /* mov.w Rm,@-Rn */
590 TCGv addr
= tcg_temp_new();
591 tcg_gen_subi_i32(addr
, REG(B11_8
), 2);
592 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
593 tcg_gen_mov_i32(REG(B11_8
), addr
);
597 case 0x2006: /* mov.l Rm,@-Rn */
599 TCGv addr
= tcg_temp_new();
600 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
601 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
602 tcg_gen_mov_i32(REG(B11_8
), addr
);
605 case 0x6004: /* mov.b @Rm+,Rn */
606 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_SB
);
608 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 1);
610 case 0x6005: /* mov.w @Rm+,Rn */
611 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESW
);
613 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
615 case 0x6006: /* mov.l @Rm+,Rn */
616 tcg_gen_qemu_ld_i32(REG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TESL
);
618 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
620 case 0x0004: /* mov.b Rm,@(R0,Rn) */
622 TCGv addr
= tcg_temp_new();
623 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
624 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_UB
);
628 case 0x0005: /* mov.w Rm,@(R0,Rn) */
630 TCGv addr
= tcg_temp_new();
631 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
632 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUW
);
636 case 0x0006: /* mov.l Rm,@(R0,Rn) */
638 TCGv addr
= tcg_temp_new();
639 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
640 tcg_gen_qemu_st_i32(REG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
644 case 0x000c: /* mov.b @(R0,Rm),Rn */
646 TCGv addr
= tcg_temp_new();
647 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
648 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_SB
);
652 case 0x000d: /* mov.w @(R0,Rm),Rn */
654 TCGv addr
= tcg_temp_new();
655 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
656 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESW
);
660 case 0x000e: /* mov.l @(R0,Rm),Rn */
662 TCGv addr
= tcg_temp_new();
663 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
664 tcg_gen_qemu_ld_i32(REG(B11_8
), addr
, ctx
->memidx
, MO_TESL
);
668 case 0x6008: /* swap.b Rm,Rn */
670 TCGv low
= tcg_temp_new();;
671 tcg_gen_ext16u_i32(low
, REG(B7_4
));
672 tcg_gen_bswap16_i32(low
, low
);
673 tcg_gen_deposit_i32(REG(B11_8
), REG(B7_4
), low
, 0, 16);
677 case 0x6009: /* swap.w Rm,Rn */
678 tcg_gen_rotli_i32(REG(B11_8
), REG(B7_4
), 16);
680 case 0x200d: /* xtrct Rm,Rn */
683 high
= tcg_temp_new();
684 tcg_gen_shli_i32(high
, REG(B7_4
), 16);
685 low
= tcg_temp_new();
686 tcg_gen_shri_i32(low
, REG(B11_8
), 16);
687 tcg_gen_or_i32(REG(B11_8
), high
, low
);
692 case 0x300c: /* add Rm,Rn */
693 tcg_gen_add_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
695 case 0x300e: /* addc Rm,Rn */
698 t0
= tcg_const_tl(0);
700 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
701 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
702 REG(B11_8
), t0
, t1
, cpu_sr_t
);
707 case 0x300f: /* addv Rm,Rn */
711 tcg_gen_add_i32(t0
, REG(B7_4
), REG(B11_8
));
713 tcg_gen_xor_i32(t1
, t0
, REG(B11_8
));
715 tcg_gen_xor_i32(t2
, REG(B7_4
), REG(B11_8
));
716 tcg_gen_andc_i32(cpu_sr_t
, t1
, t2
);
718 tcg_gen_shri_i32(cpu_sr_t
, cpu_sr_t
, 31);
720 tcg_gen_mov_i32(REG(B7_4
), t0
);
724 case 0x2009: /* and Rm,Rn */
725 tcg_gen_and_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
727 case 0x3000: /* cmp/eq Rm,Rn */
728 tcg_gen_setcond_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
730 case 0x3003: /* cmp/ge Rm,Rn */
731 tcg_gen_setcond_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
733 case 0x3007: /* cmp/gt Rm,Rn */
734 tcg_gen_setcond_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
736 case 0x3006: /* cmp/hi Rm,Rn */
737 tcg_gen_setcond_i32(TCG_COND_GTU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
739 case 0x3002: /* cmp/hs Rm,Rn */
740 tcg_gen_setcond_i32(TCG_COND_GEU
, cpu_sr_t
, REG(B11_8
), REG(B7_4
));
742 case 0x200c: /* cmp/str Rm,Rn */
744 TCGv cmp1
= tcg_temp_new();
745 TCGv cmp2
= tcg_temp_new();
746 tcg_gen_xor_i32(cmp2
, REG(B7_4
), REG(B11_8
));
747 tcg_gen_subi_i32(cmp1
, cmp2
, 0x01010101);
748 tcg_gen_andc_i32(cmp1
, cmp1
, cmp2
);
749 tcg_gen_andi_i32(cmp1
, cmp1
, 0x80808080);
750 tcg_gen_setcondi_i32(TCG_COND_NE
, cpu_sr_t
, cmp1
, 0);
755 case 0x2007: /* div0s Rm,Rn */
756 tcg_gen_shri_i32(cpu_sr_q
, REG(B11_8
), 31); /* SR_Q */
757 tcg_gen_shri_i32(cpu_sr_m
, REG(B7_4
), 31); /* SR_M */
758 tcg_gen_xor_i32(cpu_sr_t
, cpu_sr_q
, cpu_sr_m
); /* SR_T */
760 case 0x3004: /* div1 Rm,Rn */
762 TCGv t0
= tcg_temp_new();
763 TCGv t1
= tcg_temp_new();
764 TCGv t2
= tcg_temp_new();
765 TCGv zero
= tcg_const_i32(0);
767 /* shift left arg1, saving the bit being pushed out and inserting
769 tcg_gen_shri_i32(t0
, REG(B11_8
), 31);
770 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
771 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), cpu_sr_t
);
773 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
774 using 64-bit temps, we compute arg0's high part from q ^ m, so
775 that it is 0x00000000 when adding the value or 0xffffffff when
777 tcg_gen_xor_i32(t1
, cpu_sr_q
, cpu_sr_m
);
778 tcg_gen_subi_i32(t1
, t1
, 1);
779 tcg_gen_neg_i32(t2
, REG(B7_4
));
780 tcg_gen_movcond_i32(TCG_COND_EQ
, t2
, t1
, zero
, REG(B7_4
), t2
);
781 tcg_gen_add2_i32(REG(B11_8
), t1
, REG(B11_8
), zero
, t2
, t1
);
783 /* compute T and Q depending on carry */
784 tcg_gen_andi_i32(t1
, t1
, 1);
785 tcg_gen_xor_i32(t1
, t1
, t0
);
786 tcg_gen_xori_i32(cpu_sr_t
, t1
, 1);
787 tcg_gen_xor_i32(cpu_sr_q
, cpu_sr_m
, t1
);
795 case 0x300d: /* dmuls.l Rm,Rn */
796 tcg_gen_muls2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
798 case 0x3005: /* dmulu.l Rm,Rn */
799 tcg_gen_mulu2_i32(cpu_macl
, cpu_mach
, REG(B7_4
), REG(B11_8
));
801 case 0x600e: /* exts.b Rm,Rn */
802 tcg_gen_ext8s_i32(REG(B11_8
), REG(B7_4
));
804 case 0x600f: /* exts.w Rm,Rn */
805 tcg_gen_ext16s_i32(REG(B11_8
), REG(B7_4
));
807 case 0x600c: /* extu.b Rm,Rn */
808 tcg_gen_ext8u_i32(REG(B11_8
), REG(B7_4
));
810 case 0x600d: /* extu.w Rm,Rn */
811 tcg_gen_ext16u_i32(REG(B11_8
), REG(B7_4
));
813 case 0x000f: /* mac.l @Rm+,@Rn+ */
816 arg0
= tcg_temp_new();
817 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
818 arg1
= tcg_temp_new();
819 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
820 gen_helper_macl(cpu_env
, arg0
, arg1
);
823 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
824 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
827 case 0x400f: /* mac.w @Rm+,@Rn+ */
830 arg0
= tcg_temp_new();
831 tcg_gen_qemu_ld_i32(arg0
, REG(B7_4
), ctx
->memidx
, MO_TESL
);
832 arg1
= tcg_temp_new();
833 tcg_gen_qemu_ld_i32(arg1
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
834 gen_helper_macw(cpu_env
, arg0
, arg1
);
837 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 2);
838 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 2);
841 case 0x0007: /* mul.l Rm,Rn */
842 tcg_gen_mul_i32(cpu_macl
, REG(B7_4
), REG(B11_8
));
844 case 0x200f: /* muls.w Rm,Rn */
847 arg0
= tcg_temp_new();
848 tcg_gen_ext16s_i32(arg0
, REG(B7_4
));
849 arg1
= tcg_temp_new();
850 tcg_gen_ext16s_i32(arg1
, REG(B11_8
));
851 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
856 case 0x200e: /* mulu.w Rm,Rn */
859 arg0
= tcg_temp_new();
860 tcg_gen_ext16u_i32(arg0
, REG(B7_4
));
861 arg1
= tcg_temp_new();
862 tcg_gen_ext16u_i32(arg1
, REG(B11_8
));
863 tcg_gen_mul_i32(cpu_macl
, arg0
, arg1
);
868 case 0x600b: /* neg Rm,Rn */
869 tcg_gen_neg_i32(REG(B11_8
), REG(B7_4
));
871 case 0x600a: /* negc Rm,Rn */
873 TCGv t0
= tcg_const_i32(0);
874 tcg_gen_add2_i32(REG(B11_8
), cpu_sr_t
,
875 REG(B7_4
), t0
, cpu_sr_t
, t0
);
876 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
877 t0
, t0
, REG(B11_8
), cpu_sr_t
);
878 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
882 case 0x6007: /* not Rm,Rn */
883 tcg_gen_not_i32(REG(B11_8
), REG(B7_4
));
885 case 0x200b: /* or Rm,Rn */
886 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
888 case 0x400c: /* shad Rm,Rn */
890 TCGv t0
= tcg_temp_new();
891 TCGv t1
= tcg_temp_new();
892 TCGv t2
= tcg_temp_new();
894 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
896 /* positive case: shift to the left */
897 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
899 /* negative case: shift to the right in two steps to
900 correctly handle the -32 case */
901 tcg_gen_xori_i32(t0
, t0
, 0x1f);
902 tcg_gen_sar_i32(t2
, REG(B11_8
), t0
);
903 tcg_gen_sari_i32(t2
, t2
, 1);
905 /* select between the two cases */
906 tcg_gen_movi_i32(t0
, 0);
907 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
914 case 0x400d: /* shld Rm,Rn */
916 TCGv t0
= tcg_temp_new();
917 TCGv t1
= tcg_temp_new();
918 TCGv t2
= tcg_temp_new();
920 tcg_gen_andi_i32(t0
, REG(B7_4
), 0x1f);
922 /* positive case: shift to the left */
923 tcg_gen_shl_i32(t1
, REG(B11_8
), t0
);
925 /* negative case: shift to the right in two steps to
926 correctly handle the -32 case */
927 tcg_gen_xori_i32(t0
, t0
, 0x1f);
928 tcg_gen_shr_i32(t2
, REG(B11_8
), t0
);
929 tcg_gen_shri_i32(t2
, t2
, 1);
931 /* select between the two cases */
932 tcg_gen_movi_i32(t0
, 0);
933 tcg_gen_movcond_i32(TCG_COND_GE
, REG(B11_8
), REG(B7_4
), t0
, t1
, t2
);
940 case 0x3008: /* sub Rm,Rn */
941 tcg_gen_sub_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
943 case 0x300a: /* subc Rm,Rn */
946 t0
= tcg_const_tl(0);
948 tcg_gen_add2_i32(t1
, cpu_sr_t
, cpu_sr_t
, t0
, REG(B7_4
), t0
);
949 tcg_gen_sub2_i32(REG(B11_8
), cpu_sr_t
,
950 REG(B11_8
), t0
, t1
, cpu_sr_t
);
951 tcg_gen_andi_i32(cpu_sr_t
, cpu_sr_t
, 1);
956 case 0x300b: /* subv Rm,Rn */
960 tcg_gen_sub_i32(t0
, REG(B11_8
), REG(B7_4
));
962 tcg_gen_xor_i32(t1
, t0
, REG(B7_4
));
964 tcg_gen_xor_i32(t2
, REG(B11_8
), REG(B7_4
));
965 tcg_gen_and_i32(t1
, t1
, t2
);
967 tcg_gen_shri_i32(cpu_sr_t
, t1
, 31);
969 tcg_gen_mov_i32(REG(B11_8
), t0
);
973 case 0x2008: /* tst Rm,Rn */
975 TCGv val
= tcg_temp_new();
976 tcg_gen_and_i32(val
, REG(B7_4
), REG(B11_8
));
977 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
981 case 0x200a: /* xor Rm,Rn */
982 tcg_gen_xor_i32(REG(B11_8
), REG(B11_8
), REG(B7_4
));
984 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
986 if (ctx
->tbflags
& FPSCR_SZ
) {
987 int xsrc
= XHACK(B7_4
);
988 int xdst
= XHACK(B11_8
);
989 tcg_gen_mov_i32(FREG(xdst
), FREG(xsrc
));
990 tcg_gen_mov_i32(FREG(xdst
+ 1), FREG(xsrc
+ 1));
992 tcg_gen_mov_i32(FREG(B11_8
), FREG(B7_4
));
995 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
997 if (ctx
->tbflags
& FPSCR_SZ
) {
998 TCGv_i64 fp
= tcg_temp_new_i64();
999 gen_load_fpr64(ctx
, fp
, XHACK(B7_4
));
1000 tcg_gen_qemu_st_i64(fp
, REG(B11_8
), ctx
->memidx
, MO_TEQ
);
1001 tcg_temp_free_i64(fp
);
1003 tcg_gen_qemu_st_i32(FREG(B7_4
), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1006 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1008 if (ctx
->tbflags
& FPSCR_SZ
) {
1009 TCGv_i64 fp
= tcg_temp_new_i64();
1010 tcg_gen_qemu_ld_i64(fp
, REG(B7_4
), ctx
->memidx
, MO_TEQ
);
1011 gen_store_fpr64(ctx
, fp
, XHACK(B11_8
));
1012 tcg_temp_free_i64(fp
);
1014 tcg_gen_qemu_ld_i32(FREG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TEUL
);
1017 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1019 if (ctx
->tbflags
& FPSCR_SZ
) {
1020 TCGv_i64 fp
= tcg_temp_new_i64();
1021 tcg_gen_qemu_ld_i64(fp
, REG(B7_4
), ctx
->memidx
, MO_TEQ
);
1022 gen_store_fpr64(ctx
, fp
, XHACK(B11_8
));
1023 tcg_temp_free_i64(fp
);
1024 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 8);
1026 tcg_gen_qemu_ld_i32(FREG(B11_8
), REG(B7_4
), ctx
->memidx
, MO_TEUL
);
1027 tcg_gen_addi_i32(REG(B7_4
), REG(B7_4
), 4);
1030 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1033 TCGv addr
= tcg_temp_new_i32();
1034 if (ctx
->tbflags
& FPSCR_SZ
) {
1035 TCGv_i64 fp
= tcg_temp_new_i64();
1036 gen_load_fpr64(ctx
, fp
, XHACK(B7_4
));
1037 tcg_gen_subi_i32(addr
, REG(B11_8
), 8);
1038 tcg_gen_qemu_st_i64(fp
, addr
, ctx
->memidx
, MO_TEQ
);
1039 tcg_temp_free_i64(fp
);
1041 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1042 tcg_gen_qemu_st_i32(FREG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
1044 tcg_gen_mov_i32(REG(B11_8
), addr
);
1045 tcg_temp_free(addr
);
1048 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1051 TCGv addr
= tcg_temp_new_i32();
1052 tcg_gen_add_i32(addr
, REG(B7_4
), REG(0));
1053 if (ctx
->tbflags
& FPSCR_SZ
) {
1054 TCGv_i64 fp
= tcg_temp_new_i64();
1055 tcg_gen_qemu_ld_i64(fp
, addr
, ctx
->memidx
, MO_TEQ
);
1056 gen_store_fpr64(ctx
, fp
, XHACK(B11_8
));
1057 tcg_temp_free_i64(fp
);
1059 tcg_gen_qemu_ld_i32(FREG(B11_8
), addr
, ctx
->memidx
, MO_TEUL
);
1061 tcg_temp_free(addr
);
1064 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1067 TCGv addr
= tcg_temp_new();
1068 tcg_gen_add_i32(addr
, REG(B11_8
), REG(0));
1069 if (ctx
->tbflags
& FPSCR_SZ
) {
1070 TCGv_i64 fp
= tcg_temp_new_i64();
1071 gen_load_fpr64(ctx
, fp
, XHACK(B7_4
));
1072 tcg_gen_qemu_st_i64(fp
, addr
, ctx
->memidx
, MO_TEQ
);
1073 tcg_temp_free_i64(fp
);
1075 tcg_gen_qemu_st_i32(FREG(B7_4
), addr
, ctx
->memidx
, MO_TEUL
);
1077 tcg_temp_free(addr
);
1080 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1081 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1082 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1083 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1084 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1085 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1088 if (ctx
->tbflags
& FPSCR_PR
) {
1091 if (ctx
->opcode
& 0x0110) {
1094 fp0
= tcg_temp_new_i64();
1095 fp1
= tcg_temp_new_i64();
1096 gen_load_fpr64(ctx
, fp0
, B11_8
);
1097 gen_load_fpr64(ctx
, fp1
, B7_4
);
1098 switch (ctx
->opcode
& 0xf00f) {
1099 case 0xf000: /* fadd Rm,Rn */
1100 gen_helper_fadd_DT(fp0
, cpu_env
, fp0
, fp1
);
1102 case 0xf001: /* fsub Rm,Rn */
1103 gen_helper_fsub_DT(fp0
, cpu_env
, fp0
, fp1
);
1105 case 0xf002: /* fmul Rm,Rn */
1106 gen_helper_fmul_DT(fp0
, cpu_env
, fp0
, fp1
);
1108 case 0xf003: /* fdiv Rm,Rn */
1109 gen_helper_fdiv_DT(fp0
, cpu_env
, fp0
, fp1
);
1111 case 0xf004: /* fcmp/eq Rm,Rn */
1112 gen_helper_fcmp_eq_DT(cpu_sr_t
, cpu_env
, fp0
, fp1
);
1114 case 0xf005: /* fcmp/gt Rm,Rn */
1115 gen_helper_fcmp_gt_DT(cpu_sr_t
, cpu_env
, fp0
, fp1
);
1118 gen_store_fpr64(ctx
, fp0
, B11_8
);
1119 tcg_temp_free_i64(fp0
);
1120 tcg_temp_free_i64(fp1
);
1122 switch (ctx
->opcode
& 0xf00f) {
1123 case 0xf000: /* fadd Rm,Rn */
1124 gen_helper_fadd_FT(FREG(B11_8
), cpu_env
,
1125 FREG(B11_8
), FREG(B7_4
));
1127 case 0xf001: /* fsub Rm,Rn */
1128 gen_helper_fsub_FT(FREG(B11_8
), cpu_env
,
1129 FREG(B11_8
), FREG(B7_4
));
1131 case 0xf002: /* fmul Rm,Rn */
1132 gen_helper_fmul_FT(FREG(B11_8
), cpu_env
,
1133 FREG(B11_8
), FREG(B7_4
));
1135 case 0xf003: /* fdiv Rm,Rn */
1136 gen_helper_fdiv_FT(FREG(B11_8
), cpu_env
,
1137 FREG(B11_8
), FREG(B7_4
));
1139 case 0xf004: /* fcmp/eq Rm,Rn */
1140 gen_helper_fcmp_eq_FT(cpu_sr_t
, cpu_env
,
1141 FREG(B11_8
), FREG(B7_4
));
1143 case 0xf005: /* fcmp/gt Rm,Rn */
1144 gen_helper_fcmp_gt_FT(cpu_sr_t
, cpu_env
,
1145 FREG(B11_8
), FREG(B7_4
));
1151 case 0xf00e: /* fmac FR0,RM,Rn */
1154 gen_helper_fmac_FT(FREG(B11_8
), cpu_env
,
1155 FREG(0), FREG(B7_4
), FREG(B11_8
));
1159 switch (ctx
->opcode
& 0xff00) {
1160 case 0xc900: /* and #imm,R0 */
1161 tcg_gen_andi_i32(REG(0), REG(0), B7_0
);
1163 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1166 addr
= tcg_temp_new();
1167 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1168 val
= tcg_temp_new();
1169 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1170 tcg_gen_andi_i32(val
, val
, B7_0
);
1171 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1173 tcg_temp_free(addr
);
1176 case 0x8b00: /* bf label */
1177 CHECK_NOT_DELAY_SLOT
1178 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2, false);
1180 case 0x8f00: /* bf/s label */
1181 CHECK_NOT_DELAY_SLOT
1182 tcg_gen_xori_i32(cpu_delayed_cond
, cpu_sr_t
, 1);
1183 ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2;
1184 ctx
->envflags
|= DELAY_SLOT_CONDITIONAL
;
1186 case 0x8900: /* bt label */
1187 CHECK_NOT_DELAY_SLOT
1188 gen_conditional_jump(ctx
, ctx
->pc
+ 4 + B7_0s
* 2, true);
1190 case 0x8d00: /* bt/s label */
1191 CHECK_NOT_DELAY_SLOT
1192 tcg_gen_mov_i32(cpu_delayed_cond
, cpu_sr_t
);
1193 ctx
->delayed_pc
= ctx
->pc
+ 4 + B7_0s
* 2;
1194 ctx
->envflags
|= DELAY_SLOT_CONDITIONAL
;
1196 case 0x8800: /* cmp/eq #imm,R0 */
1197 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(0), B7_0s
);
1199 case 0xc400: /* mov.b @(disp,GBR),R0 */
1201 TCGv addr
= tcg_temp_new();
1202 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1203 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1204 tcg_temp_free(addr
);
1207 case 0xc500: /* mov.w @(disp,GBR),R0 */
1209 TCGv addr
= tcg_temp_new();
1210 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1211 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1212 tcg_temp_free(addr
);
1215 case 0xc600: /* mov.l @(disp,GBR),R0 */
1217 TCGv addr
= tcg_temp_new();
1218 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1219 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESL
);
1220 tcg_temp_free(addr
);
1223 case 0xc000: /* mov.b R0,@(disp,GBR) */
1225 TCGv addr
= tcg_temp_new();
1226 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
);
1227 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1228 tcg_temp_free(addr
);
1231 case 0xc100: /* mov.w R0,@(disp,GBR) */
1233 TCGv addr
= tcg_temp_new();
1234 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 2);
1235 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1236 tcg_temp_free(addr
);
1239 case 0xc200: /* mov.l R0,@(disp,GBR) */
1241 TCGv addr
= tcg_temp_new();
1242 tcg_gen_addi_i32(addr
, cpu_gbr
, B7_0
* 4);
1243 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUL
);
1244 tcg_temp_free(addr
);
1247 case 0x8000: /* mov.b R0,@(disp,Rn) */
1249 TCGv addr
= tcg_temp_new();
1250 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1251 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_UB
);
1252 tcg_temp_free(addr
);
1255 case 0x8100: /* mov.w R0,@(disp,Rn) */
1257 TCGv addr
= tcg_temp_new();
1258 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1259 tcg_gen_qemu_st_i32(REG(0), addr
, ctx
->memidx
, MO_TEUW
);
1260 tcg_temp_free(addr
);
1263 case 0x8400: /* mov.b @(disp,Rn),R0 */
1265 TCGv addr
= tcg_temp_new();
1266 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
);
1267 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_SB
);
1268 tcg_temp_free(addr
);
1271 case 0x8500: /* mov.w @(disp,Rn),R0 */
1273 TCGv addr
= tcg_temp_new();
1274 tcg_gen_addi_i32(addr
, REG(B7_4
), B3_0
* 2);
1275 tcg_gen_qemu_ld_i32(REG(0), addr
, ctx
->memidx
, MO_TESW
);
1276 tcg_temp_free(addr
);
1279 case 0xc700: /* mova @(disp,PC),R0 */
1280 tcg_gen_movi_i32(REG(0), ((ctx
->pc
& 0xfffffffc) + 4 + B7_0
* 4) & ~3);
1282 case 0xcb00: /* or #imm,R0 */
1283 tcg_gen_ori_i32(REG(0), REG(0), B7_0
);
1285 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1288 addr
= tcg_temp_new();
1289 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1290 val
= tcg_temp_new();
1291 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1292 tcg_gen_ori_i32(val
, val
, B7_0
);
1293 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1295 tcg_temp_free(addr
);
1298 case 0xc300: /* trapa #imm */
1301 CHECK_NOT_DELAY_SLOT
1302 gen_save_cpu_state(ctx
, true);
1303 imm
= tcg_const_i32(B7_0
);
1304 gen_helper_trapa(cpu_env
, imm
);
1306 ctx
->bstate
= BS_EXCP
;
1309 case 0xc800: /* tst #imm,R0 */
1311 TCGv val
= tcg_temp_new();
1312 tcg_gen_andi_i32(val
, REG(0), B7_0
);
1313 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1317 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1319 TCGv val
= tcg_temp_new();
1320 tcg_gen_add_i32(val
, REG(0), cpu_gbr
);
1321 tcg_gen_qemu_ld_i32(val
, val
, ctx
->memidx
, MO_UB
);
1322 tcg_gen_andi_i32(val
, val
, B7_0
);
1323 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1327 case 0xca00: /* xor #imm,R0 */
1328 tcg_gen_xori_i32(REG(0), REG(0), B7_0
);
1330 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1333 addr
= tcg_temp_new();
1334 tcg_gen_add_i32(addr
, REG(0), cpu_gbr
);
1335 val
= tcg_temp_new();
1336 tcg_gen_qemu_ld_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1337 tcg_gen_xori_i32(val
, val
, B7_0
);
1338 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_UB
);
1340 tcg_temp_free(addr
);
1345 switch (ctx
->opcode
& 0xf08f) {
1346 case 0x408e: /* ldc Rm,Rn_BANK */
1348 tcg_gen_mov_i32(ALTREG(B6_4
), REG(B11_8
));
1350 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1352 tcg_gen_qemu_ld_i32(ALTREG(B6_4
), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1353 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1355 case 0x0082: /* stc Rm_BANK,Rn */
1357 tcg_gen_mov_i32(REG(B11_8
), ALTREG(B6_4
));
1359 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1362 TCGv addr
= tcg_temp_new();
1363 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1364 tcg_gen_qemu_st_i32(ALTREG(B6_4
), addr
, ctx
->memidx
, MO_TEUL
);
1365 tcg_gen_mov_i32(REG(B11_8
), addr
);
1366 tcg_temp_free(addr
);
1371 switch (ctx
->opcode
& 0xf0ff) {
1372 case 0x0023: /* braf Rn */
1373 CHECK_NOT_DELAY_SLOT
1374 tcg_gen_addi_i32(cpu_delayed_pc
, REG(B11_8
), ctx
->pc
+ 4);
1375 ctx
->envflags
|= DELAY_SLOT
;
1376 ctx
->delayed_pc
= (uint32_t) - 1;
1378 case 0x0003: /* bsrf Rn */
1379 CHECK_NOT_DELAY_SLOT
1380 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1381 tcg_gen_add_i32(cpu_delayed_pc
, REG(B11_8
), cpu_pr
);
1382 ctx
->envflags
|= DELAY_SLOT
;
1383 ctx
->delayed_pc
= (uint32_t) - 1;
1385 case 0x4015: /* cmp/pl Rn */
1386 tcg_gen_setcondi_i32(TCG_COND_GT
, cpu_sr_t
, REG(B11_8
), 0);
1388 case 0x4011: /* cmp/pz Rn */
1389 tcg_gen_setcondi_i32(TCG_COND_GE
, cpu_sr_t
, REG(B11_8
), 0);
1391 case 0x4010: /* dt Rn */
1392 tcg_gen_subi_i32(REG(B11_8
), REG(B11_8
), 1);
1393 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, REG(B11_8
), 0);
1395 case 0x402b: /* jmp @Rn */
1396 CHECK_NOT_DELAY_SLOT
1397 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1398 ctx
->envflags
|= DELAY_SLOT
;
1399 ctx
->delayed_pc
= (uint32_t) - 1;
1401 case 0x400b: /* jsr @Rn */
1402 CHECK_NOT_DELAY_SLOT
1403 tcg_gen_movi_i32(cpu_pr
, ctx
->pc
+ 4);
1404 tcg_gen_mov_i32(cpu_delayed_pc
, REG(B11_8
));
1405 ctx
->envflags
|= DELAY_SLOT
;
1406 ctx
->delayed_pc
= (uint32_t) - 1;
1408 case 0x400e: /* ldc Rm,SR */
1411 TCGv val
= tcg_temp_new();
1412 tcg_gen_andi_i32(val
, REG(B11_8
), 0x700083f3);
1415 ctx
->bstate
= BS_STOP
;
1418 case 0x4007: /* ldc.l @Rm+,SR */
1421 TCGv val
= tcg_temp_new();
1422 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1423 tcg_gen_andi_i32(val
, val
, 0x700083f3);
1426 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1427 ctx
->bstate
= BS_STOP
;
1430 case 0x0002: /* stc SR,Rn */
1432 gen_read_sr(REG(B11_8
));
1434 case 0x4003: /* stc SR,@-Rn */
1437 TCGv addr
= tcg_temp_new();
1438 TCGv val
= tcg_temp_new();
1439 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1441 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1442 tcg_gen_mov_i32(REG(B11_8
), addr
);
1444 tcg_temp_free(addr
);
1447 #define LD(reg,ldnum,ldpnum,prechk) \
1450 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1454 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1455 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1457 #define ST(reg,stnum,stpnum,prechk) \
1460 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1465 TCGv addr = tcg_temp_new(); \
1466 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1467 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1468 tcg_gen_mov_i32(REG(B11_8), addr); \
1469 tcg_temp_free(addr); \
1472 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1473 LD(reg,ldnum,ldpnum,prechk) \
1474 ST(reg,stnum,stpnum,prechk)
1475 LDST(gbr
, 0x401e, 0x4017, 0x0012, 0x4013, {})
1476 LDST(vbr
, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED
)
1477 LDST(ssr
, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED
)
1478 LDST(spc
, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED
)
1479 ST(sgr
, 0x003a, 0x4032, CHECK_PRIVILEGED
)
1480 LD(sgr
, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A
)
1481 LDST(dbr
, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED
)
1482 LDST(mach
, 0x400a, 0x4006, 0x000a, 0x4002, {})
1483 LDST(macl
, 0x401a, 0x4016, 0x001a, 0x4012, {})
1484 LDST(pr
, 0x402a, 0x4026, 0x002a, 0x4022, {})
1485 LDST(fpul
, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED
})
1486 case 0x406a: /* lds Rm,FPSCR */
1488 gen_helper_ld_fpscr(cpu_env
, REG(B11_8
));
1489 ctx
->bstate
= BS_STOP
;
1491 case 0x4066: /* lds.l @Rm+,FPSCR */
1494 TCGv addr
= tcg_temp_new();
1495 tcg_gen_qemu_ld_i32(addr
, REG(B11_8
), ctx
->memidx
, MO_TESL
);
1496 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1497 gen_helper_ld_fpscr(cpu_env
, addr
);
1498 tcg_temp_free(addr
);
1499 ctx
->bstate
= BS_STOP
;
1502 case 0x006a: /* sts FPSCR,Rn */
1504 tcg_gen_andi_i32(REG(B11_8
), cpu_fpscr
, 0x003fffff);
1506 case 0x4062: /* sts FPSCR,@-Rn */
1510 val
= tcg_temp_new();
1511 tcg_gen_andi_i32(val
, cpu_fpscr
, 0x003fffff);
1512 addr
= tcg_temp_new();
1513 tcg_gen_subi_i32(addr
, REG(B11_8
), 4);
1514 tcg_gen_qemu_st_i32(val
, addr
, ctx
->memidx
, MO_TEUL
);
1515 tcg_gen_mov_i32(REG(B11_8
), addr
);
1516 tcg_temp_free(addr
);
1520 case 0x00c3: /* movca.l R0,@Rm */
1522 TCGv val
= tcg_temp_new();
1523 tcg_gen_qemu_ld_i32(val
, REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1524 gen_helper_movcal(cpu_env
, REG(B11_8
), val
);
1525 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1527 ctx
->has_movcal
= 1;
1529 case 0x40a9: /* movua.l @Rm,R0 */
1531 /* Load non-boundary-aligned data */
1532 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
,
1533 MO_TEUL
| MO_UNALN
);
1536 case 0x40e9: /* movua.l @Rm+,R0 */
1538 /* Load non-boundary-aligned data */
1539 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
,
1540 MO_TEUL
| MO_UNALN
);
1541 tcg_gen_addi_i32(REG(B11_8
), REG(B11_8
), 4);
1544 case 0x0029: /* movt Rn */
1545 tcg_gen_mov_i32(REG(B11_8
), cpu_sr_t
);
1550 If (T == 1) R0 -> (Rn)
1555 TCGLabel
*label
= gen_new_label();
1556 tcg_gen_mov_i32(cpu_sr_t
, cpu_ldst
);
1557 tcg_gen_brcondi_i32(TCG_COND_EQ
, cpu_ldst
, 0, label
);
1558 tcg_gen_qemu_st_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TEUL
);
1559 gen_set_label(label
);
1560 tcg_gen_movi_i32(cpu_ldst
, 0);
1567 When interrupt/exception
1571 tcg_gen_movi_i32(cpu_ldst
, 0);
1572 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8
), ctx
->memidx
, MO_TESL
);
1573 tcg_gen_movi_i32(cpu_ldst
, 1);
1575 case 0x0093: /* ocbi @Rn */
1577 gen_helper_ocbi(cpu_env
, REG(B11_8
));
1580 case 0x00a3: /* ocbp @Rn */
1581 case 0x00b3: /* ocbwb @Rn */
1582 /* These instructions are supposed to do nothing in case of
1583 a cache miss. Given that we only partially emulate caches
1584 it is safe to simply ignore them. */
1586 case 0x0083: /* pref @Rn */
1588 case 0x00d3: /* prefi @Rn */
1591 case 0x00e3: /* icbi @Rn */
1594 case 0x00ab: /* synco */
1596 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
1599 case 0x4024: /* rotcl Rn */
1601 TCGv tmp
= tcg_temp_new();
1602 tcg_gen_mov_i32(tmp
, cpu_sr_t
);
1603 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1604 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1605 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1609 case 0x4025: /* rotcr Rn */
1611 TCGv tmp
= tcg_temp_new();
1612 tcg_gen_shli_i32(tmp
, cpu_sr_t
, 31);
1613 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1614 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1615 tcg_gen_or_i32(REG(B11_8
), REG(B11_8
), tmp
);
1619 case 0x4004: /* rotl Rn */
1620 tcg_gen_rotli_i32(REG(B11_8
), REG(B11_8
), 1);
1621 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1623 case 0x4005: /* rotr Rn */
1624 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 0);
1625 tcg_gen_rotri_i32(REG(B11_8
), REG(B11_8
), 1);
1627 case 0x4000: /* shll Rn */
1628 case 0x4020: /* shal Rn */
1629 tcg_gen_shri_i32(cpu_sr_t
, REG(B11_8
), 31);
1630 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 1);
1632 case 0x4021: /* shar Rn */
1633 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1634 tcg_gen_sari_i32(REG(B11_8
), REG(B11_8
), 1);
1636 case 0x4001: /* shlr Rn */
1637 tcg_gen_andi_i32(cpu_sr_t
, REG(B11_8
), 1);
1638 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 1);
1640 case 0x4008: /* shll2 Rn */
1641 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 2);
1643 case 0x4018: /* shll8 Rn */
1644 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 8);
1646 case 0x4028: /* shll16 Rn */
1647 tcg_gen_shli_i32(REG(B11_8
), REG(B11_8
), 16);
1649 case 0x4009: /* shlr2 Rn */
1650 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 2);
1652 case 0x4019: /* shlr8 Rn */
1653 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 8);
1655 case 0x4029: /* shlr16 Rn */
1656 tcg_gen_shri_i32(REG(B11_8
), REG(B11_8
), 16);
1658 case 0x401b: /* tas.b @Rn */
1660 TCGv val
= tcg_const_i32(0x80);
1661 tcg_gen_atomic_fetch_or_i32(val
, REG(B11_8
), val
,
1662 ctx
->memidx
, MO_UB
);
1663 tcg_gen_setcondi_i32(TCG_COND_EQ
, cpu_sr_t
, val
, 0);
1667 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1669 tcg_gen_mov_i32(FREG(B11_8
), cpu_fpul
);
1671 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1673 tcg_gen_mov_i32(cpu_fpul
, FREG(B11_8
));
1675 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1677 if (ctx
->tbflags
& FPSCR_PR
) {
1679 if (ctx
->opcode
& 0x0100) {
1682 fp
= tcg_temp_new_i64();
1683 gen_helper_float_DT(fp
, cpu_env
, cpu_fpul
);
1684 gen_store_fpr64(ctx
, fp
, B11_8
);
1685 tcg_temp_free_i64(fp
);
1688 gen_helper_float_FT(FREG(B11_8
), cpu_env
, cpu_fpul
);
1691 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1693 if (ctx
->tbflags
& FPSCR_PR
) {
1695 if (ctx
->opcode
& 0x0100) {
1698 fp
= tcg_temp_new_i64();
1699 gen_load_fpr64(ctx
, fp
, B11_8
);
1700 gen_helper_ftrc_DT(cpu_fpul
, cpu_env
, fp
);
1701 tcg_temp_free_i64(fp
);
1704 gen_helper_ftrc_FT(cpu_fpul
, cpu_env
, FREG(B11_8
));
1707 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1709 tcg_gen_xori_i32(FREG(B11_8
), FREG(B11_8
), 0x80000000);
1711 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
1713 tcg_gen_andi_i32(FREG(B11_8
), FREG(B11_8
), 0x7fffffff);
1715 case 0xf06d: /* fsqrt FRn */
1717 if (ctx
->tbflags
& FPSCR_PR
) {
1718 if (ctx
->opcode
& 0x0100) {
1721 TCGv_i64 fp
= tcg_temp_new_i64();
1722 gen_load_fpr64(ctx
, fp
, B11_8
);
1723 gen_helper_fsqrt_DT(fp
, cpu_env
, fp
);
1724 gen_store_fpr64(ctx
, fp
, B11_8
);
1725 tcg_temp_free_i64(fp
);
1727 gen_helper_fsqrt_FT(FREG(B11_8
), cpu_env
, FREG(B11_8
));
1730 case 0xf07d: /* fsrra FRn */
1733 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1736 tcg_gen_movi_i32(FREG(B11_8
), 0);
1738 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1741 tcg_gen_movi_i32(FREG(B11_8
), 0x3f800000);
1743 case 0xf0ad: /* fcnvsd FPUL,DRn */
1746 TCGv_i64 fp
= tcg_temp_new_i64();
1747 gen_helper_fcnvsd_FT_DT(fp
, cpu_env
, cpu_fpul
);
1748 gen_store_fpr64(ctx
, fp
, B11_8
);
1749 tcg_temp_free_i64(fp
);
1752 case 0xf0bd: /* fcnvds DRn,FPUL */
1755 TCGv_i64 fp
= tcg_temp_new_i64();
1756 gen_load_fpr64(ctx
, fp
, B11_8
);
1757 gen_helper_fcnvds_DT_FT(cpu_fpul
, cpu_env
, fp
);
1758 tcg_temp_free_i64(fp
);
1761 case 0xf0ed: /* fipr FVm,FVn */
1765 TCGv m
= tcg_const_i32((ctx
->opcode
>> 8) & 3);
1766 TCGv n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1767 gen_helper_fipr(cpu_env
, m
, n
);
1773 case 0xf0fd: /* ftrv XMTRX,FVn */
1777 if ((ctx
->opcode
& 0x0300) != 0x0100) {
1780 TCGv n
= tcg_const_i32((ctx
->opcode
>> 10) & 3);
1781 gen_helper_ftrv(cpu_env
, n
);
1788 fprintf(stderr
, "unknown instruction 0x%04x at pc 0x%08x\n",
1789 ctx
->opcode
, ctx
->pc
);
1793 if (ctx
->envflags
& DELAY_SLOT_MASK
) {
1795 gen_save_cpu_state(ctx
, true);
1796 gen_helper_raise_slot_illegal_instruction(cpu_env
);
1798 gen_save_cpu_state(ctx
, true);
1799 gen_helper_raise_illegal_instruction(cpu_env
);
1801 ctx
->bstate
= BS_EXCP
;
1805 gen_save_cpu_state(ctx
, true);
1806 if (ctx
->envflags
& DELAY_SLOT_MASK
) {
1807 gen_helper_raise_slot_fpu_disable(cpu_env
);
1809 gen_helper_raise_fpu_disable(cpu_env
);
1811 ctx
->bstate
= BS_EXCP
;
1815 static void decode_opc(DisasContext
* ctx
)
1817 uint32_t old_flags
= ctx
->envflags
;
1821 if (old_flags
& DELAY_SLOT_MASK
) {
1822 /* go out of the delay slot */
1823 ctx
->envflags
&= ~DELAY_SLOT_MASK
;
1825 /* When in an exclusive region, we must continue to the end
1826 for conditional branches. */
1827 if (ctx
->tbflags
& GUSA_EXCLUSIVE
1828 && old_flags
& DELAY_SLOT_CONDITIONAL
) {
1829 gen_delayed_conditional_jump(ctx
);
1832 /* Otherwise this is probably an invalid gUSA region.
1833 Drop the GUSA bits so the next TB doesn't see them. */
1834 ctx
->envflags
&= ~GUSA_MASK
;
1836 tcg_gen_movi_i32(cpu_flags
, ctx
->envflags
);
1837 ctx
->bstate
= BS_BRANCH
;
1838 if (old_flags
& DELAY_SLOT_CONDITIONAL
) {
1839 gen_delayed_conditional_jump(ctx
);
1846 #ifdef CONFIG_USER_ONLY
1847 /* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1848 Upon an interrupt, a real kernel would simply notice magic values in
1849 the registers and reset the PC to the start of the sequence.
1851 For QEMU, we cannot do this in quite the same way. Instead, we notice
1852 the normal start of such a sequence (mov #-x,r15). While we can handle
1853 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1854 sequences and transform them into atomic operations as seen by the host.
1856 static int decode_gusa(DisasContext
*ctx
, CPUSH4State
*env
, int *pmax_insns
)
/* NOTE(review): pattern-matches a gUSA restartable atomic sequence
   (load / optional move / op / store) and lowers it to a single host
   atomic operation.  This extraction has dropped many lines (braces,
   break statements, labels such as fail/done, and the final return
   paths) between the numbered fragments -- restore from the upstream
   file before attempting to compile.  Comments below are grounded only
   in the fragments that are visible here. */
1859 int ld_adr
, ld_dst
, ld_mop
;
1860 int op_dst
, op_src
, op_opc
;
1861 int mv_src
, mt_dst
, st_src
, st_mop
;
/* pc..pc_end delimits the gUSA region; backup is the (negative) offset
   encoded in the tbflags used to sanity-check the region shape. */
1864 uint32_t pc
= ctx
->pc
;
1865 uint32_t pc_end
= ctx
->tb
->cs_base
;
1866 int backup
= sextract32(ctx
->tbflags
, GUSA_SHIFT
, 8);
1867 int max_insns
= (pc_end
- pc
) / 2;
1870 if (pc
!= pc_end
+ backup
|| max_insns
< 2) {
1871 /* This is a malformed gUSA region. Don't do anything special,
1872 since the interpreter is likely to get confused. */
1873 ctx
->envflags
&= ~GUSA_MASK
;
1877 if (ctx
->tbflags
& GUSA_EXCLUSIVE
) {
1878 /* Regardless of single-stepping or the end of the page,
1879 we must complete execution of the gUSA region while
1880 holding the exclusive lock. */
1881 *pmax_insns
= max_insns
;
1885 /* The state machine below will consume only a few insns.
1886 If there are more than that in a region, fail now. */
1887 if (max_insns
> ARRAY_SIZE(insns
)) {
1891 /* Read all of the insns for the region. */
1892 for (i
= 0; i
< max_insns
; ++i
) {
1893 insns
[i
] = cpu_lduw_code(env
, pc
+ i
* 2);
/* -1 means "not yet seen" for every slot of the state machine. */
1896 ld_adr
= ld_dst
= ld_mop
= -1;
1898 op_dst
= op_src
= op_opc
= -1;
1900 st_src
= st_mop
= -1;
1901 TCGV_UNUSED(op_arg
);
/* Helper macro: fetch the next insn of the region or bail out. */
1905 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1908 * Expect a load to begin the region.
1911 switch (ctx
->opcode
& 0xf00f) {
1912 case 0x6000: /* mov.b @Rm,Rn */
1915 case 0x6001: /* mov.w @Rm,Rn */
1918 case 0x6002: /* mov.l @Rm,Rn */
1926 if (ld_adr
== ld_dst
) {
1929 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1933 * Expect an optional register move.
1936 switch (ctx
->opcode
& 0xf00f) {
1937 case 0x6003: /* mov Rm,Rn */
1938 /* Here we want to recognize ld_dst being saved for later consumption,
1939 or for another input register being copied so that ld_dst need not
1940 be clobbered during the operation. */
1943 if (op_dst
== ld_dst
) {
1944 /* Overwriting the load output. */
1947 if (mv_src
!= ld_dst
) {
1948 /* Copying a new input; constrain op_src to match the load. */
1954 /* Put back and re-examine as operation. */
1959 * Expect the operation.
1962 switch (ctx
->opcode
& 0xf00f) {
1963 case 0x300c: /* add Rm,Rn */
1964 op_opc
= INDEX_op_add_i32
;
1966 case 0x2009: /* and Rm,Rn */
1967 op_opc
= INDEX_op_and_i32
;
1969 case 0x200a: /* xor Rm,Rn */
1970 op_opc
= INDEX_op_xor_i32
;
1972 case 0x200b: /* or Rm,Rn */
1973 op_opc
= INDEX_op_or_i32
;
1975 /* The operation register should be as expected, and the
1976 other input cannot depend on the load. */
1977 if (op_dst
!= B11_8
) {
1981 /* Unconstrained input. */
1983 } else if (op_src
== B7_4
) {
1984 /* Constrained input matched load. All operations are
1985 commutative; "swap" them by "moving" the load output
1986 to the (implicit) first argument and the move source
1987 to the (explicit) second argument. */
1992 op_arg
= REG(op_src
);
1995 case 0x6007: /* not Rm,Rn */
1996 if (ld_dst
!= B7_4
|| mv_src
>= 0) {
/* not == xor with all-ones. */
2000 op_opc
= INDEX_op_xor_i32
;
2001 op_arg
= tcg_const_i32(-1);
2004 case 0x7000 ... 0x700f: /* add #imm,Rn */
2005 if (op_dst
!= B11_8
|| mv_src
>= 0) {
2008 op_opc
= INDEX_op_add_i32
;
2009 op_arg
= tcg_const_i32(B7_0s
);
2012 case 0x3000: /* cmp/eq Rm,Rn */
2013 /* Looking for the middle of a compare-and-swap sequence,
2014 beginning with the compare. Operands can be either order,
2015 but with only one overlapping the load. */
2016 if ((ld_dst
== B11_8
) + (ld_dst
== B7_4
) != 1 || mv_src
>= 0) {
2019 op_opc
= INDEX_op_setcond_i32
; /* placeholder */
2020 op_src
= (ld_dst
== B11_8
? B7_4
: B11_8
);
2021 op_arg
= REG(op_src
);
/* The compare must be followed by a conditional branch back to pc_end. */
2024 switch (ctx
->opcode
& 0xff00) {
2025 case 0x8b00: /* bf label */
2026 case 0x8f00: /* bf/s label */
2027 if (pc
+ (i
+ 1 + B7_0s
) * 2 != pc_end
) {
2030 if ((ctx
->opcode
& 0xff00) == 0x8b00) { /* bf label */
2033 /* We're looking to unconditionally modify Rn with the
2034 result of the comparison, within the delay slot of
2035 the branch. This is used by older gcc. */
2037 if ((ctx
->opcode
& 0xf0ff) == 0x0029) { /* movt Rn */
2049 case 0x2008: /* tst Rm,Rn */
2050 /* Looking for a compare-and-swap against zero. */
2051 if (ld_dst
!= B11_8
|| ld_dst
!= B7_4
|| mv_src
>= 0) {
2054 op_opc
= INDEX_op_setcond_i32
;
2055 op_arg
= tcg_const_i32(0);
2058 if ((ctx
->opcode
& 0xff00) != 0x8900 /* bt label */
2059 || pc
+ (i
+ 1 + B7_0s
) * 2 != pc_end
) {
2065 /* Put back and re-examine as store. */
2072 /* The store must be the last insn. */
2073 if (i
!= max_insns
- 1) {
2077 switch (ctx
->opcode
& 0xf00f) {
2078 case 0x2000: /* mov.b Rm,@Rn */
2081 case 0x2001: /* mov.w Rm,@Rn */
2084 case 0x2002: /* mov.l Rm,@Rn */
2090 /* The store must match the load. */
2091 if (ld_adr
!= B11_8
|| st_mop
!= (ld_mop
& MO_SIZE
)) {
2099 * Emit the operation.
2101 tcg_gen_insn_start(pc
, ctx
->envflags
);
2104 /* No operation found. Look for exchange pattern. */
2105 if (st_src
== ld_dst
|| mv_src
>= 0) {
2108 tcg_gen_atomic_xchg_i32(REG(ld_dst
), REG(ld_adr
), REG(st_src
),
2109 ctx
->memidx
, ld_mop
);
/* For each arithmetic op: use the *_fetch form when the insn's
   destination is the loaded register, otherwise the fetch_* form
   plus an explicit host op to produce the separate destination. */
2112 case INDEX_op_add_i32
:
2113 if (op_dst
!= st_src
) {
2116 if (op_dst
== ld_dst
&& st_mop
== MO_UL
) {
2117 tcg_gen_atomic_add_fetch_i32(REG(ld_dst
), REG(ld_adr
),
2118 op_arg
, ctx
->memidx
, ld_mop
);
2120 tcg_gen_atomic_fetch_add_i32(REG(ld_dst
), REG(ld_adr
),
2121 op_arg
, ctx
->memidx
, ld_mop
);
2122 if (op_dst
!= ld_dst
) {
2123 /* Note that mop sizes < 4 cannot use add_fetch
2124 because it won't carry into the higher bits. */
2125 tcg_gen_add_i32(REG(op_dst
), REG(ld_dst
), op_arg
);
2130 case INDEX_op_and_i32
:
2131 if (op_dst
!= st_src
) {
2134 if (op_dst
== ld_dst
) {
2135 tcg_gen_atomic_and_fetch_i32(REG(ld_dst
), REG(ld_adr
),
2136 op_arg
, ctx
->memidx
, ld_mop
);
2138 tcg_gen_atomic_fetch_and_i32(REG(ld_dst
), REG(ld_adr
),
2139 op_arg
, ctx
->memidx
, ld_mop
);
2140 tcg_gen_and_i32(REG(op_dst
), REG(ld_dst
), op_arg
);
2144 case INDEX_op_or_i32
:
2145 if (op_dst
!= st_src
) {
2148 if (op_dst
== ld_dst
) {
2149 tcg_gen_atomic_or_fetch_i32(REG(ld_dst
), REG(ld_adr
),
2150 op_arg
, ctx
->memidx
, ld_mop
);
2152 tcg_gen_atomic_fetch_or_i32(REG(ld_dst
), REG(ld_adr
),
2153 op_arg
, ctx
->memidx
, ld_mop
);
2154 tcg_gen_or_i32(REG(op_dst
), REG(ld_dst
), op_arg
);
2158 case INDEX_op_xor_i32
:
2159 if (op_dst
!= st_src
) {
2162 if (op_dst
== ld_dst
) {
2163 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst
), REG(ld_adr
),
2164 op_arg
, ctx
->memidx
, ld_mop
);
2166 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst
), REG(ld_adr
),
2167 op_arg
, ctx
->memidx
, ld_mop
);
2168 tcg_gen_xor_i32(REG(op_dst
), REG(ld_dst
), op_arg
);
/* Compare-and-swap: cmpxchg against op_arg, then recompute T. */
2172 case INDEX_op_setcond_i32
:
2173 if (st_src
== ld_dst
) {
2176 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst
), REG(ld_adr
), op_arg
,
2177 REG(st_src
), ctx
->memidx
, ld_mop
);
2178 tcg_gen_setcond_i32(TCG_COND_EQ
, cpu_sr_t
, REG(ld_dst
), op_arg
);
2180 tcg_gen_mov_i32(REG(mt_dst
), cpu_sr_t
);
2185 g_assert_not_reached();
2188 /* If op_src is not a valid register, then op_arg was a constant. */
2190 tcg_temp_free_i32(op_arg
);
2193 /* The entire region has been translated. */
2194 ctx
->envflags
&= ~GUSA_MASK
;
/* Failure path: the sequence did not match any known pattern. */
2199 qemu_log_mask(LOG_UNIMP
, "Unrecognized gUSA sequence %08x-%08x\n",
2202 /* Restart with the EXCLUSIVE bit set, within a TB run via
2203 cpu_exec_step_atomic holding the exclusive lock. */
2204 tcg_gen_insn_start(pc
, ctx
->envflags
);
2205 ctx
->envflags
|= GUSA_EXCLUSIVE
;
2206 gen_save_cpu_state(ctx
, false);
2207 gen_helper_exclusive(cpu_env
);
2208 ctx
->bstate
= BS_EXCP
;
2210 /* We're not executing an instruction, but we must report one for the
2211 purposes of accounting within the TB. We might as well report the
2212 entire region consumed via ctx->pc so that it's immediately available
2213 in the disassembly dump. */
2219 void gen_intermediate_code(CPUSH4State
* env
, struct TranslationBlock
*tb
)
/* NOTE(review): top-level translation loop -- sets up the DisasContext
   from tb->flags, bounds the insn count, then decodes insns until a
   branch/exception/limit.  This extraction has dropped lines (the opening
   brace, ctx.pc/pc_start initialization, loop increments, several case
   labels of the final switch, and the closing brace are missing between
   the numbered fragments) -- restore from the upstream file before
   attempting to compile. */
2221 SuperHCPU
*cpu
= sh_env_get_cpu(env
);
2222 CPUState
*cs
= CPU(cpu
);
2224 target_ulong pc_start
;
/* Populate the translation context from the TB's cached flags. */
2230 ctx
.tbflags
= (uint32_t)tb
->flags
;
2231 ctx
.envflags
= tb
->flags
& TB_FLAG_ENVFLAGS_MASK
;
2232 ctx
.bstate
= BS_NONE
;
/* memidx 1 = user mode, 0 = privileged (SR.MD set). */
2233 ctx
.memidx
= (ctx
.tbflags
& (1u << SR_MD
)) == 0 ? 1 : 0;
2234 /* We don't know if the delayed pc came from a dynamic or static branch,
2235 so assume it is a dynamic branch. */
2236 ctx
.delayed_pc
= -1; /* use delayed pc from env pointer */
2238 ctx
.singlestep_enabled
= cs
->singlestep_enabled
;
2239 ctx
.features
= env
->features
;
2240 ctx
.has_movcal
= (ctx
.tbflags
& TB_FLAG_PENDING_MOVCA
);
/* gbank/fbank select the active general/FP register bank offsets. */
2241 ctx
.gbank
= ((ctx
.tbflags
& (1 << SR_MD
)) &&
2242 (ctx
.tbflags
& (1 << SR_RB
))) * 0x10;
2243 ctx
.fbank
= ctx
.tbflags
& FPSCR_FR
? 0x10 : 0;
/* Bound the number of insns by the TB's cflags request... */
2245 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
2246 if (max_insns
== 0) {
2247 max_insns
= CF_COUNT_MASK
;
2249 max_insns
= MIN(max_insns
, TCG_MAX_INSNS
);
2251 /* Since the ISA is fixed-width, we can bound by the number
2252 of instructions remaining on the page. */
2253 num_insns
= -(ctx
.pc
| TARGET_PAGE_MASK
) / 2;
2254 max_insns
= MIN(max_insns
, num_insns
);
2256 /* Single stepping means just that. */
2257 if (ctx
.singlestep_enabled
|| singlestep
) {
/* User-mode only: try to lower a gUSA region to host atomics. */
2264 #ifdef CONFIG_USER_ONLY
2265 if (ctx
.tbflags
& GUSA_MASK
) {
2266 num_insns
= decode_gusa(&ctx
, env
, &max_insns
);
/* Main decode loop: one guest insn per iteration. */
2270 while (ctx
.bstate
== BS_NONE
2271 && num_insns
< max_insns
2272 && !tcg_op_buf_full()) {
2273 tcg_gen_insn_start(ctx
.pc
, ctx
.envflags
);
2276 if (unlikely(cpu_breakpoint_test(cs
, ctx
.pc
, BP_ANY
))) {
2277 /* We have hit a breakpoint - make sure PC is up-to-date */
2278 gen_save_cpu_state(&ctx
, true);
2279 gen_helper_debug(cpu_env
);
2280 ctx
.bstate
= BS_EXCP
;
2281 /* The address covered by the breakpoint must be included in
2282 [tb->pc, tb->pc + tb->size) in order for it to be
2283 properly cleared -- thus we increment the PC here so that
2284 the logic setting tb->size below does the right thing. */
2289 if (num_insns
== max_insns
&& (tb
->cflags
& CF_LAST_IO
)) {
2293 ctx
.opcode
= cpu_lduw_code(env
, ctx
.pc
);
2297 if (tb
->cflags
& CF_LAST_IO
) {
2301 if (ctx
.tbflags
& GUSA_EXCLUSIVE
) {
2302 /* Ending the region of exclusivity. Clear the bits. */
2303 ctx
.envflags
&= ~GUSA_MASK
;
/* Epilogue: emit the TB exit appropriate to how decoding stopped. */
2306 if (cs
->singlestep_enabled
) {
2307 gen_save_cpu_state(&ctx
, true);
2308 gen_helper_debug(cpu_env
);
2310 switch (ctx
.bstate
) {
2312 gen_save_cpu_state(&ctx
, true);
2316 gen_save_cpu_state(&ctx
, false);
2317 gen_goto_tb(&ctx
, 0, ctx
.pc
);
2327 gen_tb_end(tb
, num_insns
);
2329 tb
->size
= ctx
.pc
- pc_start
;
2330 tb
->icount
= num_insns
;
/* Optional disassembly dump of the translated guest code. */
2333 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)
2334 && qemu_log_in_addr_range(pc_start
)) {
2336 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2337 log_target_disas(cs
, pc_start
, ctx
.pc
- pc_start
, 0);
2344 void restore_state_to_opc(CPUSH4State
*env
, TranslationBlock
*tb
,
2348 env
->flags
= data
[1];
2349 /* Theoretically delayed_pc should also be restored. In practice the
2350 branch instruction is re-executed after exception, so the delayed
2351 branch target will be recomputed. */