]> git.proxmox.com Git - mirror_qemu.git/blame - target/sh4/translate.c
target: Use vaddr in gen_intermediate_code
[mirror_qemu.git] / target / sh4 / translate.c
CommitLineData
fdf9b3e8
FB
1/*
2 * SH4 translation
5fafdf24 3 *
fdf9b3e8
FB
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
6faf2b6c 9 * version 2.1 of the License, or (at your option) any later version.
fdf9b3e8
FB
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
fdf9b3e8 18 */
fdf9b3e8 19
9d4c9946 20#include "qemu/osdep.h"
fdf9b3e8 21#include "cpu.h"
76cad711 22#include "disas/disas.h"
63c91552 23#include "exec/exec-all.h"
dcb32f1d 24#include "tcg/tcg-op.h"
2ef6175a
RH
25#include "exec/helper-proto.h"
26#include "exec/helper-gen.h"
4834871b 27#include "exec/translator.h"
508127e2 28#include "exec/log.h"
90c84c56 29#include "qemu/qemu-print.h"
a7e30d84 30
d53106c9
RH
31#define HELPER_H "helper.h"
32#include "exec/helper-info.c.inc"
33#undef HELPER_H
34
a7e30d84 35
fdf9b3e8 36typedef struct DisasContext {
6f1c2af6
RH
37 DisasContextBase base;
38
39 uint32_t tbflags; /* should stay unmodified during the TB translation */
40 uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
fdf9b3e8 41 int memidx;
3a3bb8d2 42 int gbank;
5c13bad9 43 int fbank;
fdf9b3e8 44 uint32_t delayed_pc;
71968fa6 45 uint32_t features;
6f1c2af6
RH
46
47 uint16_t opcode;
48
49 bool has_movcal;
fdf9b3e8
FB
50} DisasContext;
51
fe25591e
AJ
52#if defined(CONFIG_USER_ONLY)
53#define IS_USER(ctx) 1
4da06fb3 54#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
fe25591e 55#else
a6215749 56#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
4da06fb3 57#define UNALIGN(C) 0
fe25591e
AJ
58#endif
59
6f1c2af6 60/* Target-specific values for ctx->base.is_jmp. */
4834871b
RH
61/* We want to exit back to the cpu loop for some reason.
62 Usually this is to recognize interrupts immediately. */
63#define DISAS_STOP DISAS_TARGET_0
823029f9 64
1e8864f7 65/* global register indexes */
3a3bb8d2 66static TCGv cpu_gregs[32];
1d565b21
AJ
67static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
68static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
3a8a44c4 69static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
f85da308
RH
70static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
71static TCGv cpu_lock_addr, cpu_lock_value;
66ba317c 72static TCGv cpu_fregs[32];
1000822b
AJ
73
74/* internal register indexes */
47b9f4d5 75static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
1e8864f7 76
aa7408ec 77void sh4_translate_init(void)
2e70f6ef 78{
1e8864f7 79 int i;
559dd74d 80 static const char * const gregnames[24] = {
1e8864f7
AJ
81 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
82 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
83 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
84 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
85 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
86 };
66ba317c
AJ
87 static const char * const fregnames[32] = {
88 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
89 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
90 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
91 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
92 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
93 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
94 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
95 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
96 };
1e8864f7 97
3a3bb8d2 98 for (i = 0; i < 24; i++) {
ad75a51e 99 cpu_gregs[i] = tcg_global_mem_new_i32(tcg_env,
73e5716c 100 offsetof(CPUSH4State, gregs[i]),
66ba317c 101 gregnames[i]);
3a3bb8d2
RH
102 }
103 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
988d7eaa 104
ad75a51e 105 cpu_pc = tcg_global_mem_new_i32(tcg_env,
73e5716c 106 offsetof(CPUSH4State, pc), "PC");
ad75a51e 107 cpu_sr = tcg_global_mem_new_i32(tcg_env,
73e5716c 108 offsetof(CPUSH4State, sr), "SR");
ad75a51e 109 cpu_sr_m = tcg_global_mem_new_i32(tcg_env,
e1ccc054 110 offsetof(CPUSH4State, sr_m), "SR_M");
ad75a51e 111 cpu_sr_q = tcg_global_mem_new_i32(tcg_env,
e1ccc054 112 offsetof(CPUSH4State, sr_q), "SR_Q");
ad75a51e 113 cpu_sr_t = tcg_global_mem_new_i32(tcg_env,
e1ccc054 114 offsetof(CPUSH4State, sr_t), "SR_T");
ad75a51e 115 cpu_ssr = tcg_global_mem_new_i32(tcg_env,
73e5716c 116 offsetof(CPUSH4State, ssr), "SSR");
ad75a51e 117 cpu_spc = tcg_global_mem_new_i32(tcg_env,
73e5716c 118 offsetof(CPUSH4State, spc), "SPC");
ad75a51e 119 cpu_gbr = tcg_global_mem_new_i32(tcg_env,
73e5716c 120 offsetof(CPUSH4State, gbr), "GBR");
ad75a51e 121 cpu_vbr = tcg_global_mem_new_i32(tcg_env,
73e5716c 122 offsetof(CPUSH4State, vbr), "VBR");
ad75a51e 123 cpu_sgr = tcg_global_mem_new_i32(tcg_env,
73e5716c 124 offsetof(CPUSH4State, sgr), "SGR");
ad75a51e 125 cpu_dbr = tcg_global_mem_new_i32(tcg_env,
73e5716c 126 offsetof(CPUSH4State, dbr), "DBR");
ad75a51e 127 cpu_mach = tcg_global_mem_new_i32(tcg_env,
73e5716c 128 offsetof(CPUSH4State, mach), "MACH");
ad75a51e 129 cpu_macl = tcg_global_mem_new_i32(tcg_env,
73e5716c 130 offsetof(CPUSH4State, macl), "MACL");
ad75a51e 131 cpu_pr = tcg_global_mem_new_i32(tcg_env,
73e5716c 132 offsetof(CPUSH4State, pr), "PR");
ad75a51e 133 cpu_fpscr = tcg_global_mem_new_i32(tcg_env,
73e5716c 134 offsetof(CPUSH4State, fpscr), "FPSCR");
ad75a51e 135 cpu_fpul = tcg_global_mem_new_i32(tcg_env,
73e5716c 136 offsetof(CPUSH4State, fpul), "FPUL");
a7812ae4 137
ad75a51e 138 cpu_flags = tcg_global_mem_new_i32(tcg_env,
55339361 139 offsetof(CPUSH4State, flags), "_flags_");
ad75a51e 140 cpu_delayed_pc = tcg_global_mem_new_i32(tcg_env,
55339361
YP
141 offsetof(CPUSH4State, delayed_pc),
142 "_delayed_pc_");
ad75a51e 143 cpu_delayed_cond = tcg_global_mem_new_i32(tcg_env,
47b9f4d5
AJ
144 offsetof(CPUSH4State,
145 delayed_cond),
146 "_delayed_cond_");
ad75a51e 147 cpu_lock_addr = tcg_global_mem_new_i32(tcg_env,
f85da308
RH
148 offsetof(CPUSH4State, lock_addr),
149 "_lock_addr_");
ad75a51e 150 cpu_lock_value = tcg_global_mem_new_i32(tcg_env,
f85da308
RH
151 offsetof(CPUSH4State, lock_value),
152 "_lock_value_");
1000822b 153
66ba317c 154 for (i = 0; i < 32; i++)
ad75a51e 155 cpu_fregs[i] = tcg_global_mem_new_i32(tcg_env,
73e5716c 156 offsetof(CPUSH4State, fregs[i]),
66ba317c 157 fregnames[i]);
2e70f6ef
PB
158}
159
90c84c56 160void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
fdf9b3e8 161{
878096ee
AF
162 SuperHCPU *cpu = SUPERH_CPU(cs);
163 CPUSH4State *env = &cpu->env;
fdf9b3e8 164 int i;
90c84c56
MA
165
166 qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
167 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
168 qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
169 env->spc, env->ssr, env->gbr, env->vbr);
170 qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
171 env->sgr, env->dbr, env->delayed_pc, env->fpul);
fdf9b3e8 172 for (i = 0; i < 24; i += 4) {
ad4052f1
IL
173 qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
174 i, env->gregs[i], i + 1, env->gregs[i + 1],
175 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
fdf9b3e8 176 }
ab419fd8 177 if (env->flags & TB_FLAG_DELAY_SLOT) {
ad4052f1
IL
178 qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
179 env->delayed_pc);
ab419fd8 180 } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
ad4052f1
IL
181 qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
182 env->delayed_pc);
ab419fd8 183 } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
90c84c56
MA
184 qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
185 env->delayed_pc);
fdf9b3e8
FB
186 }
187}
188
34086945
AJ
189static void gen_read_sr(TCGv dst)
190{
1d565b21
AJ
191 TCGv t0 = tcg_temp_new();
192 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
193 tcg_gen_or_i32(dst, dst, t0);
194 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
195 tcg_gen_or_i32(dst, dst, t0);
196 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
197 tcg_gen_or_i32(dst, cpu_sr, t0);
34086945
AJ
198}
199
200static void gen_write_sr(TCGv src)
201{
1d565b21
AJ
202 tcg_gen_andi_i32(cpu_sr, src,
203 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
a380f9db
AJ
204 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
205 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
206 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
34086945
AJ
207}
208
ac9707ea
AJ
209static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
210{
211 if (save_pc) {
6f1c2af6 212 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
ac9707ea
AJ
213 }
214 if (ctx->delayed_pc != (uint32_t) -1) {
215 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
216 }
e1933d14 217 if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
ac9707ea
AJ
218 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
219 }
220}
221
ec2eb22e
RH
222static inline bool use_exit_tb(DisasContext *ctx)
223{
ab419fd8 224 return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
ec2eb22e
RH
225}
226
3f1e2098 227static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
fdf9b3e8 228{
3f1e2098 229 if (use_exit_tb(ctx)) {
4bfa602b
RH
230 return false;
231 }
3f1e2098 232 return translator_use_goto_tb(&ctx->base, dest);
90aa39a1 233}
fdf9b3e8 234
90aa39a1
SF
235static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
236{
237 if (use_goto_tb(ctx, dest)) {
57fec1fe 238 tcg_gen_goto_tb(n);
3a8a44c4 239 tcg_gen_movi_i32(cpu_pc, dest);
07ea28b4 240 tcg_gen_exit_tb(ctx->base.tb, n);
fdf9b3e8 241 } else {
3a8a44c4 242 tcg_gen_movi_i32(cpu_pc, dest);
52df5adc 243 if (use_exit_tb(ctx)) {
07ea28b4 244 tcg_gen_exit_tb(NULL, 0);
ec2eb22e 245 } else {
7f11636d 246 tcg_gen_lookup_and_goto_ptr();
ec2eb22e 247 }
fdf9b3e8 248 }
6f1c2af6 249 ctx->base.is_jmp = DISAS_NORETURN;
fdf9b3e8
FB
250}
251
fdf9b3e8
FB
252static void gen_jump(DisasContext * ctx)
253{
ec2eb22e 254 if (ctx->delayed_pc == -1) {
55339361
YP
255 /* Target is not statically known, it comes necessarily from a
256 delayed jump as immediate jump are conditinal jumps */
257 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
ac9707ea 258 tcg_gen_discard_i32(cpu_delayed_pc);
52df5adc 259 if (use_exit_tb(ctx)) {
07ea28b4 260 tcg_gen_exit_tb(NULL, 0);
ec2eb22e 261 } else {
7f11636d 262 tcg_gen_lookup_and_goto_ptr();
ec2eb22e 263 }
6f1c2af6 264 ctx->base.is_jmp = DISAS_NORETURN;
fdf9b3e8 265 } else {
55339361 266 gen_goto_tb(ctx, 0, ctx->delayed_pc);
fdf9b3e8
FB
267 }
268}
269
270/* Immediate conditional jump (bt or bf) */
4bfa602b
RH
271static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
272 bool jump_if_true)
fdf9b3e8 273{
34086945 274 TCGLabel *l1 = gen_new_label();
4bfa602b
RH
275 TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
276
ab419fd8 277 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
4bfa602b
RH
278 /* When in an exclusive region, we must continue to the end.
279 Therefore, exit the region on a taken branch, but otherwise
280 fall through to the next instruction. */
281 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
ab419fd8 282 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
4bfa602b
RH
283 /* Note that this won't actually use a goto_tb opcode because we
284 disallow it in use_goto_tb, but it handles exit + singlestep. */
285 gen_goto_tb(ctx, 0, dest);
286 gen_set_label(l1);
5b38d026 287 ctx->base.is_jmp = DISAS_NEXT;
4bfa602b
RH
288 return;
289 }
290
ac9707ea 291 gen_save_cpu_state(ctx, false);
4bfa602b
RH
292 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
293 gen_goto_tb(ctx, 0, dest);
fdf9b3e8 294 gen_set_label(l1);
6f1c2af6
RH
295 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
296 ctx->base.is_jmp = DISAS_NORETURN;
fdf9b3e8
FB
297}
298
299/* Delayed conditional jump (bt or bf) */
300static void gen_delayed_conditional_jump(DisasContext * ctx)
301{
4bfa602b
RH
302 TCGLabel *l1 = gen_new_label();
303 TCGv ds = tcg_temp_new();
fdf9b3e8 304
47b9f4d5
AJ
305 tcg_gen_mov_i32(ds, cpu_delayed_cond);
306 tcg_gen_discard_i32(cpu_delayed_cond);
4bfa602b 307
ab419fd8 308 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
4bfa602b
RH
309 /* When in an exclusive region, we must continue to the end.
310 Therefore, exit the region on a taken branch, but otherwise
311 fall through to the next instruction. */
312 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
313
314 /* Leave the gUSA region. */
ab419fd8 315 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
4bfa602b
RH
316 gen_jump(ctx);
317
318 gen_set_label(l1);
6f1c2af6 319 ctx->base.is_jmp = DISAS_NEXT;
4bfa602b
RH
320 return;
321 }
322
6f396c8f 323 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
6f1c2af6 324 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
fdf9b3e8 325 gen_set_label(l1);
9c2a9ea1 326 gen_jump(ctx);
fdf9b3e8
FB
327}
328
e5d8053e 329static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 330{
1e0b21d8
RH
331 /* We have already signaled illegal instruction for odd Dr. */
332 tcg_debug_assert((reg & 1) == 0);
333 reg ^= ctx->fbank;
66ba317c 334 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
cc4ba6a9
AJ
335}
336
e5d8053e 337static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 338{
1e0b21d8
RH
339 /* We have already signaled illegal instruction for odd Dr. */
340 tcg_debug_assert((reg & 1) == 0);
341 reg ^= ctx->fbank;
58d2a9ae 342 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
cc4ba6a9
AJ
343}
344
/* Instruction field extractors for ctx->opcode. */
#define B3_0   (ctx->opcode & 0xf)
#define B6_4   ((ctx->opcode >> 4) & 0x7)
#define B7_4   ((ctx->opcode >> 4) & 0xf)
#define B7_0   (ctx->opcode & 0xff)
#define B7_0s  ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
                (ctx->opcode & 0xfff))
#define B11_8  ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* Register accessors; the XOR selects the active/inactive bank. */
#define REG(x)    cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)   cpu_fregs[(x) ^ ctx->fbank]

/* Map an XD/DR pair encoding onto the flat FP register index. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))

/* Decode guards: each bails out to the matching error label. */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;                       \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                             \
        goto do_illegal;                            \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {             \
        goto do_fpu_disabled;                       \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {                  \
        goto do_illegal;                            \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {               \
        goto do_illegal;                            \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) {       \
        goto do_illegal;                            \
    }
390
b1d8e52e 391static void _decode_opc(DisasContext * ctx)
fdf9b3e8 392{
852d481f
EI
393 /* This code tries to make movcal emulation sufficiently
394 accurate for Linux purposes. This instruction writes
395 memory, and prior to that, always allocates a cache line.
396 It is used in two contexts:
397 - in memcpy, where data is copied in blocks, the first write
398 of to a block uses movca.l for performance.
399 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
400 to flush the cache. Here, the data written by movcal.l is never
401 written to memory, and the data written is just bogus.
402
403 To simulate this, we simulate movcal.l, we store the value to memory,
404 but we also remember the previous content. If we see ocbi, we check
405 if movcal.l for that address was done previously. If so, the write should
406 not have hit the memory, so we restore the previous content.
407 When we see an instruction that is neither movca.l
408 nor ocbi, the previous content is discarded.
409
410 To optimize, we only try to flush stores when we're at the start of
411 TB, or if we already saw movca.l in this TB and did not flush stores
412 yet. */
413 if (ctx->has_movcal)
55339361
YP
414 {
415 int opcode = ctx->opcode & 0xf0ff;
416 if (opcode != 0x0093 /* ocbi */
417 && opcode != 0x00c3 /* movca.l */)
418 {
419 gen_helper_discard_movcal_backup(tcg_env);
420 ctx->has_movcal = 0;
421 }
422 }
852d481f 423
fdf9b3e8
FB
424#if 0
425 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
426#endif
f6198371 427
fdf9b3e8 428 switch (ctx->opcode) {
55339361 429 case 0x0019: /* div0u */
1d565b21
AJ
430 tcg_gen_movi_i32(cpu_sr_m, 0);
431 tcg_gen_movi_i32(cpu_sr_q, 0);
34086945 432 tcg_gen_movi_i32(cpu_sr_t, 0);
55339361
YP
433 return;
434 case 0x000b: /* rts */
435 CHECK_NOT_DELAY_SLOT
436 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
ab419fd8 437 ctx->envflags |= TB_FLAG_DELAY_SLOT;
55339361
YP
438 ctx->delayed_pc = (uint32_t) - 1;
439 return;
440 case 0x0028: /* clrmac */
441 tcg_gen_movi_i32(cpu_mach, 0);
442 tcg_gen_movi_i32(cpu_macl, 0);
443 return;
444 case 0x0048: /* clrs */
5ed9a259 445 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
55339361
YP
446 return;
447 case 0x0008: /* clrt */
34086945 448 tcg_gen_movi_i32(cpu_sr_t, 0);
55339361
YP
449 return;
450 case 0x0038: /* ldtlb */
451 CHECK_PRIVILEGED
ad75a51e 452 gen_helper_ldtlb(tcg_env);
55339361
YP
453 return;
454 case 0x002b: /* rte */
455 CHECK_PRIVILEGED
456 CHECK_NOT_DELAY_SLOT
34086945 457 gen_write_sr(cpu_ssr);
55339361 458 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
ab419fd8 459 ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
55339361 460 ctx->delayed_pc = (uint32_t) - 1;
6f1c2af6 461 ctx->base.is_jmp = DISAS_STOP;
55339361
YP
462 return;
463 case 0x0058: /* sets */
5ed9a259 464 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
55339361
YP
465 return;
466 case 0x0018: /* sett */
34086945 467 tcg_gen_movi_i32(cpu_sr_t, 1);
55339361
YP
468 return;
469 case 0xfbfd: /* frchg */
61dedf2a 470 CHECK_FPSCR_PR_0
55339361 471 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
6f1c2af6 472 ctx->base.is_jmp = DISAS_STOP;
55339361
YP
473 return;
474 case 0xf3fd: /* fschg */
61dedf2a 475 CHECK_FPSCR_PR_0
7a64244f 476 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
6f1c2af6 477 ctx->base.is_jmp = DISAS_STOP;
55339361
YP
478 return;
479 case 0xf7fd: /* fpchg */
907759f9
RH
480 CHECK_SH4A
481 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
6f1c2af6 482 ctx->base.is_jmp = DISAS_STOP;
907759f9 483 return;
55339361
YP
484 case 0x0009: /* nop */
485 return;
486 case 0x001b: /* sleep */
487 CHECK_PRIVILEGED
6f1c2af6 488 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
ad75a51e 489 gen_helper_sleep(tcg_env);
55339361 490 return;
fdf9b3e8
FB
491 }
492
493 switch (ctx->opcode & 0xf000) {
55339361
YP
494 case 0x1000: /* mov.l Rm,@(disp,Rn) */
495 {
496 TCGv addr = tcg_temp_new();
497 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
4da06fb3
RH
498 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
499 MO_TEUL | UNALIGN(ctx));
55339361
YP
500 }
501 return;
502 case 0x5000: /* mov.l @(disp,Rm),Rn */
503 {
504 TCGv addr = tcg_temp_new();
505 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
4da06fb3
RH
506 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
507 MO_TESL | UNALIGN(ctx));
55339361
YP
508 }
509 return;
510 case 0xe000: /* mov #imm,Rn */
4bfa602b 511#ifdef CONFIG_USER_ONLY
ab419fd8
RH
512 /*
513 * Detect the start of a gUSA region (mov #-n, r15).
514 * If so, update envflags and end the TB. This will allow us
515 * to see the end of the region (stored in R0) in the next TB.
516 */
6f1c2af6
RH
517 if (B11_8 == 15 && B7_0s < 0 &&
518 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
ab419fd8
RH
519 ctx->envflags =
520 deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
6f1c2af6 521 ctx->base.is_jmp = DISAS_STOP;
4bfa602b
RH
522 }
523#endif
55339361
YP
524 tcg_gen_movi_i32(REG(B11_8), B7_0s);
525 return;
526 case 0x9000: /* mov.w @(disp,PC),Rn */
527 {
950b91be 528 TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
03a0d87e
RH
529 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
530 MO_TESW | MO_ALIGN);
55339361
YP
531 }
532 return;
533 case 0xd000: /* mov.l @(disp,PC),Rn */
534 {
950b91be 535 TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
03a0d87e
RH
536 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
537 MO_TESL | MO_ALIGN);
55339361
YP
538 }
539 return;
540 case 0x7000: /* add #imm,Rn */
541 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
542 return;
543 case 0xa000: /* bra disp */
544 CHECK_NOT_DELAY_SLOT
6f1c2af6 545 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ab419fd8 546 ctx->envflags |= TB_FLAG_DELAY_SLOT;
55339361
YP
547 return;
548 case 0xb000: /* bsr disp */
549 CHECK_NOT_DELAY_SLOT
6f1c2af6
RH
550 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
551 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ab419fd8 552 ctx->envflags |= TB_FLAG_DELAY_SLOT;
55339361 553 return;
fdf9b3e8
FB
554 }
555
556 switch (ctx->opcode & 0xf00f) {
55339361
YP
557 case 0x6003: /* mov Rm,Rn */
558 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
559 return;
560 case 0x2000: /* mov.b Rm,@Rn */
3376f415 561 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
55339361
YP
562 return;
563 case 0x2001: /* mov.w Rm,@Rn */
4da06fb3
RH
564 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
565 MO_TEUW | UNALIGN(ctx));
55339361
YP
566 return;
567 case 0x2002: /* mov.l Rm,@Rn */
4da06fb3
RH
568 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
569 MO_TEUL | UNALIGN(ctx));
55339361
YP
570 return;
571 case 0x6000: /* mov.b @Rm,Rn */
3376f415 572 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
55339361
YP
573 return;
574 case 0x6001: /* mov.w @Rm,Rn */
4da06fb3
RH
575 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
576 MO_TESW | UNALIGN(ctx));
55339361
YP
577 return;
578 case 0x6002: /* mov.l @Rm,Rn */
4da06fb3
RH
579 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
580 MO_TESL | UNALIGN(ctx));
55339361
YP
581 return;
582 case 0x2004: /* mov.b Rm,@-Rn */
583 {
584 TCGv addr = tcg_temp_new();
585 tcg_gen_subi_i32(addr, REG(B11_8), 1);
3376f415
AJ
586 /* might cause re-execution */
587 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
55339361
YP
588 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
589 }
590 return;
591 case 0x2005: /* mov.w Rm,@-Rn */
592 {
593 TCGv addr = tcg_temp_new();
594 tcg_gen_subi_i32(addr, REG(B11_8), 2);
4da06fb3
RH
595 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
596 MO_TEUW | UNALIGN(ctx));
55339361
YP
597 tcg_gen_mov_i32(REG(B11_8), addr);
598 }
599 return;
600 case 0x2006: /* mov.l Rm,@-Rn */
601 {
602 TCGv addr = tcg_temp_new();
603 tcg_gen_subi_i32(addr, REG(B11_8), 4);
4da06fb3
RH
604 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
605 MO_TEUL | UNALIGN(ctx));
55339361
YP
606 tcg_gen_mov_i32(REG(B11_8), addr);
607 }
608 return;
609 case 0x6004: /* mov.b @Rm+,Rn */
3376f415 610 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
55339361
YP
611 if ( B11_8 != B7_4 )
612 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
613 return;
614 case 0x6005: /* mov.w @Rm+,Rn */
4da06fb3
RH
615 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
616 MO_TESW | UNALIGN(ctx));
55339361
YP
617 if ( B11_8 != B7_4 )
618 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
619 return;
620 case 0x6006: /* mov.l @Rm+,Rn */
4da06fb3
RH
621 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
622 MO_TESL | UNALIGN(ctx));
55339361
YP
623 if ( B11_8 != B7_4 )
624 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
625 return;
626 case 0x0004: /* mov.b Rm,@(R0,Rn) */
627 {
628 TCGv addr = tcg_temp_new();
629 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 630 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
55339361
YP
631 }
632 return;
633 case 0x0005: /* mov.w Rm,@(R0,Rn) */
634 {
635 TCGv addr = tcg_temp_new();
636 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
4da06fb3
RH
637 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
638 MO_TEUW | UNALIGN(ctx));
55339361
YP
639 }
640 return;
641 case 0x0006: /* mov.l Rm,@(R0,Rn) */
642 {
643 TCGv addr = tcg_temp_new();
644 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
4da06fb3
RH
645 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
646 MO_TEUL | UNALIGN(ctx));
55339361
YP
647 }
648 return;
649 case 0x000c: /* mov.b @(R0,Rm),Rn */
650 {
651 TCGv addr = tcg_temp_new();
652 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 653 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
55339361
YP
654 }
655 return;
656 case 0x000d: /* mov.w @(R0,Rm),Rn */
657 {
658 TCGv addr = tcg_temp_new();
659 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
4da06fb3
RH
660 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
661 MO_TESW | UNALIGN(ctx));
55339361
YP
662 }
663 return;
664 case 0x000e: /* mov.l @(R0,Rm),Rn */
665 {
666 TCGv addr = tcg_temp_new();
667 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
4da06fb3
RH
668 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
669 MO_TESL | UNALIGN(ctx));
55339361
YP
670 }
671 return;
672 case 0x6008: /* swap.b Rm,Rn */
673 {
3c254ab8 674 TCGv low = tcg_temp_new();
b983a0e1 675 tcg_gen_bswap16_i32(low, REG(B7_4), 0);
218fd730 676 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
55339361
YP
677 }
678 return;
679 case 0x6009: /* swap.w Rm,Rn */
c53b36d2 680 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
55339361
YP
681 return;
682 case 0x200d: /* xtrct Rm,Rn */
683 {
684 TCGv high, low;
685 high = tcg_temp_new();
686 tcg_gen_shli_i32(high, REG(B7_4), 16);
687 low = tcg_temp_new();
688 tcg_gen_shri_i32(low, REG(B11_8), 16);
689 tcg_gen_or_i32(REG(B11_8), high, low);
690 }
691 return;
692 case 0x300c: /* add Rm,Rn */
693 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
694 return;
695 case 0x300e: /* addc Rm,Rn */
22b88fd7 696 {
34086945 697 TCGv t0, t1;
950b91be 698 t0 = tcg_constant_tl(0);
22b88fd7 699 t1 = tcg_temp_new();
a2368e01
AJ
700 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
701 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
702 REG(B11_8), t0, t1, cpu_sr_t);
22b88fd7 703 }
55339361
YP
704 return;
705 case 0x300f: /* addv Rm,Rn */
ad8d25a1
AJ
706 {
707 TCGv t0, t1, t2;
708 t0 = tcg_temp_new();
709 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
710 t1 = tcg_temp_new();
711 tcg_gen_xor_i32(t1, t0, REG(B11_8));
712 t2 = tcg_temp_new();
713 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
34086945 714 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
34086945 715 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
ad8d25a1 716 tcg_gen_mov_i32(REG(B7_4), t0);
ad8d25a1 717 }
55339361
YP
718 return;
719 case 0x2009: /* and Rm,Rn */
720 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
721 return;
722 case 0x3000: /* cmp/eq Rm,Rn */
34086945 723 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
55339361
YP
724 return;
725 case 0x3003: /* cmp/ge Rm,Rn */
34086945 726 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
55339361
YP
727 return;
728 case 0x3007: /* cmp/gt Rm,Rn */
34086945 729 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
55339361
YP
730 return;
731 case 0x3006: /* cmp/hi Rm,Rn */
34086945 732 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
55339361
YP
733 return;
734 case 0x3002: /* cmp/hs Rm,Rn */
34086945 735 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
55339361
YP
736 return;
737 case 0x200c: /* cmp/str Rm,Rn */
738 {
739 TCGv cmp1 = tcg_temp_new();
740 TCGv cmp2 = tcg_temp_new();
eb6ca2b4
AJ
741 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
742 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
743 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
744 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
745 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
55339361
YP
746 }
747 return;
748 case 0x2007: /* div0s Rm,Rn */
1d565b21
AJ
749 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
750 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
751 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
55339361
YP
752 return;
753 case 0x3004: /* div1 Rm,Rn */
1d565b21
AJ
754 {
755 TCGv t0 = tcg_temp_new();
756 TCGv t1 = tcg_temp_new();
757 TCGv t2 = tcg_temp_new();
950b91be 758 TCGv zero = tcg_constant_i32(0);
1d565b21
AJ
759
760 /* shift left arg1, saving the bit being pushed out and inserting
761 T on the right */
762 tcg_gen_shri_i32(t0, REG(B11_8), 31);
763 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
764 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
765
766 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
767 using 64-bit temps, we compute arg0's high part from q ^ m, so
768 that it is 0x00000000 when adding the value or 0xffffffff when
769 subtracting it. */
770 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
771 tcg_gen_subi_i32(t1, t1, 1);
772 tcg_gen_neg_i32(t2, REG(B7_4));
773 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
774 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
775
776 /* compute T and Q depending on carry */
777 tcg_gen_andi_i32(t1, t1, 1);
778 tcg_gen_xor_i32(t1, t1, t0);
779 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
780 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
1d565b21 781 }
55339361
YP
782 return;
783 case 0x300d: /* dmuls.l Rm,Rn */
1d3b7084 784 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
55339361
YP
785 return;
786 case 0x3005: /* dmulu.l Rm,Rn */
1d3b7084 787 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
55339361
YP
788 return;
789 case 0x600e: /* exts.b Rm,Rn */
790 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
791 return;
792 case 0x600f: /* exts.w Rm,Rn */
793 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
794 return;
795 case 0x600c: /* extu.b Rm,Rn */
796 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
797 return;
798 case 0x600d: /* extu.w Rm,Rn */
799 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
800 return;
801 case 0x000f: /* mac.l @Rm+,@Rn+ */
802 {
803 TCGv arg0, arg1;
804 arg0 = tcg_temp_new();
03a0d87e
RH
805 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
806 MO_TESL | MO_ALIGN);
55339361 807 arg1 = tcg_temp_new();
03a0d87e
RH
808 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
809 MO_TESL | MO_ALIGN);
ad75a51e 810 gen_helper_macl(tcg_env, arg0, arg1);
55339361
YP
811 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
812 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
813 }
814 return;
815 case 0x400f: /* mac.w @Rm+,@Rn+ */
816 {
817 TCGv arg0, arg1;
818 arg0 = tcg_temp_new();
03a0d87e
RH
819 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
820 MO_TESL | MO_ALIGN);
55339361 821 arg1 = tcg_temp_new();
03a0d87e
RH
822 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
823 MO_TESL | MO_ALIGN);
ad75a51e 824 gen_helper_macw(tcg_env, arg0, arg1);
55339361
YP
825 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
826 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
827 }
828 return;
829 case 0x0007: /* mul.l Rm,Rn */
830 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
831 return;
832 case 0x200f: /* muls.w Rm,Rn */
833 {
834 TCGv arg0, arg1;
835 arg0 = tcg_temp_new();
836 tcg_gen_ext16s_i32(arg0, REG(B7_4));
837 arg1 = tcg_temp_new();
838 tcg_gen_ext16s_i32(arg1, REG(B11_8));
839 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
840 }
841 return;
842 case 0x200e: /* mulu.w Rm,Rn */
843 {
844 TCGv arg0, arg1;
845 arg0 = tcg_temp_new();
846 tcg_gen_ext16u_i32(arg0, REG(B7_4));
847 arg1 = tcg_temp_new();
848 tcg_gen_ext16u_i32(arg1, REG(B11_8));
849 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
850 }
851 return;
852 case 0x600b: /* neg Rm,Rn */
853 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
854 return;
855 case 0x600a: /* negc Rm,Rn */
b2d9eda5 856 {
950b91be 857 TCGv t0 = tcg_constant_i32(0);
60eb27fe
AJ
858 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
859 REG(B7_4), t0, cpu_sr_t, t0);
860 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
861 t0, t0, REG(B11_8), cpu_sr_t);
862 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
b2d9eda5 863 }
55339361
YP
864 return;
865 case 0x6007: /* not Rm,Rn */
866 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
867 return;
868 case 0x200b: /* or Rm,Rn */
869 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
870 return;
871 case 0x400c: /* shad Rm,Rn */
872 {
be654c83
AJ
873 TCGv t0 = tcg_temp_new();
874 TCGv t1 = tcg_temp_new();
875 TCGv t2 = tcg_temp_new();
876
877 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
878
879 /* positive case: shift to the left */
880 tcg_gen_shl_i32(t1, REG(B11_8), t0);
881
882 /* negative case: shift to the right in two steps to
883 correctly handle the -32 case */
884 tcg_gen_xori_i32(t0, t0, 0x1f);
885 tcg_gen_sar_i32(t2, REG(B11_8), t0);
886 tcg_gen_sari_i32(t2, t2, 1);
887
888 /* select between the two cases */
889 tcg_gen_movi_i32(t0, 0);
890 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
55339361
YP
891 }
892 return;
893 case 0x400d: /* shld Rm,Rn */
894 {
57760161
AJ
895 TCGv t0 = tcg_temp_new();
896 TCGv t1 = tcg_temp_new();
897 TCGv t2 = tcg_temp_new();
898
899 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
900
901 /* positive case: shift to the left */
902 tcg_gen_shl_i32(t1, REG(B11_8), t0);
903
904 /* negative case: shift to the right in two steps to
905 correctly handle the -32 case */
906 tcg_gen_xori_i32(t0, t0, 0x1f);
907 tcg_gen_shr_i32(t2, REG(B11_8), t0);
908 tcg_gen_shri_i32(t2, t2, 1);
909
910 /* select between the two cases */
911 tcg_gen_movi_i32(t0, 0);
912 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
55339361
YP
913 }
914 return;
915 case 0x3008: /* sub Rm,Rn */
916 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
917 return;
918 case 0x300a: /* subc Rm,Rn */
22b88fd7 919 {
d0f44a55 920 TCGv t0, t1;
950b91be 921 t0 = tcg_constant_tl(0);
22b88fd7 922 t1 = tcg_temp_new();
d0f44a55
AJ
923 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
924 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
925 REG(B11_8), t0, t1, cpu_sr_t);
926 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
22b88fd7 927 }
55339361
YP
928 return;
929 case 0x300b: /* subv Rm,Rn */
ad8d25a1
AJ
930 {
931 TCGv t0, t1, t2;
932 t0 = tcg_temp_new();
933 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
934 t1 = tcg_temp_new();
935 tcg_gen_xor_i32(t1, t0, REG(B7_4));
936 t2 = tcg_temp_new();
937 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
938 tcg_gen_and_i32(t1, t1, t2);
34086945 939 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
ad8d25a1 940 tcg_gen_mov_i32(REG(B11_8), t0);
ad8d25a1 941 }
55339361
YP
942 return;
943 case 0x2008: /* tst Rm,Rn */
944 {
945 TCGv val = tcg_temp_new();
946 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
34086945 947 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
55339361
YP
948 }
949 return;
950 case 0x200a: /* xor Rm,Rn */
951 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
952 return;
e67888a7 953 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
55339361 954 CHECK_FPU_ENABLED
a6215749 955 if (ctx->tbflags & FPSCR_SZ) {
bdcb3739
RH
956 int xsrc = XHACK(B7_4);
957 int xdst = XHACK(B11_8);
958 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
959 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
55339361 960 } else {
7c9f7038 961 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
55339361
YP
962 }
963 return;
e67888a7 964 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
55339361 965 CHECK_FPU_ENABLED
a6215749 966 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
967 TCGv_i64 fp = tcg_temp_new_i64();
968 gen_load_fpr64(ctx, fp, XHACK(B7_4));
03a0d87e
RH
969 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
970 MO_TEUQ | MO_ALIGN);
55339361 971 } else {
03a0d87e
RH
972 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
973 MO_TEUL | MO_ALIGN);
55339361
YP
974 }
975 return;
e67888a7 976 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
55339361 977 CHECK_FPU_ENABLED
a6215749 978 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50 979 TCGv_i64 fp = tcg_temp_new_i64();
03a0d87e
RH
980 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
981 MO_TEUQ | MO_ALIGN);
4d57fa50 982 gen_store_fpr64(ctx, fp, XHACK(B11_8));
55339361 983 } else {
03a0d87e
RH
984 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
985 MO_TEUL | MO_ALIGN);
55339361
YP
986 }
987 return;
e67888a7 988 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
55339361 989 CHECK_FPU_ENABLED
a6215749 990 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50 991 TCGv_i64 fp = tcg_temp_new_i64();
03a0d87e
RH
992 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
993 MO_TEUQ | MO_ALIGN);
4d57fa50 994 gen_store_fpr64(ctx, fp, XHACK(B11_8));
4d57fa50 995 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
55339361 996 } else {
03a0d87e
RH
997 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
998 MO_TEUL | MO_ALIGN);
55339361
YP
999 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1000 }
1001 return;
e67888a7 1002 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
55339361 1003 CHECK_FPU_ENABLED
4d57fa50
RH
1004 {
1005 TCGv addr = tcg_temp_new_i32();
1006 if (ctx->tbflags & FPSCR_SZ) {
1007 TCGv_i64 fp = tcg_temp_new_i64();
1008 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1009 tcg_gen_subi_i32(addr, REG(B11_8), 8);
03a0d87e
RH
1010 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1011 MO_TEUQ | MO_ALIGN);
4d57fa50
RH
1012 } else {
1013 tcg_gen_subi_i32(addr, REG(B11_8), 4);
03a0d87e
RH
1014 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1015 MO_TEUL | MO_ALIGN);
4d57fa50
RH
1016 }
1017 tcg_gen_mov_i32(REG(B11_8), addr);
4d57fa50 1018 }
55339361 1019 return;
e67888a7 1020 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
55339361
YP
1021 CHECK_FPU_ENABLED
1022 {
1023 TCGv addr = tcg_temp_new_i32();
1024 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
a6215749 1025 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50 1026 TCGv_i64 fp = tcg_temp_new_i64();
03a0d87e
RH
1027 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
1028 MO_TEUQ | MO_ALIGN);
4d57fa50 1029 gen_store_fpr64(ctx, fp, XHACK(B11_8));
55339361 1030 } else {
03a0d87e
RH
1031 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
1032 MO_TEUL | MO_ALIGN);
55339361
YP
1033 }
1034 }
1035 return;
e67888a7 1036 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
55339361
YP
1037 CHECK_FPU_ENABLED
1038 {
1039 TCGv addr = tcg_temp_new();
1040 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
a6215749 1041 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1042 TCGv_i64 fp = tcg_temp_new_i64();
1043 gen_load_fpr64(ctx, fp, XHACK(B7_4));
03a0d87e
RH
1044 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1045 MO_TEUQ | MO_ALIGN);
55339361 1046 } else {
03a0d87e
RH
1047 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1048 MO_TEUL | MO_ALIGN);
55339361
YP
1049 }
1050 }
1051 return;
e67888a7
TS
1052 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1053 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1054 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1055 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1056 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1057 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
55339361
YP
1058 {
1059 CHECK_FPU_ENABLED
a6215749 1060 if (ctx->tbflags & FPSCR_PR) {
a7812ae4
PB
1061 TCGv_i64 fp0, fp1;
1062
93dc9c89
RH
1063 if (ctx->opcode & 0x0110) {
1064 goto do_illegal;
1065 }
55339361
YP
1066 fp0 = tcg_temp_new_i64();
1067 fp1 = tcg_temp_new_i64();
1e0b21d8
RH
1068 gen_load_fpr64(ctx, fp0, B11_8);
1069 gen_load_fpr64(ctx, fp1, B7_4);
a7812ae4 1070 switch (ctx->opcode & 0xf00f) {
55339361 1071 case 0xf000: /* fadd Rm,Rn */
ad75a51e 1072 gen_helper_fadd_DT(fp0, tcg_env, fp0, fp1);
a7812ae4 1073 break;
55339361 1074 case 0xf001: /* fsub Rm,Rn */
ad75a51e 1075 gen_helper_fsub_DT(fp0, tcg_env, fp0, fp1);
a7812ae4 1076 break;
55339361 1077 case 0xf002: /* fmul Rm,Rn */
ad75a51e 1078 gen_helper_fmul_DT(fp0, tcg_env, fp0, fp1);
a7812ae4 1079 break;
55339361 1080 case 0xf003: /* fdiv Rm,Rn */
ad75a51e 1081 gen_helper_fdiv_DT(fp0, tcg_env, fp0, fp1);
a7812ae4 1082 break;
55339361 1083 case 0xf004: /* fcmp/eq Rm,Rn */
ad75a51e 1084 gen_helper_fcmp_eq_DT(cpu_sr_t, tcg_env, fp0, fp1);
a7812ae4 1085 return;
55339361 1086 case 0xf005: /* fcmp/gt Rm,Rn */
ad75a51e 1087 gen_helper_fcmp_gt_DT(cpu_sr_t, tcg_env, fp0, fp1);
a7812ae4
PB
1088 return;
1089 }
1e0b21d8 1090 gen_store_fpr64(ctx, fp0, B11_8);
55339361 1091 } else {
a7812ae4 1092 switch (ctx->opcode & 0xf00f) {
55339361 1093 case 0xf000: /* fadd Rm,Rn */
ad75a51e 1094 gen_helper_fadd_FT(FREG(B11_8), tcg_env,
7c9f7038 1095 FREG(B11_8), FREG(B7_4));
a7812ae4 1096 break;
55339361 1097 case 0xf001: /* fsub Rm,Rn */
ad75a51e 1098 gen_helper_fsub_FT(FREG(B11_8), tcg_env,
7c9f7038 1099 FREG(B11_8), FREG(B7_4));
a7812ae4 1100 break;
55339361 1101 case 0xf002: /* fmul Rm,Rn */
ad75a51e 1102 gen_helper_fmul_FT(FREG(B11_8), tcg_env,
7c9f7038 1103 FREG(B11_8), FREG(B7_4));
a7812ae4 1104 break;
55339361 1105 case 0xf003: /* fdiv Rm,Rn */
ad75a51e 1106 gen_helper_fdiv_FT(FREG(B11_8), tcg_env,
7c9f7038 1107 FREG(B11_8), FREG(B7_4));
a7812ae4 1108 break;
55339361 1109 case 0xf004: /* fcmp/eq Rm,Rn */
ad75a51e 1110 gen_helper_fcmp_eq_FT(cpu_sr_t, tcg_env,
7c9f7038 1111 FREG(B11_8), FREG(B7_4));
a7812ae4 1112 return;
55339361 1113 case 0xf005: /* fcmp/gt Rm,Rn */
ad75a51e 1114 gen_helper_fcmp_gt_FT(cpu_sr_t, tcg_env,
7c9f7038 1115 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1116 return;
1117 }
55339361
YP
1118 }
1119 }
1120 return;
5b7141a1 1121 case 0xf00e: /* fmac FR0,RM,Rn */
7e9f7ca8
RH
1122 CHECK_FPU_ENABLED
1123 CHECK_FPSCR_PR_0
ad75a51e 1124 gen_helper_fmac_FT(FREG(B11_8), tcg_env,
7e9f7ca8
RH
1125 FREG(0), FREG(B7_4), FREG(B11_8));
1126 return;
fdf9b3e8
FB
1127 }
1128
1129 switch (ctx->opcode & 0xff00) {
55339361
YP
1130 case 0xc900: /* and #imm,R0 */
1131 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1132 return;
1133 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1134 {
1135 TCGv addr, val;
1136 addr = tcg_temp_new();
1137 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1138 val = tcg_temp_new();
3376f415 1139 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
55339361 1140 tcg_gen_andi_i32(val, val, B7_0);
3376f415 1141 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
55339361
YP
1142 }
1143 return;
1144 case 0x8b00: /* bf label */
1145 CHECK_NOT_DELAY_SLOT
6f1c2af6 1146 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
55339361
YP
1147 return;
1148 case 0x8f00: /* bf/s label */
1149 CHECK_NOT_DELAY_SLOT
ac9707ea 1150 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
6f1c2af6 1151 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ab419fd8 1152 ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
55339361
YP
1153 return;
1154 case 0x8900: /* bt label */
1155 CHECK_NOT_DELAY_SLOT
6f1c2af6 1156 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
55339361
YP
1157 return;
1158 case 0x8d00: /* bt/s label */
1159 CHECK_NOT_DELAY_SLOT
ac9707ea 1160 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
6f1c2af6 1161 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ab419fd8 1162 ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
55339361
YP
1163 return;
1164 case 0x8800: /* cmp/eq #imm,R0 */
34086945 1165 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
55339361
YP
1166 return;
1167 case 0xc400: /* mov.b @(disp,GBR),R0 */
1168 {
1169 TCGv addr = tcg_temp_new();
1170 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1171 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
55339361
YP
1172 }
1173 return;
1174 case 0xc500: /* mov.w @(disp,GBR),R0 */
1175 {
1176 TCGv addr = tcg_temp_new();
1177 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
03a0d87e 1178 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
55339361
YP
1179 }
1180 return;
1181 case 0xc600: /* mov.l @(disp,GBR),R0 */
1182 {
1183 TCGv addr = tcg_temp_new();
1184 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
03a0d87e 1185 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
55339361
YP
1186 }
1187 return;
1188 case 0xc000: /* mov.b R0,@(disp,GBR) */
1189 {
1190 TCGv addr = tcg_temp_new();
1191 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1192 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
55339361
YP
1193 }
1194 return;
1195 case 0xc100: /* mov.w R0,@(disp,GBR) */
1196 {
1197 TCGv addr = tcg_temp_new();
1198 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
03a0d87e 1199 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
55339361
YP
1200 }
1201 return;
1202 case 0xc200: /* mov.l R0,@(disp,GBR) */
1203 {
1204 TCGv addr = tcg_temp_new();
1205 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
03a0d87e 1206 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
55339361
YP
1207 }
1208 return;
1209 case 0x8000: /* mov.b R0,@(disp,Rn) */
1210 {
1211 TCGv addr = tcg_temp_new();
1212 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1213 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
55339361
YP
1214 }
1215 return;
1216 case 0x8100: /* mov.w R0,@(disp,Rn) */
1217 {
1218 TCGv addr = tcg_temp_new();
1219 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
4da06fb3
RH
1220 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
1221 MO_TEUW | UNALIGN(ctx));
55339361
YP
1222 }
1223 return;
1224 case 0x8400: /* mov.b @(disp,Rn),R0 */
1225 {
1226 TCGv addr = tcg_temp_new();
1227 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1228 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
55339361
YP
1229 }
1230 return;
1231 case 0x8500: /* mov.w @(disp,Rn),R0 */
1232 {
1233 TCGv addr = tcg_temp_new();
1234 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
4da06fb3
RH
1235 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
1236 MO_TESW | UNALIGN(ctx));
55339361
YP
1237 }
1238 return;
1239 case 0xc700: /* mova @(disp,PC),R0 */
6f1c2af6
RH
1240 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1241 4 + B7_0 * 4) & ~3);
55339361
YP
1242 return;
1243 case 0xcb00: /* or #imm,R0 */
1244 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1245 return;
1246 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1247 {
1248 TCGv addr, val;
1249 addr = tcg_temp_new();
1250 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1251 val = tcg_temp_new();
3376f415 1252 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
55339361 1253 tcg_gen_ori_i32(val, val, B7_0);
3376f415 1254 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
55339361
YP
1255 }
1256 return;
1257 case 0xc300: /* trapa #imm */
1258 {
1259 TCGv imm;
1260 CHECK_NOT_DELAY_SLOT
ac9707ea 1261 gen_save_cpu_state(ctx, true);
55339361 1262 imm = tcg_constant_i32(B7_0);
ad75a51e 1263 gen_helper_trapa(tcg_env, imm);
6f1c2af6 1264 ctx->base.is_jmp = DISAS_NORETURN;
55339361
YP
1265 }
1266 return;
1267 case 0xc800: /* tst #imm,R0 */
1268 {
1269 TCGv val = tcg_temp_new();
1270 tcg_gen_andi_i32(val, REG(0), B7_0);
34086945 1271 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
55339361
YP
1272 }
1273 return;
1274 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1275 {
1276 TCGv val = tcg_temp_new();
1277 tcg_gen_add_i32(val, REG(0), cpu_gbr);
3376f415 1278 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
55339361 1279 tcg_gen_andi_i32(val, val, B7_0);
34086945 1280 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
55339361
YP
1281 }
1282 return;
1283 case 0xca00: /* xor #imm,R0 */
1284 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1285 return;
1286 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1287 {
1288 TCGv addr, val;
1289 addr = tcg_temp_new();
1290 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1291 val = tcg_temp_new();
3376f415 1292 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
55339361 1293 tcg_gen_xori_i32(val, val, B7_0);
3376f415 1294 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
55339361
YP
1295 }
1296 return;
fdf9b3e8
FB
1297 }
1298
1299 switch (ctx->opcode & 0xf08f) {
55339361
YP
1300 case 0x408e: /* ldc Rm,Rn_BANK */
1301 CHECK_PRIVILEGED
1302 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1303 return;
1304 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1305 CHECK_PRIVILEGED
03a0d87e
RH
1306 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
1307 MO_TESL | MO_ALIGN);
55339361
YP
1308 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1309 return;
1310 case 0x0082: /* stc Rm_BANK,Rn */
1311 CHECK_PRIVILEGED
1312 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1313 return;
1314 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1315 CHECK_PRIVILEGED
1316 {
1317 TCGv addr = tcg_temp_new();
1318 tcg_gen_subi_i32(addr, REG(B11_8), 4);
03a0d87e
RH
1319 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
1320 MO_TEUL | MO_ALIGN);
55339361
YP
1321 tcg_gen_mov_i32(REG(B11_8), addr);
1322 }
1323 return;
fdf9b3e8
FB
1324 }
1325
1326 switch (ctx->opcode & 0xf0ff) {
55339361
YP
1327 case 0x0023: /* braf Rn */
1328 CHECK_NOT_DELAY_SLOT
6f1c2af6 1329 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
ab419fd8 1330 ctx->envflags |= TB_FLAG_DELAY_SLOT;
55339361
YP
1331 ctx->delayed_pc = (uint32_t) - 1;
1332 return;
1333 case 0x0003: /* bsrf Rn */
1334 CHECK_NOT_DELAY_SLOT
6f1c2af6 1335 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
55339361 1336 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
ab419fd8 1337 ctx->envflags |= TB_FLAG_DELAY_SLOT;
55339361
YP
1338 ctx->delayed_pc = (uint32_t) - 1;
1339 return;
1340 case 0x4015: /* cmp/pl Rn */
34086945 1341 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
55339361
YP
1342 return;
1343 case 0x4011: /* cmp/pz Rn */
34086945 1344 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
55339361
YP
1345 return;
1346 case 0x4010: /* dt Rn */
1347 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
34086945 1348 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
55339361
YP
1349 return;
1350 case 0x402b: /* jmp @Rn */
1351 CHECK_NOT_DELAY_SLOT
1352 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
ab419fd8 1353 ctx->envflags |= TB_FLAG_DELAY_SLOT;
55339361
YP
1354 ctx->delayed_pc = (uint32_t) - 1;
1355 return;
1356 case 0x400b: /* jsr @Rn */
1357 CHECK_NOT_DELAY_SLOT
6f1c2af6 1358 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
55339361 1359 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
ab419fd8 1360 ctx->envflags |= TB_FLAG_DELAY_SLOT;
55339361
YP
1361 ctx->delayed_pc = (uint32_t) - 1;
1362 return;
1363 case 0x400e: /* ldc Rm,SR */
1364 CHECK_PRIVILEGED
34086945
AJ
1365 {
1366 TCGv val = tcg_temp_new();
1367 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1368 gen_write_sr(val);
6f1c2af6 1369 ctx->base.is_jmp = DISAS_STOP;
34086945 1370 }
55339361
YP
1371 return;
1372 case 0x4007: /* ldc.l @Rm+,SR */
1373 CHECK_PRIVILEGED
1374 {
1375 TCGv val = tcg_temp_new();
03a0d87e
RH
1376 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1377 MO_TESL | MO_ALIGN);
34086945
AJ
1378 tcg_gen_andi_i32(val, val, 0x700083f3);
1379 gen_write_sr(val);
55339361 1380 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
6f1c2af6 1381 ctx->base.is_jmp = DISAS_STOP;
55339361
YP
1382 }
1383 return;
1384 case 0x0002: /* stc SR,Rn */
1385 CHECK_PRIVILEGED
34086945 1386 gen_read_sr(REG(B11_8));
55339361
YP
1387 return;
1388 case 0x4003: /* stc SR,@-Rn */
1389 CHECK_PRIVILEGED
1390 {
1391 TCGv addr = tcg_temp_new();
34086945 1392 TCGv val = tcg_temp_new();
55339361 1393 tcg_gen_subi_i32(addr, REG(B11_8), 4);
34086945 1394 gen_read_sr(val);
03a0d87e 1395 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
55339361
YP
1396 tcg_gen_mov_i32(REG(B11_8), addr);
1397 }
1398 return;
1399#define LD(reg,ldnum,ldpnum,prechk) \
1400 case ldnum: \
1401 prechk \
1402 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1403 return; \
1404 case ldpnum: \
1405 prechk \
1406 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, \
1407 MO_TESL | MO_ALIGN); \
1408 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
8e9b0678 1409 return;
55339361
YP
1410#define ST(reg,stnum,stpnum,prechk) \
1411 case stnum: \
1412 prechk \
1413 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1414 return; \
1415 case stpnum: \
1416 prechk \
1417 { \
1418 TCGv addr = tcg_temp_new(); \
1419 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1420 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, \
1421 MO_TEUL | MO_ALIGN); \
1422 tcg_gen_mov_i32(REG(B11_8), addr); \
1423 } \
fdf9b3e8 1424 return;
55339361
YP
1425#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1426 LD(reg,ldnum,ldpnum,prechk) \
1427 ST(reg,stnum,stpnum,prechk)
1428 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1429 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1430 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1431 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1432 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
ccae24d4 1433 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
55339361
YP
1434 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1435 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1436 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1437 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1438 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1439 case 0x406a: /* lds Rm,FPSCR */
1440 CHECK_FPU_ENABLED
ad75a51e 1441 gen_helper_ld_fpscr(tcg_env, REG(B11_8));
6f1c2af6 1442 ctx->base.is_jmp = DISAS_STOP;
55339361
YP
1443 return;
1444 case 0x4066: /* lds.l @Rm+,FPSCR */
1445 CHECK_FPU_ENABLED
1446 {
1447 TCGv addr = tcg_temp_new();
03a0d87e
RH
1448 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
1449 MO_TESL | MO_ALIGN);
55339361 1450 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
ad75a51e 1451 gen_helper_ld_fpscr(tcg_env, addr);
6f1c2af6 1452 ctx->base.is_jmp = DISAS_STOP;
55339361
YP
1453 }
1454 return;
1455 case 0x006a: /* sts FPSCR,Rn */
1456 CHECK_FPU_ENABLED
1457 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1458 return;
1459 case 0x4062: /* sts FPSCR,@-Rn */
1460 CHECK_FPU_ENABLED
1461 {
1462 TCGv addr, val;
1463 val = tcg_temp_new();
1464 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1465 addr = tcg_temp_new();
1466 tcg_gen_subi_i32(addr, REG(B11_8), 4);
03a0d87e 1467 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
55339361
YP
1468 tcg_gen_mov_i32(REG(B11_8), addr);
1469 }
1470 return;
1471 case 0x00c3: /* movca.l R0,@Rm */
852d481f
EI
1472 {
1473 TCGv val = tcg_temp_new();
03a0d87e
RH
1474 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1475 MO_TEUL | MO_ALIGN);
ad75a51e 1476 gen_helper_movcal(tcg_env, REG(B11_8), val);
03a0d87e
RH
1477 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1478 MO_TEUL | MO_ALIGN);
852d481f
EI
1479 }
1480 ctx->has_movcal = 1;
55339361
YP
1481 return;
1482 case 0x40a9: /* movua.l @Rm,R0 */
ccae24d4 1483 CHECK_SH4A
143021b2 1484 /* Load non-boundary-aligned data */
ccae24d4
RH
1485 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1486 MO_TEUL | MO_UNALN);
1487 return;
55339361 1488 case 0x40e9: /* movua.l @Rm+,R0 */
ccae24d4 1489 CHECK_SH4A
143021b2 1490 /* Load non-boundary-aligned data */
ccae24d4
RH
1491 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1492 MO_TEUL | MO_UNALN);
1493 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1494 return;
55339361 1495 case 0x0029: /* movt Rn */
34086945 1496 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
55339361 1497 return;
66c7c806
AJ
1498 case 0x0073:
1499 /* MOVCO.L
f85da308
RH
1500 * LDST -> T
1501 * If (T == 1) R0 -> (Rn)
1502 * 0 -> LDST
1503 *
1504 * The above description doesn't work in a parallel context.
1505 * Since we currently support no smp boards, this implies user-mode.
1506 * But we can still support the official mechanism while user-mode
1507 * is single-threaded. */
ccae24d4
RH
1508 CHECK_SH4A
1509 {
f85da308
RH
1510 TCGLabel *fail = gen_new_label();
1511 TCGLabel *done = gen_new_label();
1512
6f1c2af6 1513 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1514 TCGv tmp;
1515
1516 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1517 cpu_lock_addr, fail);
1518 tmp = tcg_temp_new();
1519 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
03a0d87e
RH
1520 REG(0), ctx->memidx,
1521 MO_TEUL | MO_ALIGN);
f85da308 1522 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
f85da308
RH
1523 } else {
1524 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
03a0d87e
RH
1525 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1526 MO_TEUL | MO_ALIGN);
f85da308
RH
1527 tcg_gen_movi_i32(cpu_sr_t, 1);
1528 }
1529 tcg_gen_br(done);
1530
1531 gen_set_label(fail);
1532 tcg_gen_movi_i32(cpu_sr_t, 0);
1533
1534 gen_set_label(done);
1535 tcg_gen_movi_i32(cpu_lock_addr, -1);
ccae24d4 1536 }
f85da308 1537 return;
66c7c806
AJ
1538 case 0x0063:
1539 /* MOVLI.L @Rm,R0
f85da308
RH
1540 * 1 -> LDST
1541 * (Rm) -> R0
1542 * When interrupt/exception
1543 * occurred 0 -> LDST
1544 *
1545 * In a parallel context, we must also save the loaded value
1546 * for use with the cmpxchg that we'll use with movco.l. */
ccae24d4 1547 CHECK_SH4A
6f1c2af6 1548 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1549 TCGv tmp = tcg_temp_new();
1550 tcg_gen_mov_i32(tmp, REG(B11_8));
03a0d87e
RH
1551 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1552 MO_TESL | MO_ALIGN);
f85da308
RH
1553 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1554 tcg_gen_mov_i32(cpu_lock_addr, tmp);
f85da308 1555 } else {
03a0d87e
RH
1556 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1557 MO_TESL | MO_ALIGN);
f85da308
RH
1558 tcg_gen_movi_i32(cpu_lock_addr, 0);
1559 }
ccae24d4 1560 return;
55339361
YP
1561 case 0x0093: /* ocbi @Rn */
1562 {
ad75a51e 1563 gen_helper_ocbi(tcg_env, REG(B11_8));
55339361
YP
1564 }
1565 return;
1566 case 0x00a3: /* ocbp @Rn */
1567 case 0x00b3: /* ocbwb @Rn */
0cdb9554
AJ
1568 /* These instructions are supposed to do nothing in case of
1569 a cache miss. Given that we only partially emulate caches
1570 it is safe to simply ignore them. */
55339361
YP
1571 return;
1572 case 0x0083: /* pref @Rn */
1573 return;
1574 case 0x00d3: /* prefi @Rn */
ccae24d4
RH
1575 CHECK_SH4A
1576 return;
55339361 1577 case 0x00e3: /* icbi @Rn */
ccae24d4
RH
1578 CHECK_SH4A
1579 return;
55339361 1580 case 0x00ab: /* synco */
ccae24d4
RH
1581 CHECK_SH4A
1582 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1583 return;
55339361
YP
1584 case 0x4024: /* rotcl Rn */
1585 {
1586 TCGv tmp = tcg_temp_new();
34086945
AJ
1587 tcg_gen_mov_i32(tmp, cpu_sr_t);
1588 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
55339361 1589 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1590 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
55339361
YP
1591 }
1592 return;
1593 case 0x4025: /* rotcr Rn */
1594 {
1595 TCGv tmp = tcg_temp_new();
34086945
AJ
1596 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1597 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
55339361 1598 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
34086945 1599 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
55339361
YP
1600 }
1601 return;
1602 case 0x4004: /* rotl Rn */
1603 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1604 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
55339361
YP
1605 return;
1606 case 0x4005: /* rotr Rn */
34086945 1607 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
55339361
YP
1608 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1609 return;
1610 case 0x4000: /* shll Rn */
1611 case 0x4020: /* shal Rn */
34086945 1612 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
55339361
YP
1613 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1614 return;
1615 case 0x4021: /* shar Rn */
34086945 1616 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
55339361
YP
1617 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1618 return;
1619 case 0x4001: /* shlr Rn */
34086945 1620 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
55339361
YP
1621 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1622 return;
1623 case 0x4008: /* shll2 Rn */
1624 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1625 return;
1626 case 0x4018: /* shll8 Rn */
1627 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1628 return;
1629 case 0x4028: /* shll16 Rn */
1630 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1631 return;
1632 case 0x4009: /* shlr2 Rn */
1633 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1634 return;
1635 case 0x4019: /* shlr8 Rn */
1636 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1637 return;
1638 case 0x4029: /* shlr16 Rn */
1639 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1640 return;
1641 case 0x401b: /* tas.b @Rn */
d3c2b2b3
RH
1642 tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
1643 tcg_constant_i32(0x80), ctx->memidx, MO_UB);
1644 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
cb32f179 1645 return;
e67888a7 1646 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
55339361 1647 CHECK_FPU_ENABLED
7c9f7038 1648 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
55339361 1649 return;
e67888a7 1650 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
55339361 1651 CHECK_FPU_ENABLED
7c9f7038 1652 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
55339361 1653 return;
e67888a7 1654 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
55339361 1655 CHECK_FPU_ENABLED
a6215749 1656 if (ctx->tbflags & FPSCR_PR) {
55339361 1657 TCGv_i64 fp;
93dc9c89
RH
1658 if (ctx->opcode & 0x0100) {
1659 goto do_illegal;
1660 }
55339361 1661 fp = tcg_temp_new_i64();
ad75a51e 1662 gen_helper_float_DT(fp, tcg_env, cpu_fpul);
1e0b21d8 1663 gen_store_fpr64(ctx, fp, B11_8);
55339361
YP
1664 }
1665 else {
ad75a51e 1666 gen_helper_float_FT(FREG(B11_8), tcg_env, cpu_fpul);
55339361
YP
1667 }
1668 return;
e67888a7 1669 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
55339361 1670 CHECK_FPU_ENABLED
a6215749 1671 if (ctx->tbflags & FPSCR_PR) {
55339361 1672 TCGv_i64 fp;
93dc9c89
RH
1673 if (ctx->opcode & 0x0100) {
1674 goto do_illegal;
1675 }
55339361 1676 fp = tcg_temp_new_i64();
1e0b21d8 1677 gen_load_fpr64(ctx, fp, B11_8);
ad75a51e 1678 gen_helper_ftrc_DT(cpu_fpul, tcg_env, fp);
55339361
YP
1679 }
1680 else {
ad75a51e 1681 gen_helper_ftrc_FT(cpu_fpul, tcg_env, FREG(B11_8));
55339361
YP
1682 }
1683 return;
24988dc2 1684 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
55339361 1685 CHECK_FPU_ENABLED
7c9f7038 1686 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
55339361 1687 return;
57f5c1b0 1688 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
55339361 1689 CHECK_FPU_ENABLED
7c9f7038 1690 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
55339361 1691 return;
24988dc2 1692 case 0xf06d: /* fsqrt FRn */
55339361 1693 CHECK_FPU_ENABLED
a6215749 1694 if (ctx->tbflags & FPSCR_PR) {
93dc9c89
RH
1695 if (ctx->opcode & 0x0100) {
1696 goto do_illegal;
1697 }
55339361 1698 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1699 gen_load_fpr64(ctx, fp, B11_8);
ad75a51e 1700 gen_helper_fsqrt_DT(fp, tcg_env, fp);
1e0b21d8 1701 gen_store_fpr64(ctx, fp, B11_8);
55339361 1702 } else {
ad75a51e 1703 gen_helper_fsqrt_FT(FREG(B11_8), tcg_env, FREG(B11_8));
55339361
YP
1704 }
1705 return;
24988dc2 1706 case 0xf07d: /* fsrra FRn */
55339361 1707 CHECK_FPU_ENABLED
11b7aa23 1708 CHECK_FPSCR_PR_0
ad75a51e 1709 gen_helper_fsrra_FT(FREG(B11_8), tcg_env, FREG(B11_8));
55339361 1710 break;
e67888a7 1711 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
55339361 1712 CHECK_FPU_ENABLED
7e9f7ca8
RH
1713 CHECK_FPSCR_PR_0
1714 tcg_gen_movi_i32(FREG(B11_8), 0);
1715 return;
e67888a7 1716 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
55339361 1717 CHECK_FPU_ENABLED
7e9f7ca8
RH
1718 CHECK_FPSCR_PR_0
1719 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1720 return;
24988dc2 1721 case 0xf0ad: /* fcnvsd FPUL,DRn */
55339361
YP
1722 CHECK_FPU_ENABLED
1723 {
1724 TCGv_i64 fp = tcg_temp_new_i64();
ad75a51e 1725 gen_helper_fcnvsd_FT_DT(fp, tcg_env, cpu_fpul);
1e0b21d8 1726 gen_store_fpr64(ctx, fp, B11_8);
55339361
YP
1727 }
1728 return;
24988dc2 1729 case 0xf0bd: /* fcnvds DRn,FPUL */
55339361
YP
1730 CHECK_FPU_ENABLED
1731 {
1732 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1733 gen_load_fpr64(ctx, fp, B11_8);
ad75a51e 1734 gen_helper_fcnvds_DT_FT(cpu_fpul, tcg_env, fp);
55339361
YP
1735 }
1736 return;
af8c2bde
AJ
1737 case 0xf0ed: /* fipr FVm,FVn */
1738 CHECK_FPU_ENABLED
7e9f7ca8
RH
1739 CHECK_FPSCR_PR_1
1740 {
950b91be
RH
1741 TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
1742 TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
ad75a51e 1743 gen_helper_fipr(tcg_env, m, n);
af8c2bde
AJ
1744 return;
1745 }
1746 break;
17075f10
AJ
1747 case 0xf0fd: /* ftrv XMTRX,FVn */
1748 CHECK_FPU_ENABLED
7e9f7ca8
RH
1749 CHECK_FPSCR_PR_1
1750 {
1751 if ((ctx->opcode & 0x0300) != 0x0100) {
1752 goto do_illegal;
1753 }
950b91be 1754 TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
ad75a51e 1755 gen_helper_ftrv(tcg_env, n);
17075f10
AJ
1756 return;
1757 }
1758 break;
fdf9b3e8 1759 }
bacc637a 1760#if 0
fdf9b3e8 1761 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
6f1c2af6 1762 ctx->opcode, ctx->base.pc_next);
bacc637a
AJ
1763 fflush(stderr);
1764#endif
6b98213d 1765 do_illegal:
ab419fd8 1766 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
dec16c6e
RH
1767 do_illegal_slot:
1768 gen_save_cpu_state(ctx, true);
ad75a51e 1769 gen_helper_raise_slot_illegal_instruction(tcg_env);
86865c5f 1770 } else {
dec16c6e 1771 gen_save_cpu_state(ctx, true);
ad75a51e 1772 gen_helper_raise_illegal_instruction(tcg_env);
86865c5f 1773 }
6f1c2af6 1774 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042
RH
1775 return;
1776
1777 do_fpu_disabled:
1778 gen_save_cpu_state(ctx, true);
ab419fd8 1779 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
ad75a51e 1780 gen_helper_raise_slot_fpu_disable(tcg_env);
dec4f042 1781 } else {
ad75a51e 1782 gen_helper_raise_fpu_disable(tcg_env);
dec4f042 1783 }
6f1c2af6 1784 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042 1785 return;
823029f9
TS
1786}
1787
b1d8e52e 1788static void decode_opc(DisasContext * ctx)
823029f9 1789{
a6215749 1790 uint32_t old_flags = ctx->envflags;
823029f9
TS
1791
1792 _decode_opc(ctx);
1793
ab419fd8 1794 if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
39682608 1795 /* go out of the delay slot */
ab419fd8 1796 ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
4bfa602b
RH
1797
1798 /* When in an exclusive region, we must continue to the end
1799 for conditional branches. */
ab419fd8
RH
1800 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1801 && old_flags & TB_FLAG_DELAY_SLOT_COND) {
4bfa602b
RH
1802 gen_delayed_conditional_jump(ctx);
1803 return;
1804 }
1805 /* Otherwise this is probably an invalid gUSA region.
1806 Drop the GUSA bits so the next TB doesn't see them. */
ab419fd8 1807 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
4bfa602b 1808
ac9707ea 1809 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
ab419fd8 1810 if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
55339361 1811 gen_delayed_conditional_jump(ctx);
be53081a 1812 } else {
823029f9 1813 gen_jump(ctx);
55339361 1814 }
4bfa602b
RH
1815 }
1816}
1817
1818#ifdef CONFIG_USER_ONLY
4f9ef4ee
RH
/*
 * Restart with the EXCLUSIVE bit set, within a TB run via
 * cpu_exec_step_atomic holding the exclusive lock.
 */
static void gen_restart_exclusive(DisasContext *ctx)
{
    /* Set the flag first so the state saved below carries it into
       the restarted execution. */
    ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
    gen_save_cpu_state(ctx, false);
    gen_helper_exclusive(tcg_env);
    /* The helper raises an exception; nothing after it is reachable. */
    ctx->base.is_jmp = DISAS_NORETURN;
}
1830
4bfa602b
RH
/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
   Upon an interrupt, a real kernel would simply notice magic values in
   the registers and reset the PC to the start of the sequence.

   For QEMU, we cannot do this in quite the same way.  Instead, we notice
   the normal start of such a sequence (mov #-x,r15).  While we can handle
   any sequence via cpu_exec_step_atomic, we can recognize the "normal"
   sequences and transform them into atomic operations as seen by the host.
*/
/*
 * Parse the whole gUSA region [ctx->base.pc_next, tb->cs_base) as a
 * state machine: load, optional register move, optional ALU op or
 * compare, then a matching store.  On a recognized pattern, emit one
 * host atomic op covering the region; on any mismatch, fall back to
 * restarting under the exclusive lock (gen_restart_exclusive).
 */
static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
{
    uint16_t insns[5];
    int ld_adr, ld_dst, ld_mop;          /* address reg, dest reg, memop of the load */
    int op_dst, op_src, op_opc;          /* ALU destination, source, and TCG opcode */
    int mv_src, mt_dst, st_src, st_mop;  /* mov source, movt dest, store source/memop */
    TCGv op_arg;
    uint32_t pc = ctx->base.pc_next;
    uint32_t pc_end = ctx->base.tb->cs_base;  /* cs_base holds the region end pc */
    int max_insns = (pc_end - pc) / 2;
    int i;

    /* The state machine below will consume only a few insns.
       If there are more than that in a region, fail now. */
    if (max_insns > ARRAY_SIZE(insns)) {
        goto fail;
    }

    /* Read all of the insns for the region. */
    for (i = 0; i < max_insns; ++i) {
        insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
    }

    /* -1 throughout means "slot not seen yet". */
    ld_adr = ld_dst = ld_mop = -1;
    mv_src = -1;
    op_dst = op_src = op_opc = -1;
    mt_dst = -1;
    st_src = st_mop = -1;
    op_arg = NULL;
    i = 0;

#define NEXT_INSN \
    do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)

    /*
     * Expect a load to begin the region.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6000: /* mov.b @Rm,Rn */
        ld_mop = MO_SB;
        break;
    case 0x6001: /* mov.w @Rm,Rn */
        ld_mop = MO_TESW;
        break;
    case 0x6002: /* mov.l @Rm,Rn */
        ld_mop = MO_TESL;
        break;
    default:
        goto fail;
    }
    ld_adr = B7_4;
    ld_dst = B11_8;
    if (ld_adr == ld_dst) {
        goto fail;
    }
    /* Unless we see a mov, any two-operand operation must use ld_dst. */
    op_dst = ld_dst;

    /*
     * Expect an optional register move.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        /*
         * Here we want to recognize ld_dst being saved for later consumption,
         * or for another input register being copied so that ld_dst need not
         * be clobbered during the operation.
         */
        op_dst = B11_8;
        mv_src = B7_4;
        if (op_dst == ld_dst) {
            /* Overwriting the load output. */
            goto fail;
        }
        if (mv_src != ld_dst) {
            /* Copying a new input; constrain op_src to match the load. */
            op_src = ld_dst;
        }
        break;

    default:
        /* Put back and re-examine as operation. */
        --i;
    }

    /*
     * Expect the operation.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x300c: /* add Rm,Rn */
        op_opc = INDEX_op_add_i32;
        goto do_reg_op;
    case 0x2009: /* and Rm,Rn */
        op_opc = INDEX_op_and_i32;
        goto do_reg_op;
    case 0x200a: /* xor Rm,Rn */
        op_opc = INDEX_op_xor_i32;
        goto do_reg_op;
    case 0x200b: /* or Rm,Rn */
        op_opc = INDEX_op_or_i32;
    do_reg_op:
        /* The operation register should be as expected, and the
           other input cannot depend on the load. */
        if (op_dst != B11_8) {
            goto fail;
        }
        if (op_src < 0) {
            /* Unconstrainted input. */
            op_src = B7_4;
        } else if (op_src == B7_4) {
            /* Constrained input matched load.  All operations are
               commutative; "swap" them by "moving" the load output
               to the (implicit) first argument and the move source
               to the (explicit) second argument. */
            op_src = mv_src;
        } else {
            goto fail;
        }
        op_arg = REG(op_src);
        break;

    case 0x6007: /* not Rm,Rn */
        if (ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_dst = B11_8;
        /* NOT is encoded as XOR with all-ones. */
        op_opc = INDEX_op_xor_i32;
        op_arg = tcg_constant_i32(-1);
        break;

    case 0x7000 ... 0x700f: /* add #imm,Rn */
        if (op_dst != B11_8 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_add_i32;
        op_arg = tcg_constant_i32(B7_0s);
        break;

    case 0x3000: /* cmp/eq Rm,Rn */
        /* Looking for the middle of a compare-and-swap sequence,
           beginning with the compare.  Operands can be either order,
           but with only one overlapping the load. */
        if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32; /* placeholder */
        op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
        op_arg = REG(op_src);

        NEXT_INSN;
        switch (ctx->opcode & 0xff00) {
        case 0x8b00: /* bf label */
        case 0x8f00: /* bf/s label */
            /* The branch must target exactly the end of the region. */
            if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
                goto fail;
            }
            if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
                break;
            }
            /* We're looking to unconditionally modify Rn with the
               result of the comparison, within the delay slot of
               the branch.  This is used by older gcc. */
            NEXT_INSN;
            if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
                mt_dst = B11_8;
            } else {
                goto fail;
            }
            break;

        default:
            goto fail;
        }
        break;

    case 0x2008: /* tst Rm,Rn */
        /* Looking for a compare-and-swap against zero. */
        if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32;
        op_arg = tcg_constant_i32(0);

        NEXT_INSN;
        if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
            || pc + (i + 1 + B7_0s) * 2 != pc_end) {
            goto fail;
        }
        break;

    default:
        /* Put back and re-examine as store. */
        --i;
    }

    /*
     * Expect the store.
     */
    /* The store must be the last insn. */
    if (i != max_insns - 1) {
        goto fail;
    }
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x2000: /* mov.b Rm,@Rn */
        st_mop = MO_UB;
        break;
    case 0x2001: /* mov.w Rm,@Rn */
        st_mop = MO_UW;
        break;
    case 0x2002: /* mov.l Rm,@Rn */
        st_mop = MO_UL;
        break;
    default:
        goto fail;
    }
    /* The store must match the load. */
    if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
        goto fail;
    }
    st_src = B7_4;

#undef NEXT_INSN

    /*
     * Emit the operation.
     */
    switch (op_opc) {
    case -1:
        /* No operation found.  Look for exchange pattern. */
        if (st_src == ld_dst || mv_src >= 0) {
            goto fail;
        }
        tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
                                ctx->memidx, ld_mop);
        break;

    case INDEX_op_add_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst && st_mop == MO_UL) {
            tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            if (op_dst != ld_dst) {
                /* Note that mop sizes < 4 cannot use add_fetch
                   because it won't carry into the higher bits. */
                tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
            }
        }
        break;

    case INDEX_op_and_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_or_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
            tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_xor_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_setcond_i32:
        if (st_src == ld_dst) {
            goto fail;
        }
        /* Compare-and-swap: T is set iff the stored comparison value
           matched memory. */
        tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
                                   REG(st_src), ctx->memidx, ld_mop);
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
        if (mt_dst >= 0) {
            /* The recognized movt in the branch delay slot. */
            tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* The entire region has been translated. */
    ctx->envflags &= ~TB_FLAG_GUSA_MASK;
    goto done;

 fail:
    qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
                  pc, pc_end);

    gen_restart_exclusive(ctx);

    /* We're not executing an instruction, but we must report one for the
       purposes of accounting within the TB.  We might as well report the
       entire region consumed via ctx->base.pc_next so that it's immediately
       available in the disassembly dump. */

 done:
    ctx->base.pc_next = pc_end;
    ctx->base.num_insns += max_insns - 1;

    /*
     * Emit insn_start to cover each of the insns in the region.
     * This matches an assert in tcg.c making sure that we have
     * tb->icount * insn_start.
     */
    for (i = 1; i < max_insns; ++i) {
        tcg_gen_insn_start(pc + i * 2, ctx->envflags);
    }
}
4bfa602b 2184#endif
fdf9b3e8 2185
fd1b3d38 2186static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
fdf9b3e8 2187{
fd1b3d38 2188 DisasContext *ctx = container_of(dcbase, DisasContext, base);
b77af26e 2189 CPUSH4State *env = cpu_env(cs);
be0e3d7a 2190 uint32_t tbflags;
fd1b3d38
EC
2191 int bound;
2192
be0e3d7a
RH
2193 ctx->tbflags = tbflags = ctx->base.tb->flags;
2194 ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
2195 ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
9854bc46
PB
2196 /* We don't know if the delayed pc came from a dynamic or static branch,
2197 so assume it is a dynamic branch. */
fd1b3d38
EC
2198 ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2199 ctx->features = env->features;
be0e3d7a
RH
2200 ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2201 ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2202 (tbflags & (1 << SR_RB))) * 0x10;
2203 ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2204
ab419fd8
RH
2205#ifdef CONFIG_USER_ONLY
2206 if (tbflags & TB_FLAG_GUSA_MASK) {
2207 /* In gUSA exclusive region. */
be0e3d7a
RH
2208 uint32_t pc = ctx->base.pc_next;
2209 uint32_t pc_end = ctx->base.tb->cs_base;
ab419fd8 2210 int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
be0e3d7a
RH
2211 int max_insns = (pc_end - pc) / 2;
2212
2213 if (pc != pc_end + backup || max_insns < 2) {
2214 /* This is a malformed gUSA region. Don't do anything special,
2215 since the interpreter is likely to get confused. */
ab419fd8
RH
2216 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2217 } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
be0e3d7a
RH
2218 /* Regardless of single-stepping or the end of the page,
2219 we must complete execution of the gUSA region while
2220 holding the exclusive lock. */
2221 ctx->base.max_insns = max_insns;
2222 return;
2223 }
2224 }
ab419fd8 2225#endif
4448a836
RH
2226
2227 /* Since the ISA is fixed-width, we can bound by the number
2228 of instructions remaining on the page. */
fd1b3d38
EC
2229 bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2230 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2231}
4448a836 2232
fd1b3d38
EC
/* No additional work is needed at the start of a TB for SH4. */
static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
4bfa602b 2236
fd1b3d38
EC
/*
 * Mark the start of one guest insn, recording the current pc and the
 * lazily-tracked env flags in the insn_start op.
 */
static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
}
b933066a 2243
fd1b3d38
EC
/*
 * Translate one guest insn.  In user mode, a gUSA region is instead
 * handled as a whole: either parsed into a single host atomic op, or
 * restarted under the exclusive lock.
 */
static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUSH4State *env = cpu_env(cs);
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

#ifdef CONFIG_USER_ONLY
    if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
        && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
        /*
         * We're in an gUSA region, and we have not already fallen
         * back on using an exclusive region.  Attempt to parse the
         * region into a single supported atomic operation.  Failure
         * is handled within the parser by raising an exception to
         * retry using an exclusive region.
         *
         * Parsing the region in one block conflicts with plugins,
         * so always use exclusive mode if plugins enabled.
         */
        if (ctx->base.plugin_enabled) {
            gen_restart_exclusive(ctx);
            ctx->base.pc_next += 2;
        } else {
            decode_gusa(ctx, env);
        }
        return;
    }
#endif

    /* SH4 insns are a fixed 2 bytes wide. */
    ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
    decode_opc(ctx);
    ctx->base.pc_next += 2;
}
2276
2277static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2278{
2279 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2280
ab419fd8 2281 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
4bfa602b 2282 /* Ending the region of exclusivity. Clear the bits. */
ab419fd8 2283 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
4bfa602b
RH
2284 }
2285
fd1b3d38 2286 switch (ctx->base.is_jmp) {
34cf5678 2287 case DISAS_STOP:
fd1b3d38 2288 gen_save_cpu_state(ctx, true);
52df5adc 2289 tcg_gen_exit_tb(NULL, 0);
34cf5678
RH
2290 break;
2291 case DISAS_NEXT:
fd1b3d38
EC
2292 case DISAS_TOO_MANY:
2293 gen_save_cpu_state(ctx, false);
2294 gen_goto_tb(ctx, 0, ctx->base.pc_next);
34cf5678
RH
2295 break;
2296 case DISAS_NORETURN:
2297 break;
2298 default:
2299 g_assert_not_reached();
fdf9b3e8 2300 }
fd1b3d38 2301}
823029f9 2302
8eb806a7
RH
2303static void sh4_tr_disas_log(const DisasContextBase *dcbase,
2304 CPUState *cs, FILE *logfile)
fd1b3d38 2305{
8eb806a7
RH
2306 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2307 target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
fd1b3d38 2308}
0a7df5da 2309
fd1b3d38
EC
/* Hooks connecting the generic translator loop to the SH4 translator. */
static const TranslatorOps sh4_tr_ops = {
    .init_disas_context = sh4_tr_init_disas_context,
    .tb_start = sh4_tr_tb_start,
    .insn_start = sh4_tr_insn_start,
    .translate_insn = sh4_tr_translate_insn,
    .tb_stop = sh4_tr_tb_stop,
    .disas_log = sh4_tr_disas_log,
};
2318
597f9b2d 2319void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
32f0c394 2320 vaddr pc, void *host_pc)
fd1b3d38
EC
2321{
2322 DisasContext ctx;
fdf9b3e8 2323
306c8721 2324 translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
fdf9b3e8 2325}