/*
 * SH4 translation
 *
 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DEBUG_DISAS

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "qemu/qemu-print.h"


typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;    /* should stay unmodified during the TB translation */
    uint32_t envflags;   /* should stay in sync with env->flags using TCG ops */
    int memidx;
    int gbank;
    int fbank;
    uint32_t delayed_pc;
    uint32_t features;

    uint16_t opcode;

    bool has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#define UNALIGN(C) 0
#endif

/* Target-specific values for ctx->base.is_jmp. */
/* We want to exit back to the cpu loop for some reason.
   Usually this is to recognize interrupts immediately.  */
#define DISAS_STOP    DISAS_TARGET_0

/* global register indexes */
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
static TCGv cpu_lock_addr, cpu_lock_value;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"

void sh4_translate_init(void)
{
    int i;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUSH4State, lock_addr),
                                           "_lock_addr_");
    cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, lock_value),
                                            "_lock_value_");

    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
}

void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;

    qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                 env->spc, env->ssr, env->gbr, env->vbr);
    qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                 env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        qemu_printf("r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & TB_FLAG_DELAY_SLOT) {
        qemu_printf("in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
        qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
        qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                     env->delayed_pc);
    }
}

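/*
 * SR is kept split for TCG purposes: the Q, M and T bits live in their own
 * variables (cpu_sr_q/m/t) while the remaining bits stay in cpu_sr.  The two
 * helpers below reassemble and split the architectural SR value accordingly.
 */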
static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_temp_free_i32(t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}

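/*
 * Write the lazily-tracked translation state (pc, delayed_pc, envflags)
 * back into the CPU state, e.g. before calling a helper that may raise
 * an exception.
 */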
static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_exit_tb(DisasContext *ctx)
{
    return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
}

static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (use_exit_tb(ctx)) {
        return false;
    }
    return translator_use_goto_tb(&ctx->base, dest);
}

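/*
 * Jump to 'dest': chain directly with goto_tb when use_goto_tb() allows it;
 * otherwise set PC and either exit to the cpu loop (inside a gUSA exclusive
 * region) or fall back to an indirect TB lookup.
 */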
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}

static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == -1) {
        /* Target is not statically known; it necessarily comes from a
           delayed jump, as immediate jumps are conditional jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep.  */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    ctx->base.is_jmp = DISAS_NORETURN;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction.  */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region.  */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
                (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

#define REG(x)     cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x)  cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)    cpu_fregs[(x) ^ ctx->fbank]

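/*
 * When FPSCR.SZ is set, fmov transfers a register pair; odd register numbers
 * name the XD (other-bank) pair.  XHACK() folds that odd/even encoding into
 * an fregs index so that FREG() and gen_{load,store}_fpr64() select the
 * intended bank.
 */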
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))

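/*
 * The CHECK_* macros below validate per-opcode constraints (delay slot,
 * privilege, FPU state, FPSCR.PR, SH4A-only) and jump to the common
 * do_illegal / do_illegal_slot / do_fpu_disabled labels in _decode_opc()
 * on failure.
 */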
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;                       \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {  \
        goto do_illegal; \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) { \
        goto do_fpu_disabled;           \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) { \
        goto do_illegal;           \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) { \
        goto do_illegal;              \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }

static void _decode_opc(DisasContext * ctx)
{
    /* This code tries to make movcal emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
       to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is used
       to flush the cache.  Here, the data written by movca.l is never
       written to memory, and the data written is just bogus.

       To simulate this, when we simulate movca.l we store the value to
       memory, but we also remember the previous content.  If we see ocbi,
       we check if a movca.l for that address was done previously.  If so,
       the write should not have hit memory, so we restore the previous
       content.  When we see an instruction that is neither movca.l
       nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores at the start of a TB, or
       if we already saw movca.l in this TB and did not flush stores
       yet.  */
    if (ctx->has_movcal)
    {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */)
        {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019: /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b: /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0028: /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048: /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008: /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038: /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b: /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) - 1;
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0058: /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018: /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd: /* frchg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf3fd: /* fschg */
        CHECK_FPSCR_PR_0
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0xf7fd: /* fpchg */
        CHECK_SH4A
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
        ctx->base.is_jmp = DISAS_STOP;
        return;
    case 0x0009: /* nop */
        return;
    case 0x001b: /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
        gen_helper_sleep(cpu_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000: /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x5000: /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0xe000: /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /*
         * Detect the start of a gUSA region (mov #-n, r15).
         * If so, update envflags and end the TB.  This will allow us
         * to see the end of the region (stored in R0) in the next TB.
         */
        if (B11_8 == 15 && B7_0s < 0 &&
            (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
            ctx->envflags =
                deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
            ctx->base.is_jmp = DISAS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000: /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xd000: /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x7000: /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000: /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    case 0xb000: /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000: /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001: /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUW | UNALIGN(ctx));
        return;
    case 0x2002: /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
                            MO_TEUL | UNALIGN(ctx));
        return;
    case 0x6000: /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001: /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        return;
    case 0x6002: /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        return;
    case 0x2004: /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
            tcg_temp_free(addr);
        }
        return;
    case 0x2005: /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2006: /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x6004: /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if ( B11_8 != B7_4 )
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        return;
    case 0x6005: /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESW | UNALIGN(ctx));
        if ( B11_8 != B7_4 )
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        return;
    case 0x6006: /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
                            MO_TESL | UNALIGN(ctx));
        if ( B11_8 != B7_4 )
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        return;
    case 0x0004: /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x0005: /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x0006: /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
                                MO_TEUL | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x000c: /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x000d: /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x000e: /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
                                MO_TESL | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x6008: /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_bswap16_i32(low, REG(B7_4), 0);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
            tcg_temp_free(low);
        }
        return;
    case 0x6009: /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d: /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x300c: /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
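    /* addc: Rn += Rm + T, with T receiving the carry out; computed as two
       32-bit add-with-carry (add2) steps rather than a 64-bit addition. */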
    case 0x300e: /* addc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300f: /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B7_4), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2009: /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000: /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003: /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007: /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006: /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002: /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c: /* cmp/str Rm,Rn */
        {
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
            tcg_temp_free(cmp2);
            tcg_temp_free(cmp1);
        }
        return;
    case 0x2007: /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);         /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);          /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m);      /* SR_T */
        return;
    case 0x3004: /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_const_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M.  To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);

            tcg_temp_free(zero);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
            tcg_temp_free(t0);
        }
        return;
    case 0x300d: /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005: /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e: /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f: /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c: /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d: /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f: /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f: /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007: /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f: /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x200e: /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x600b: /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a: /* negc Rm,Rn */
        {
            TCGv t0 = tcg_const_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
        }
        return;
    case 0x6007: /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b: /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c: /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x400d: /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x3008: /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a: /* subc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300b: /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B7_4));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2008: /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0x200a: /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
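    /*
     * For the fmov group below, FPSCR.SZ selects between single 32-bit FR
     * moves and 64-bit DR/XD pair moves; XHACK() maps odd register numbers
     * onto the extended-bank pairs.
     */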
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            int xsrc = XHACK(B7_4);
            int xdst = XHACK(B11_8);
            tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
            tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
        } else {
            tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, XHACK(B7_4));
            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEUQ);
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEUQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_subi_i32(addr, REG(B11_8), 8);
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_subi_i32(addr, REG(B11_8), 4);
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEUQ);
                gen_store_fpr64(ctx, fp, XHACK(B11_8));
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEUQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110) {
                    goto do_illegal;
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp0, B11_8);
                gen_load_fpr64(ctx, fp1, B7_4);
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(ctx, fp0, B11_8);
                tcg_temp_free_i64(fp0);
                tcg_temp_free_i64(fp1);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,RM,Rn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        gen_helper_fmac_FT(FREG(B11_8), cpu_env,
                           FREG(0), FREG(B7_4), FREG(B11_8));
        return;
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900: /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00: /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0x8b00: /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
        return;
    case 0x8f00: /* bf/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
        return;
    case 0x8900: /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
        return;
    case 0x8d00: /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
        ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
        ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
        return;
    case 0x8800: /* cmp/eq #imm,R0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
        return;
    case 0xc400: /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc500: /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc600: /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xc000: /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc100: /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc200: /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x8000: /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8100: /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
                                MO_TEUW | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0x8400: /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8500: /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
                                MO_TESW | UNALIGN(ctx));
            tcg_temp_free(addr);
        }
        return;
    case 0xc700: /* mova @(disp,PC),R0 */
        tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
                                  4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00: /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00: /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0xc300: /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            gen_save_cpu_state(ctx, true);
            imm = tcg_const_i32(B7_0);
            gen_helper_trapa(cpu_env, imm);
            tcg_temp_free(imm);
            ctx->base.is_jmp = DISAS_NORETURN;
        }
        return;
    case 0xc800: /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xcc00: /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xca00: /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00: /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf08f) {
    case 0x408e: /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087: /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082: /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083: /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf0ff) {
    case 0x0023: /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x0003: /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x4015: /* cmp/pl Rn */
        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4011: /* cmp/pz Rn */
        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4010: /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x402b: /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x400b: /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= TB_FLAG_DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) - 1;
        return;
    case 0x400e: /* ldc Rm,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            ctx->base.is_jmp = DISAS_STOP;
        }
        return;
    case 0x4007: /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_andi_i32(val, val, 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->base.is_jmp = DISAS_STOP;
        }
        return;
    case 0x0002: /* stc SR,Rn */
        CHECK_PRIVILEGED
        gen_read_sr(REG(B11_8));
        return;
    case 0x4003: /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            TCGv val = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            gen_read_sr(val);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
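/* LD/ST/LDST expand to the ldc/lds handlers (register and @Rm+ forms) and
   the stc/sts handlers (register and @-Rn forms) for the control and system
   registers listed below. */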
8e9b0678 1466#define LD(reg,ldnum,ldpnum,prechk) \
fdf9b3e8 1467 case ldnum: \
fe25591e 1468 prechk \
7efbe241 1469 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
fdf9b3e8
FB
1470 return; \
1471 case ldpnum: \
fe25591e 1472 prechk \
3376f415 1473 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
7efbe241 1474 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
8e9b0678
AC
1475 return;
1476#define ST(reg,stnum,stpnum,prechk) \
fdf9b3e8 1477 case stnum: \
fe25591e 1478 prechk \
7efbe241 1479 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
fdf9b3e8
FB
1480 return; \
1481 case stpnum: \
fe25591e 1482 prechk \
c55497ec 1483 { \
3101e99c 1484 TCGv addr = tcg_temp_new(); \
c55497ec 1485 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
3376f415 1486 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
3101e99c 1487 tcg_gen_mov_i32(REG(B11_8), addr); \
c55497ec 1488 tcg_temp_free(addr); \
86e0abc7 1489 } \
fdf9b3e8 1490 return;
8e9b0678
AC
1491#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1492 LD(reg,ldnum,ldpnum,prechk) \
1493 ST(reg,stnum,stpnum,prechk)
fe25591e
AJ
1494 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1495 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1496 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1497 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
935fc175 1498 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
ccae24d4 1499 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
fe25591e
AJ
1500 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1501 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1502 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1503 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
d8299bcc 1504 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
390af821 1505 case 0x406a: /* lds Rm,FPSCR */
d8299bcc 1506 CHECK_FPU_ENABLED
485d0035 1507 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
6f1c2af6 1508 ctx->base.is_jmp = DISAS_STOP;
390af821
AJ
1509 return;
1510 case 0x4066: /* lds.l @Rm+,FPSCR */
d8299bcc 1511 CHECK_FPU_ENABLED
c55497ec 1512 {
a7812ae4 1513 TCGv addr = tcg_temp_new();
3376f415 1514 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
c55497ec 1515 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
485d0035 1516 gen_helper_ld_fpscr(cpu_env, addr);
c55497ec 1517 tcg_temp_free(addr);
6f1c2af6 1518 ctx->base.is_jmp = DISAS_STOP;
c55497ec 1519 }
390af821
AJ
1520 return;
1521 case 0x006a: /* sts FPSCR,Rn */
d8299bcc 1522 CHECK_FPU_ENABLED
c55497ec 1523 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
390af821
AJ
1524 return;
1525 case 0x4062: /* sts FPSCR,@-Rn */
d8299bcc 1526 CHECK_FPU_ENABLED
c55497ec
AJ
1527 {
1528 TCGv addr, val;
a7812ae4 1529 val = tcg_temp_new();
c55497ec 1530 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
a7812ae4 1531 addr = tcg_temp_new();
c55497ec 1532 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 1533 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
3101e99c 1534 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec
AJ
1535 tcg_temp_free(addr);
1536 tcg_temp_free(val);
c55497ec 1537 }
390af821 1538 return;
fdf9b3e8 1539 case 0x00c3: /* movca.l R0,@Rm */
852d481f
EI
1540 {
1541 TCGv val = tcg_temp_new();
3376f415 1542 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
485d0035 1543 gen_helper_movcal(cpu_env, REG(B11_8), val);
3376f415 1544 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
e691e0ed 1545 tcg_temp_free(val);
852d481f
EI
1546 }
1547 ctx->has_movcal = 1;
fdf9b3e8 1548 return;
143021b2 1549 case 0x40a9: /* movua.l @Rm,R0 */
ccae24d4 1550 CHECK_SH4A
143021b2 1551 /* Load non-boundary-aligned data */
ccae24d4
RH
1552 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1553 MO_TEUL | MO_UNALN);
1554 return;
143021b2 1555 case 0x40e9: /* movua.l @Rm+,R0 */
ccae24d4 1556 CHECK_SH4A
143021b2 1557 /* Load non-boundary-aligned data */
ccae24d4
RH
1558 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1559 MO_TEUL | MO_UNALN);
1560 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1561 return;
fdf9b3e8 1562 case 0x0029: /* movt Rn */
34086945 1563 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
fdf9b3e8 1564 return;
66c7c806
AJ
1565 case 0x0073:
1566 /* MOVCO.L
f85da308
RH
1567 * LDST -> T
1568 * If (T == 1) R0 -> (Rn)
1569 * 0 -> LDST
1570 *
1571 * The above description doesn't work in a parallel context.
1572 * Since we currently support no SMP boards, this implies user-mode.
1573 * But we can still support the official mechanism while user-mode
1574 * is single-threaded. */
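/*
 * Illustrative guest-side usage (a sketch, not code from this file): the
 * usual SH4A load-locked/store-conditional retry loop pairs movli.l with
 * movco.l, e.g. an atomic increment of the word at @r1:
 *
 *   1: movli.l @r1, r0      ! load word, set LDST
 *      add     #1, r0
 *      movco.l r0, @r1      ! store only if LDST is still set; T = success
 *      bf      1b           ! T == 0 means the store failed, so retry
 *
 * Under CF_PARALLEL the store below is implemented as a cmpxchg against the
 * (address, value) pair that movli.l remembers in cpu_lock_addr/_value.
 */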
ccae24d4
RH
1575 CHECK_SH4A
1576 {
f85da308
RH
1577 TCGLabel *fail = gen_new_label();
1578 TCGLabel *done = gen_new_label();
1579
6f1c2af6 1580 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1581 TCGv tmp;
1582
1583 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1584 cpu_lock_addr, fail);
1585 tmp = tcg_temp_new();
1586 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1587 REG(0), ctx->memidx, MO_TEUL);
1588 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1589 tcg_temp_free(tmp);
1590 } else {
1591 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1592 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1593 tcg_gen_movi_i32(cpu_sr_t, 1);
1594 }
1595 tcg_gen_br(done);
1596
1597 gen_set_label(fail);
1598 tcg_gen_movi_i32(cpu_sr_t, 0);
1599
1600 gen_set_label(done);
1601 tcg_gen_movi_i32(cpu_lock_addr, -1);
ccae24d4 1602 }
f85da308 1603 return;
66c7c806
AJ
1604 case 0x0063:
1605 /* MOVLI.L @Rm,R0
f85da308
RH
1606 * 1 -> LDST
1607 * (Rm) -> R0
1608 * When an interrupt/exception
1609 * occurs, 0 -> LDST
1610 *
1611 * In a parallel context, we must also save the loaded value
1612 * for use with the cmpxchg that we'll use with movco.l. */
ccae24d4 1613 CHECK_SH4A
6f1c2af6 1614 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1615 TCGv tmp = tcg_temp_new();
1616 tcg_gen_mov_i32(tmp, REG(B11_8));
1617 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1618 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1619 tcg_gen_mov_i32(cpu_lock_addr, tmp);
1620 tcg_temp_free(tmp);
1621 } else {
1622 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1623 tcg_gen_movi_i32(cpu_lock_addr, 0);
1624 }
ccae24d4 1625 return;
fdf9b3e8 1626 case 0x0093: /* ocbi @Rn */
c55497ec 1627 {
485d0035 1628 gen_helper_ocbi(cpu_env, REG(B11_8));
c55497ec 1629 }
fdf9b3e8 1630 return;
24988dc2 1631 case 0x00a3: /* ocbp @Rn */
fdf9b3e8 1632 case 0x00b3: /* ocbwb @Rn */
0cdb9554
AJ
1633 /* These instructions are supposed to do nothing in case of
1634 a cache miss. Given that we only partially emulate caches
1635 it is safe to simply ignore them. */
fdf9b3e8
FB
1636 return;
1637 case 0x0083: /* pref @Rn */
1638 return;
71968fa6 1639 case 0x00d3: /* prefi @Rn */
ccae24d4
RH
1640 CHECK_SH4A
1641 return;
71968fa6 1642 case 0x00e3: /* icbi @Rn */
ccae24d4
RH
1643 CHECK_SH4A
1644 return;
71968fa6 1645 case 0x00ab: /* synco */
ccae24d4
RH
1646 CHECK_SH4A
1647 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1648 return;
fdf9b3e8 1649 case 0x4024: /* rotcl Rn */
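/* Rotate left through the T bit: T receives the old bit 31 and bit 0
   receives the old T (rotcr below is the mirror image). */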
c55497ec 1650 {
a7812ae4 1651 TCGv tmp = tcg_temp_new();
34086945
AJ
1652 tcg_gen_mov_i32(tmp, cpu_sr_t);
1653 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
c55497ec 1654 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1655 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec
AJ
1656 tcg_temp_free(tmp);
1657 }
fdf9b3e8
FB
1658 return;
1659 case 0x4025: /* rotcr Rn */
c55497ec 1660 {
a7812ae4 1661 TCGv tmp = tcg_temp_new();
34086945
AJ
1662 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1663 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
c55497ec 1664 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
34086945 1665 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec
AJ
1666 tcg_temp_free(tmp);
1667 }
fdf9b3e8
FB
1668 return;
1669 case 0x4004: /* rotl Rn */
2411fde9 1670 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1671 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
fdf9b3e8
FB
1672 return;
1673 case 0x4005: /* rotr Rn */
34086945 1674 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
2411fde9 1675 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1676 return;
1677 case 0x4000: /* shll Rn */
1678 case 0x4020: /* shal Rn */
34086945 1679 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
7efbe241 1680 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1681 return;
1682 case 0x4021: /* shar Rn */
34086945 1683 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1684 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1685 return;
1686 case 0x4001: /* shlr Rn */
34086945 1687 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1688 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1689 return;
1690 case 0x4008: /* shll2 Rn */
7efbe241 1691 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1692 return;
1693 case 0x4018: /* shll8 Rn */
7efbe241 1694 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1695 return;
1696 case 0x4028: /* shll16 Rn */
7efbe241 1697 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1698 return;
1699 case 0x4009: /* shlr2 Rn */
7efbe241 1700 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1701 return;
1702 case 0x4019: /* shlr8 Rn */
7efbe241 1703 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1704 return;
1705 case 0x4029: /* shlr16 Rn */
7efbe241 1706 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1707 return;
1708 case 0x401b: /* tas.b @Rn */
cb32f179
AJ
1709 {
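/* TAS.B: read the byte at @Rn, set T if it was zero, then write the byte
   back with bit 7 set. A single atomic fetch-OR of 0x80 performs the
   read-modify-write; T is then derived from the returned (old) value. */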
1710 TCGv val = tcg_const_i32(0x80);
1711 tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1712 ctx->memidx, MO_UB);
34086945 1713 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
cb32f179
AJ
1714 tcg_temp_free(val);
1715 }
1716 return;
e67888a7 1717 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
f6198371 1718 CHECK_FPU_ENABLED
7c9f7038 1719 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
eda9b09b 1720 return;
e67888a7 1721 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
f6198371 1722 CHECK_FPU_ENABLED
7c9f7038 1723 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
eda9b09b 1724 return;
e67888a7 1725 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
f6198371 1726 CHECK_FPU_ENABLED
a6215749 1727 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1728 TCGv_i64 fp;
93dc9c89
RH
1729 if (ctx->opcode & 0x0100) {
1730 goto do_illegal;
1731 }
a7812ae4 1732 fp = tcg_temp_new_i64();
485d0035 1733 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1734 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1735 tcg_temp_free_i64(fp);
ea6cf6be
TS
1736 } else {
7c9f7038 1738 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
ea6cf6be
TS
1739 }
1740 return;
e67888a7 1741 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
f6198371 1742 CHECK_FPU_ENABLED
a6215749 1743 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1744 TCGv_i64 fp;
93dc9c89
RH
1745 if (ctx->opcode & 0x0100) {
1746 goto do_illegal;
1747 }
a7812ae4 1748 fp = tcg_temp_new_i64();
1e0b21d8 1749 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1750 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
a7812ae4 1751 tcg_temp_free_i64(fp);
ea6cf6be
TS
1752 } else {
7c9f7038 1754 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
ea6cf6be
TS
1755 }
1756 return;
24988dc2 1757 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
f6198371 1758 CHECK_FPU_ENABLED
7c9f7038 1759 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
24988dc2 1760 return;
57f5c1b0 1761 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
f6198371 1762 CHECK_FPU_ENABLED
7c9f7038 1763 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
24988dc2
AJ
1764 return;
1765 case 0xf06d: /* fsqrt FRn */
f6198371 1766 CHECK_FPU_ENABLED
a6215749 1767 if (ctx->tbflags & FPSCR_PR) {
93dc9c89
RH
1768 if (ctx->opcode & 0x0100) {
1769 goto do_illegal;
1770 }
a7812ae4 1771 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1772 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1773 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1e0b21d8 1774 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1775 tcg_temp_free_i64(fp);
24988dc2 1776 } else {
7c9f7038 1777 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2
AJ
1778 }
1779 return;
1780 case 0xf07d: /* fsrra FRn */
f6198371 1781 CHECK_FPU_ENABLED
11b7aa23
RH
1782 CHECK_FPSCR_PR_0
1783 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2 1784 break;
e67888a7 1785 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
f6198371 1786 CHECK_FPU_ENABLED
7e9f7ca8
RH
1787 CHECK_FPSCR_PR_0
1788 tcg_gen_movi_i32(FREG(B11_8), 0);
1789 return;
e67888a7 1790 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
f6198371 1791 CHECK_FPU_ENABLED
7e9f7ca8
RH
1792 CHECK_FPSCR_PR_0
1793 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1794 return;
24988dc2 1795 case 0xf0ad: /* fcnvsd FPUL,DRn */
f6198371 1796 CHECK_FPU_ENABLED
cc4ba6a9 1797 {
a7812ae4 1798 TCGv_i64 fp = tcg_temp_new_i64();
485d0035 1799 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1800 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1801 tcg_temp_free_i64(fp);
cc4ba6a9 1802 }
24988dc2
AJ
1803 return;
1804 case 0xf0bd: /* fcnvds DRn,FPUL */
f6198371 1805 CHECK_FPU_ENABLED
cc4ba6a9 1806 {
a7812ae4 1807 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1808 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1809 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
a7812ae4 1810 tcg_temp_free_i64(fp);
cc4ba6a9 1811 }
24988dc2 1812 return;
af8c2bde
AJ
1813 case 0xf0ed: /* fipr FVm,FVn */
1814 CHECK_FPU_ENABLED
7e9f7ca8
RH
1815 CHECK_FPSCR_PR_1
1816 {
1817 TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1818 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
485d0035 1819 gen_helper_fipr(cpu_env, m, n);
af8c2bde
AJ
1820 tcg_temp_free(m);
1821 tcg_temp_free(n);
1822 return;
1823 }
1824 break;
17075f10
AJ
1825 case 0xf0fd: /* ftrv XMTRX,FVn */
1826 CHECK_FPU_ENABLED
7e9f7ca8
RH
1827 CHECK_FPSCR_PR_1
1828 {
1829 if ((ctx->opcode & 0x0300) != 0x0100) {
1830 goto do_illegal;
1831 }
1832 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
485d0035 1833 gen_helper_ftrv(cpu_env, n);
17075f10
AJ
1834 tcg_temp_free(n);
1835 return;
1836 }
1837 break;
fdf9b3e8 1838 }
bacc637a 1839#if 0
fdf9b3e8 1840 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
6f1c2af6 1841 ctx->opcode, ctx->base.pc_next);
bacc637a
AJ
1842 fflush(stderr);
1843#endif
6b98213d 1844 do_illegal:
ab419fd8 1845 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
dec16c6e
RH
1846 do_illegal_slot:
1847 gen_save_cpu_state(ctx, true);
485d0035 1848 gen_helper_raise_slot_illegal_instruction(cpu_env);
86865c5f 1849 } else {
dec16c6e 1850 gen_save_cpu_state(ctx, true);
485d0035 1851 gen_helper_raise_illegal_instruction(cpu_env);
86865c5f 1852 }
6f1c2af6 1853 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042
RH
1854 return;
1855
1856 do_fpu_disabled:
1857 gen_save_cpu_state(ctx, true);
ab419fd8 1858 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
dec4f042
RH
1859 gen_helper_raise_slot_fpu_disable(cpu_env);
1860 } else {
1861 gen_helper_raise_fpu_disable(cpu_env);
1862 }
6f1c2af6 1863 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042 1864 return;
823029f9
TS
1865}
1866
b1d8e52e 1867static void decode_opc(DisasContext * ctx)
823029f9 1868{
a6215749 1869 uint32_t old_flags = ctx->envflags;
823029f9
TS
1870
1871 _decode_opc(ctx);
1872
ab419fd8 1873 if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
39682608 1874 /* go out of the delay slot */
ab419fd8 1875 ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
4bfa602b
RH
1876
1877 /* When in an exclusive region, we must continue to the end
1878 for conditional branches. */
ab419fd8
RH
1879 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1880 && old_flags & TB_FLAG_DELAY_SLOT_COND) {
4bfa602b
RH
1881 gen_delayed_conditional_jump(ctx);
1882 return;
1883 }
1884 /* Otherwise this is probably an invalid gUSA region.
1885 Drop the GUSA bits so the next TB doesn't see them. */
ab419fd8 1886 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
4bfa602b 1887
ac9707ea 1888 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
ab419fd8 1889 if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
823029f9 1890 gen_delayed_conditional_jump(ctx);
be53081a 1891 } else {
823029f9
TS
1892 gen_jump(ctx);
1893 }
4bfa602b
RH
1894 }
1895}
1896
1897#ifdef CONFIG_USER_ONLY
1898/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1899 Upon an interrupt, a real kernel would simply notice magic values in
1900 the registers and reset the PC to the start of the sequence.
1901
1902 For QEMU, we cannot do this in quite the same way. Instead, we notice
1903 the normal start of such a sequence (mov #-x,r15). While we can handle
1904 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1905 sequences and transform them into atomic operations as seen by the host.
1906*/
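/* Illustrative shape of such a region, as typically emitted by SH4
   Linux/libc (a sketch under the usual gUSA convention; register choices
   are arbitrary, not code from this file):

        mova    1f, r0         ! r0  = address of the end of the region
        mov     r15, r1        ! r1  = saved stack pointer
        mov     #-6, r15       ! r15 = -(length of 0:..1:), marks "in gUSA"
     0: mov.l   @r2, r3        ! load
        add     #1, r3         ! operate
        mov.l   r3, @r2        ! store
     1: mov     r1, r15        ! restore sp, region complete

   decode_gusa() below tries to map the 0:..1: body onto one host atomic
   operation. */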
be0e3d7a 1907static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
4bfa602b 1908{
d6a6cffd
RH
1909 uint16_t insns[5];
1910 int ld_adr, ld_dst, ld_mop;
1911 int op_dst, op_src, op_opc;
1912 int mv_src, mt_dst, st_src, st_mop;
1913 TCGv op_arg;
6f1c2af6
RH
1914 uint32_t pc = ctx->base.pc_next;
1915 uint32_t pc_end = ctx->base.tb->cs_base;
4bfa602b 1916 int max_insns = (pc_end - pc) / 2;
d6a6cffd 1917 int i;
4bfa602b 1918
d6a6cffd
RH
1919 /* The state machine below will consume only a few insns.
1920 If there are more than that in a region, fail now. */
1921 if (max_insns > ARRAY_SIZE(insns)) {
1922 goto fail;
1923 }
1924
1925 /* Read all of the insns for the region. */
1926 for (i = 0; i < max_insns; ++i) {
4e116893 1927 insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
d6a6cffd
RH
1928 }
1929
1930 ld_adr = ld_dst = ld_mop = -1;
1931 mv_src = -1;
1932 op_dst = op_src = op_opc = -1;
1933 mt_dst = -1;
1934 st_src = st_mop = -1;
f764718d 1935 op_arg = NULL;
d6a6cffd
RH
1936 i = 0;
1937
1938#define NEXT_INSN \
1939 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1940
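/*
 * Shapes accepted below (a summary; the individual steps enforce the exact
 * register constraints):
 *
 *   load ; [mov] ; add/and/or/xor/not/add #imm ; store      -> atomic op-fetch
 *   load ; store (different registers)                      -> atomic exchange
 *   load ; cmp/eq or tst ; bf/bt to pc_end [; movt] ; store -> atomic cmpxchg
 *
 * Anything else takes the "fail" path and is re-run as an exclusive region.
 */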
1941 /*
1942 * Expect a load to begin the region.
1943 */
1944 NEXT_INSN;
1945 switch (ctx->opcode & 0xf00f) {
1946 case 0x6000: /* mov.b @Rm,Rn */
1947 ld_mop = MO_SB;
1948 break;
1949 case 0x6001: /* mov.w @Rm,Rn */
1950 ld_mop = MO_TESW;
1951 break;
1952 case 0x6002: /* mov.l @Rm,Rn */
1953 ld_mop = MO_TESL;
1954 break;
1955 default:
1956 goto fail;
1957 }
1958 ld_adr = B7_4;
1959 ld_dst = B11_8;
1960 if (ld_adr == ld_dst) {
1961 goto fail;
1962 }
1963 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1964 op_dst = ld_dst;
1965
1966 /*
1967 * Expect an optional register move.
1968 */
1969 NEXT_INSN;
1970 switch (ctx->opcode & 0xf00f) {
1971 case 0x6003: /* mov Rm,Rn */
02b8e735 1972 /*
23b5d9fa 1973 * Here we want to recognize ld_dst being saved for later consumption,
02b8e735
PMD
1974 * or for another input register being copied so that ld_dst need not
1975 * be clobbered during the operation.
1976 */
d6a6cffd
RH
1977 op_dst = B11_8;
1978 mv_src = B7_4;
1979 if (op_dst == ld_dst) {
1980 /* Overwriting the load output. */
1981 goto fail;
1982 }
1983 if (mv_src != ld_dst) {
1984 /* Copying a new input; constrain op_src to match the load. */
1985 op_src = ld_dst;
1986 }
1987 break;
1988
1989 default:
1990 /* Put back and re-examine as operation. */
1991 --i;
1992 }
1993
1994 /*
1995 * Expect the operation.
1996 */
1997 NEXT_INSN;
1998 switch (ctx->opcode & 0xf00f) {
1999 case 0x300c: /* add Rm,Rn */
2000 op_opc = INDEX_op_add_i32;
2001 goto do_reg_op;
2002 case 0x2009: /* and Rm,Rn */
2003 op_opc = INDEX_op_and_i32;
2004 goto do_reg_op;
2005 case 0x200a: /* xor Rm,Rn */
2006 op_opc = INDEX_op_xor_i32;
2007 goto do_reg_op;
2008 case 0x200b: /* or Rm,Rn */
2009 op_opc = INDEX_op_or_i32;
2010 do_reg_op:
2011 /* The operation register should be as expected, and the
2012 other input cannot depend on the load. */
2013 if (op_dst != B11_8) {
2014 goto fail;
2015 }
2016 if (op_src < 0) {
2017 /* Unconstrained input. */
2018 op_src = B7_4;
2019 } else if (op_src == B7_4) {
2020 /* Constrained input matched load. All operations are
2021 commutative; "swap" them by "moving" the load output
2022 to the (implicit) first argument and the move source
2023 to the (explicit) second argument. */
2024 op_src = mv_src;
2025 } else {
2026 goto fail;
2027 }
2028 op_arg = REG(op_src);
2029 break;
2030
2031 case 0x6007: /* not Rm,Rn */
2032 if (ld_dst != B7_4 || mv_src >= 0) {
2033 goto fail;
2034 }
2035 op_dst = B11_8;
2036 op_opc = INDEX_op_xor_i32;
2037 op_arg = tcg_const_i32(-1);
2038 break;
2039
2040 case 0x7000 ... 0x700f: /* add #imm,Rn */
2041 if (op_dst != B11_8 || mv_src >= 0) {
2042 goto fail;
2043 }
2044 op_opc = INDEX_op_add_i32;
2045 op_arg = tcg_const_i32(B7_0s);
2046 break;
2047
2048 case 0x3000: /* cmp/eq Rm,Rn */
2049 /* Looking for the middle of a compare-and-swap sequence,
2050 beginning with the compare. Operands can be in either order,
2051 but with only one overlapping the load. */
2052 if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
2053 goto fail;
2054 }
2055 op_opc = INDEX_op_setcond_i32; /* placeholder */
2056 op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2057 op_arg = REG(op_src);
2058
2059 NEXT_INSN;
2060 switch (ctx->opcode & 0xff00) {
2061 case 0x8b00: /* bf label */
2062 case 0x8f00: /* bf/s label */
2063 if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2064 goto fail;
2065 }
2066 if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2067 break;
2068 }
2069 /* We're looking to unconditionally modify Rn with the
2070 result of the comparison, within the delay slot of
2071 the branch. This is used by older gcc. */
2072 NEXT_INSN;
2073 if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2074 mt_dst = B11_8;
2075 } else {
2076 goto fail;
2077 }
2078 break;
2079
2080 default:
2081 goto fail;
2082 }
2083 break;
2084
2085 case 0x2008: /* tst Rm,Rn */
2086 /* Looking for a compare-and-swap against zero. */
2087 if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2088 goto fail;
2089 }
2090 op_opc = INDEX_op_setcond_i32;
2091 op_arg = tcg_const_i32(0);
2092
2093 NEXT_INSN;
2094 if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2095 || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2096 goto fail;
2097 }
2098 break;
2099
2100 default:
2101 /* Put back and re-examine as store. */
2102 --i;
2103 }
2104
2105 /*
2106 * Expect the store.
2107 */
2108 /* The store must be the last insn. */
2109 if (i != max_insns - 1) {
2110 goto fail;
2111 }
2112 NEXT_INSN;
2113 switch (ctx->opcode & 0xf00f) {
2114 case 0x2000: /* mov.b Rm,@Rn */
2115 st_mop = MO_UB;
2116 break;
2117 case 0x2001: /* mov.w Rm,@Rn */
2118 st_mop = MO_UW;
2119 break;
2120 case 0x2002: /* mov.l Rm,@Rn */
2121 st_mop = MO_UL;
2122 break;
2123 default:
2124 goto fail;
2125 }
2126 /* The store must match the load. */
2127 if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2128 goto fail;
2129 }
2130 st_src = B7_4;
2131
2132#undef NEXT_INSN
2133
2134 /*
2135 * Emit the operation.
2136 */
d6a6cffd
RH
2137 switch (op_opc) {
2138 case -1:
2139 /* No operation found. Look for exchange pattern. */
2140 if (st_src == ld_dst || mv_src >= 0) {
2141 goto fail;
2142 }
2143 tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2144 ctx->memidx, ld_mop);
2145 break;
2146
2147 case INDEX_op_add_i32:
2148 if (op_dst != st_src) {
2149 goto fail;
2150 }
2151 if (op_dst == ld_dst && st_mop == MO_UL) {
2152 tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2153 op_arg, ctx->memidx, ld_mop);
2154 } else {
2155 tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2156 op_arg, ctx->memidx, ld_mop);
2157 if (op_dst != ld_dst) {
2158 /* Note that mop sizes < 4 cannot use add_fetch
2159 because it won't carry into the higher bits. */
2160 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2161 }
2162 }
2163 break;
2164
2165 case INDEX_op_and_i32:
2166 if (op_dst != st_src) {
2167 goto fail;
2168 }
2169 if (op_dst == ld_dst) {
2170 tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2171 op_arg, ctx->memidx, ld_mop);
2172 } else {
2173 tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2174 op_arg, ctx->memidx, ld_mop);
2175 tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2176 }
2177 break;
2178
2179 case INDEX_op_or_i32:
2180 if (op_dst != st_src) {
2181 goto fail;
2182 }
2183 if (op_dst == ld_dst) {
2184 tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2185 op_arg, ctx->memidx, ld_mop);
2186 } else {
2187 tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2188 op_arg, ctx->memidx, ld_mop);
2189 tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2190 }
2191 break;
2192
2193 case INDEX_op_xor_i32:
2194 if (op_dst != st_src) {
2195 goto fail;
2196 }
2197 if (op_dst == ld_dst) {
2198 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2199 op_arg, ctx->memidx, ld_mop);
2200 } else {
2201 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2202 op_arg, ctx->memidx, ld_mop);
2203 tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2204 }
2205 break;
2206
2207 case INDEX_op_setcond_i32:
2208 if (st_src == ld_dst) {
2209 goto fail;
2210 }
2211 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2212 REG(st_src), ctx->memidx, ld_mop);
2213 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2214 if (mt_dst >= 0) {
2215 tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2216 }
2217 break;
2218
2219 default:
2220 g_assert_not_reached();
2221 }
2222
2223 /* If op_src is not a valid register, then op_arg was a constant. */
f764718d 2224 if (op_src < 0 && op_arg) {
d6a6cffd
RH
2225 tcg_temp_free_i32(op_arg);
2226 }
2227
2228 /* The entire region has been translated. */
ab419fd8 2229 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
6f1c2af6 2230 ctx->base.pc_next = pc_end;
be0e3d7a
RH
2231 ctx->base.num_insns += max_insns - 1;
2232 return;
d6a6cffd
RH
2233
2234 fail:
4bfa602b
RH
2235 qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2236 pc, pc_end);
2237
2238 /* Restart with the EXCLUSIVE bit set, within a TB run via
2239 cpu_exec_step_atomic holding the exclusive lock. */
ab419fd8 2240 ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
4bfa602b
RH
2241 gen_save_cpu_state(ctx, false);
2242 gen_helper_exclusive(cpu_env);
6f1c2af6 2243 ctx->base.is_jmp = DISAS_NORETURN;
4bfa602b
RH
2244
2245 /* We're not executing an instruction, but we must report one for the
2246 purposes of accounting within the TB. We might as well report the
6f1c2af6
RH
2247 entire region consumed via ctx->base.pc_next so that it's immediately
2248 available in the disassembly dump. */
2249 ctx->base.pc_next = pc_end;
be0e3d7a 2250 ctx->base.num_insns += max_insns - 1;
fdf9b3e8 2251}
4bfa602b 2252#endif
fdf9b3e8 2253
fd1b3d38 2254static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
fdf9b3e8 2255{
fd1b3d38 2256 DisasContext *ctx = container_of(dcbase, DisasContext, base);
9c489ea6 2257 CPUSH4State *env = cs->env_ptr;
be0e3d7a 2258 uint32_t tbflags;
fd1b3d38
EC
2259 int bound;
2260
be0e3d7a
RH
2261 ctx->tbflags = tbflags = ctx->base.tb->flags;
2262 ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
2263 ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
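/* i.e. MMU index 0 when SR.MD is set (privileged), 1 for user mode. */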
9854bc46
PB
2264 /* We don't know if the delayed pc came from a dynamic or static branch,
2265 so assume it is a dynamic branch. */
fd1b3d38
EC
2266 ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2267 ctx->features = env->features;
be0e3d7a
RH
2268 ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2269 ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2270 (tbflags & (1 << SR_RB))) * 0x10;
2271 ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2272
ab419fd8
RH
2273#ifdef CONFIG_USER_ONLY
2274 if (tbflags & TB_FLAG_GUSA_MASK) {
2275 /* In gUSA exclusive region. */
be0e3d7a
RH
2276 uint32_t pc = ctx->base.pc_next;
2277 uint32_t pc_end = ctx->base.tb->cs_base;
ab419fd8 2278 int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
be0e3d7a
RH
2279 int max_insns = (pc_end - pc) / 2;
2280
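/* backup is the signed byte recorded in the TB flags: the value the guest
   loaded into r15, i.e. minus the region length. For a well-formed region,
   pc_end + backup must land back on the first insn of the region, i.e. pc. */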
2281 if (pc != pc_end + backup || max_insns < 2) {
2282 /* This is a malformed gUSA region. Don't do anything special,
2283 since the interpreter is likely to get confused. */
ab419fd8
RH
2284 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2285 } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
be0e3d7a
RH
2286 /* Regardless of single-stepping or the end of the page,
2287 we must complete execution of the gUSA region while
2288 holding the exclusive lock. */
2289 ctx->base.max_insns = max_insns;
2290 return;
2291 }
2292 }
ab419fd8 2293#endif
4448a836
RH
2294
2295 /* Since the ISA is fixed-width, we can bound by the number
2296 of instructions remaining on the page. */
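/* For example, assuming 4 KiB target pages and pc_next == 0x8c000ffa:
   -(0x8c000ffa | 0xfffff000) = 6 bytes remain on the page, so at most
   3 more 2-byte insns are translated before crossing into the next page. */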
fd1b3d38
EC
2297 bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2298 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2299}
4448a836 2300
fd1b3d38
EC
2301static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
2302{
fd1b3d38 2303}
4bfa602b 2304
fd1b3d38
EC
2305static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2306{
2307 DisasContext *ctx = container_of(dcbase, DisasContext, base);
667b8e29 2308
fd1b3d38
EC
2309 tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2310}
b933066a 2311
fd1b3d38
EC
2312static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2313{
2314 CPUSH4State *env = cs->env_ptr;
2315 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4bfa602b 2316
be0e3d7a 2317#ifdef CONFIG_USER_ONLY
ab419fd8
RH
2318 if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
2319 && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
be0e3d7a
RH
2320 /* We're in a gUSA region, and we have not already fallen
2321 back on using an exclusive region. Attempt to parse the
2322 region into a single supported atomic operation. Failure
2323 is handled within the parser by raising an exception to
2324 retry using an exclusive region. */
2325 decode_gusa(ctx, env);
2326 return;
2327 }
2328#endif
2329
4e116893 2330 ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
fd1b3d38
EC
2331 decode_opc(ctx);
2332 ctx->base.pc_next += 2;
2333}
2334
2335static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2336{
2337 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2338
ab419fd8 2339 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
4bfa602b 2340 /* Ending the region of exclusivity. Clear the bits. */
ab419fd8 2341 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
4bfa602b
RH
2342 }
2343
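/* DISAS_STOP (set above after e.g. SR or FPSCR updates) ends the TB with a
   plain exit_tb so the new state is re-examined before more guest code runs;
   DISAS_NEXT / DISAS_TOO_MANY chain straight to the TB at pc_next. */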
fd1b3d38 2344 switch (ctx->base.is_jmp) {
34cf5678 2345 case DISAS_STOP:
fd1b3d38 2346 gen_save_cpu_state(ctx, true);
52df5adc 2347 tcg_gen_exit_tb(NULL, 0);
34cf5678
RH
2348 break;
2349 case DISAS_NEXT:
fd1b3d38
EC
2350 case DISAS_TOO_MANY:
2351 gen_save_cpu_state(ctx, false);
2352 gen_goto_tb(ctx, 0, ctx->base.pc_next);
34cf5678
RH
2353 break;
2354 case DISAS_NORETURN:
2355 break;
2356 default:
2357 g_assert_not_reached();
fdf9b3e8 2358 }
fd1b3d38 2359}
823029f9 2360
8eb806a7
RH
2361static void sh4_tr_disas_log(const DisasContextBase *dcbase,
2362 CPUState *cs, FILE *logfile)
fd1b3d38 2363{
8eb806a7
RH
2364 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2365 target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
fd1b3d38 2366}
0a7df5da 2367
fd1b3d38
EC
2368static const TranslatorOps sh4_tr_ops = {
2369 .init_disas_context = sh4_tr_init_disas_context,
2370 .tb_start = sh4_tr_tb_start,
2371 .insn_start = sh4_tr_insn_start,
fd1b3d38
EC
2372 .translate_insn = sh4_tr_translate_insn,
2373 .tb_stop = sh4_tr_tb_stop,
2374 .disas_log = sh4_tr_disas_log,
2375};
2376
597f9b2d 2377void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
306c8721 2378 target_ulong pc, void *host_pc)
fd1b3d38
EC
2379{
2380 DisasContext ctx;
fdf9b3e8 2381
306c8721 2382 translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
fdf9b3e8 2383}