]> git.proxmox.com Git - mirror_qemu.git/blame - target/sh4/translate.c
tcg: Move TCGHelperInfo and dependencies to tcg/helper-info.h
[mirror_qemu.git] / target / sh4 / translate.c
CommitLineData
fdf9b3e8
FB
1/*
2 * SH4 translation
5fafdf24 3 *
fdf9b3e8
FB
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
6faf2b6c 9 * version 2.1 of the License, or (at your option) any later version.
fdf9b3e8
FB
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
fdf9b3e8 18 */
fdf9b3e8 19
9d4c9946 20#include "qemu/osdep.h"
fdf9b3e8 21#include "cpu.h"
76cad711 22#include "disas/disas.h"
63c91552 23#include "exec/exec-all.h"
dcb32f1d 24#include "tcg/tcg-op.h"
f08b6170 25#include "exec/cpu_ldst.h"
2ef6175a
RH
26#include "exec/helper-proto.h"
27#include "exec/helper-gen.h"
4834871b 28#include "exec/translator.h"
508127e2 29#include "exec/log.h"
90c84c56 30#include "qemu/qemu-print.h"
a7e30d84
LV
31
32
typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;    /* should stay unmodified during the TB translation */
    uint32_t envflags;   /* should stay in sync with env->flags using TCG ops */
    int memidx;          /* memory index passed to tcg_gen_qemu_ld/st */
    int gbank;           /* XOR mask selecting the active general register
                            bank; applied by the REG()/ALTREG() macros */
    int fbank;           /* XOR mask selecting the active FP register bank;
                            applied by the FREG() macro */
    uint32_t delayed_pc; /* pending branch target; (uint32_t)-1 when the
                            target is not statically known */
    uint32_t features;   /* CPU feature bits, e.g. SH_FEATURE_SH4A */

    uint16_t opcode;     /* instruction word currently being translated */

    bool has_movcal;     /* a movca.l backup may still be live; cleared by
                            _decode_opc when another insn is seen */
} DisasContext;
48
fe25591e
AJ
49#if defined(CONFIG_USER_ONLY)
50#define IS_USER(ctx) 1
4da06fb3 51#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
fe25591e 52#else
a6215749 53#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
4da06fb3 54#define UNALIGN(C) 0
fe25591e
AJ
55#endif
56
6f1c2af6 57/* Target-specific values for ctx->base.is_jmp. */
4834871b
RH
58/* We want to exit back to the cpu loop for some reason.
59 Usually this is to recognize interrupts immediately. */
60#define DISAS_STOP DISAS_TARGET_0
823029f9 61
1e8864f7 62/* global register indexes */
3a3bb8d2 63static TCGv cpu_gregs[32];
1d565b21
AJ
64static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
65static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
3a8a44c4 66static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
f85da308
RH
67static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
68static TCGv cpu_lock_addr, cpu_lock_value;
66ba317c 69static TCGv cpu_fregs[32];
1000822b
AJ
70
71/* internal register indexes */
47b9f4d5 72static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
1e8864f7 73
022c62cb 74#include "exec/gen-icount.h"
2e70f6ef 75
aa7408ec 76void sh4_translate_init(void)
2e70f6ef 77{
1e8864f7 78 int i;
559dd74d 79 static const char * const gregnames[24] = {
1e8864f7
AJ
80 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
81 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
82 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
83 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
84 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 };
66ba317c
AJ
86 static const char * const fregnames[32] = {
87 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
88 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
89 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
90 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
91 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
92 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
93 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
94 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
95 };
1e8864f7 96
3a3bb8d2 97 for (i = 0; i < 24; i++) {
e1ccc054 98 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
73e5716c 99 offsetof(CPUSH4State, gregs[i]),
66ba317c 100 gregnames[i]);
3a3bb8d2
RH
101 }
102 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
988d7eaa 103
e1ccc054 104 cpu_pc = tcg_global_mem_new_i32(cpu_env,
73e5716c 105 offsetof(CPUSH4State, pc), "PC");
e1ccc054 106 cpu_sr = tcg_global_mem_new_i32(cpu_env,
73e5716c 107 offsetof(CPUSH4State, sr), "SR");
e1ccc054
RH
108 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
109 offsetof(CPUSH4State, sr_m), "SR_M");
110 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
111 offsetof(CPUSH4State, sr_q), "SR_Q");
112 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
113 offsetof(CPUSH4State, sr_t), "SR_T");
114 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
73e5716c 115 offsetof(CPUSH4State, ssr), "SSR");
e1ccc054 116 cpu_spc = tcg_global_mem_new_i32(cpu_env,
73e5716c 117 offsetof(CPUSH4State, spc), "SPC");
e1ccc054 118 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
73e5716c 119 offsetof(CPUSH4State, gbr), "GBR");
e1ccc054 120 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
73e5716c 121 offsetof(CPUSH4State, vbr), "VBR");
e1ccc054 122 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
73e5716c 123 offsetof(CPUSH4State, sgr), "SGR");
e1ccc054 124 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
73e5716c 125 offsetof(CPUSH4State, dbr), "DBR");
e1ccc054 126 cpu_mach = tcg_global_mem_new_i32(cpu_env,
73e5716c 127 offsetof(CPUSH4State, mach), "MACH");
e1ccc054 128 cpu_macl = tcg_global_mem_new_i32(cpu_env,
73e5716c 129 offsetof(CPUSH4State, macl), "MACL");
e1ccc054 130 cpu_pr = tcg_global_mem_new_i32(cpu_env,
73e5716c 131 offsetof(CPUSH4State, pr), "PR");
e1ccc054 132 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
73e5716c 133 offsetof(CPUSH4State, fpscr), "FPSCR");
e1ccc054 134 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
73e5716c 135 offsetof(CPUSH4State, fpul), "FPUL");
a7812ae4 136
e1ccc054 137 cpu_flags = tcg_global_mem_new_i32(cpu_env,
73e5716c 138 offsetof(CPUSH4State, flags), "_flags_");
e1ccc054 139 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
73e5716c 140 offsetof(CPUSH4State, delayed_pc),
a7812ae4 141 "_delayed_pc_");
47b9f4d5
AJ
142 cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
143 offsetof(CPUSH4State,
144 delayed_cond),
145 "_delayed_cond_");
f85da308
RH
146 cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
147 offsetof(CPUSH4State, lock_addr),
148 "_lock_addr_");
149 cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
150 offsetof(CPUSH4State, lock_value),
151 "_lock_value_");
1000822b 152
66ba317c 153 for (i = 0; i < 32; i++)
e1ccc054 154 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
73e5716c 155 offsetof(CPUSH4State, fregs[i]),
66ba317c 156 fregnames[i]);
2e70f6ef
PB
157}
158
90c84c56 159void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
fdf9b3e8 160{
878096ee
AF
161 SuperHCPU *cpu = SUPERH_CPU(cs);
162 CPUSH4State *env = &cpu->env;
fdf9b3e8 163 int i;
90c84c56
MA
164
165 qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
166 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
167 qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
168 env->spc, env->ssr, env->gbr, env->vbr);
169 qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
170 env->sgr, env->dbr, env->delayed_pc, env->fpul);
fdf9b3e8 171 for (i = 0; i < 24; i += 4) {
ad4052f1
IL
172 qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
173 i, env->gregs[i], i + 1, env->gregs[i + 1],
174 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
fdf9b3e8 175 }
ab419fd8 176 if (env->flags & TB_FLAG_DELAY_SLOT) {
ad4052f1
IL
177 qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
178 env->delayed_pc);
ab419fd8 179 } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
ad4052f1
IL
180 qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
181 env->delayed_pc);
ab419fd8 182 } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
90c84c56
MA
183 qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
184 env->delayed_pc);
fdf9b3e8
FB
185 }
186}
187
34086945
AJ
188static void gen_read_sr(TCGv dst)
189{
1d565b21
AJ
190 TCGv t0 = tcg_temp_new();
191 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
192 tcg_gen_or_i32(dst, dst, t0);
193 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
194 tcg_gen_or_i32(dst, dst, t0);
195 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
196 tcg_gen_or_i32(dst, cpu_sr, t0);
34086945
AJ
197}
198
/*
 * Scatter an architectural SR value from @src: cpu_sr keeps every bit
 * except Q, M and T (masked off), which are extracted into their own
 * globals.  Counterpart of gen_read_sr().  The mask is applied before
 * the extracts so the statement order is significant.
 */
static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}
207
ac9707ea
AJ
/*
 * Write the translator's lazily-tracked state back to the env globals.
 * @save_pc: also store ctx->base.pc_next into cpu_pc.
 */
static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    /* (uint32_t)-1 marks a delayed-branch target that is not static. */
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    /* Only emit the store when env->flags would actually change. */
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}
220
ec2eb22e
RH
221static inline bool use_exit_tb(DisasContext *ctx)
222{
ab419fd8 223 return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
ec2eb22e
RH
224}
225
3f1e2098 226static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
fdf9b3e8 227{
3f1e2098 228 if (use_exit_tb(ctx)) {
4bfa602b
RH
229 return false;
230 }
3f1e2098 231 return translator_use_goto_tb(&ctx->base, dest);
90aa39a1 232}
fdf9b3e8 233
90aa39a1
SF
/*
 * End the TB with a branch to @dest, using chain slot @n when direct
 * linking is permitted, otherwise storing the PC and leaving the TB.
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* Direct chain: goto_tb op, then PC update, then exit_tb. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (use_exit_tb(ctx)) {
            /* gUSA region: return to the main loop. */
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
250
fdf9b3e8
FB
251static void gen_jump(DisasContext * ctx)
252{
ec2eb22e 253 if (ctx->delayed_pc == -1) {
fdf9b3e8
FB
254 /* Target is not statically known, it comes necessarily from a
255 delayed jump as immediate jump are conditinal jumps */
1000822b 256 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
ac9707ea 257 tcg_gen_discard_i32(cpu_delayed_pc);
52df5adc 258 if (use_exit_tb(ctx)) {
07ea28b4 259 tcg_gen_exit_tb(NULL, 0);
ec2eb22e 260 } else {
7f11636d 261 tcg_gen_lookup_and_goto_ptr();
ec2eb22e 262 }
6f1c2af6 263 ctx->base.is_jmp = DISAS_NORETURN;
fdf9b3e8
FB
264 } else {
265 gen_goto_tb(ctx, 0, ctx->delayed_pc);
266 }
267}
268
269/* Immediate conditional jump (bt or bf) */
4bfa602b
RH
/*
 * Immediate conditional jump (bt or bf): branch to @dest when cpu_sr_t
 * matches @jump_if_true, otherwise fall through to the next insn.
 */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    /* Condition under which the branch is NOT taken. */
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        /* Taken path: drop the gUSA flags before leaving the region. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep. */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);                   /* taken path */
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);  /* not-taken path */
    ctx->base.is_jmp = DISAS_NORETURN;
}
297
298/* Delayed conditional jump (bt or bf) */
/*
 * Delayed conditional jump (bt/s or bf/s): the branch condition was
 * latched in cpu_delayed_cond before the delay slot; consume it here.
 */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    /* Consume the latched condition so it cannot leak into later TBs. */
    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    /* Condition false: fall through to the insn after the delay slot. */
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}
327
e5d8053e 328static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 329{
1e0b21d8
RH
330 /* We have already signaled illegal instruction for odd Dr. */
331 tcg_debug_assert((reg & 1) == 0);
332 reg ^= ctx->fbank;
66ba317c 333 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
cc4ba6a9
AJ
334}
335
e5d8053e 336static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 337{
1e0b21d8
RH
338 /* We have already signaled illegal instruction for odd Dr. */
339 tcg_debug_assert((reg & 1) == 0);
340 reg ^= ctx->fbank;
58d2a9ae 341 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
cc4ba6a9
AJ
342}
343
fdf9b3e8
FB
/* Instruction field extraction helpers (ctx->opcode is 16 bits). */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, sign-extended to 32 bits. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* Register accessors with bank selection folded in via XOR. */
#define REG(x)    cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)   cpu_fregs[(x) ^ ctx->fbank]

/* Remap a banked FP register number: move bit 0 up to bit 4. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
eda9b09b 359
/* Instruction not allowed in a delay slot: raise illegal-slot. */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { \
        goto do_illegal_slot;                      \
    }

/* Privileged instruction: illegal in user mode. */
#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {  \
        goto do_illegal; \
    }

/* FPU instruction while SR.FD is set: take the fpu-disabled path. */
#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) { \
        goto do_fpu_disabled;           \
    }

/* Instruction only valid when FPSCR.PR is clear. */
#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) { \
        goto do_illegal;           \
    }

/* Instruction only valid when FPSCR.PR is set. */
#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) { \
        goto do_illegal;              \
    }

/* SH4A-only instruction. */
#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }
389
b1d8e52e 390static void _decode_opc(DisasContext * ctx)
fdf9b3e8 391{
852d481f
EI
392 /* This code tries to make movcal emulation sufficiently
393 accurate for Linux purposes. This instruction writes
394 memory, and prior to that, always allocates a cache line.
395 It is used in two contexts:
396 - in memcpy, where data is copied in blocks, the first write
397 of to a block uses movca.l for performance.
398 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
399 to flush the cache. Here, the data written by movcal.l is never
400 written to memory, and the data written is just bogus.
401
402 To simulate this, we simulate movcal.l, we store the value to memory,
403 but we also remember the previous content. If we see ocbi, we check
404 if movcal.l for that address was done previously. If so, the write should
405 not have hit the memory, so we restore the previous content.
406 When we see an instruction that is neither movca.l
407 nor ocbi, the previous content is discarded.
408
409 To optimize, we only try to flush stores when we're at the start of
410 TB, or if we already saw movca.l in this TB and did not flush stores
411 yet. */
412 if (ctx->has_movcal)
413 {
414 int opcode = ctx->opcode & 0xf0ff;
415 if (opcode != 0x0093 /* ocbi */
416 && opcode != 0x00c3 /* movca.l */)
417 {
485d0035 418 gen_helper_discard_movcal_backup(cpu_env);
852d481f
EI
419 ctx->has_movcal = 0;
420 }
421 }
422
fdf9b3e8
FB
423#if 0
424 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
425#endif
f6198371 426
fdf9b3e8
FB
427 switch (ctx->opcode) {
428 case 0x0019: /* div0u */
1d565b21
AJ
429 tcg_gen_movi_i32(cpu_sr_m, 0);
430 tcg_gen_movi_i32(cpu_sr_q, 0);
34086945 431 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
432 return;
433 case 0x000b: /* rts */
1000822b
AJ
434 CHECK_NOT_DELAY_SLOT
435 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
ab419fd8 436 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
437 ctx->delayed_pc = (uint32_t) - 1;
438 return;
439 case 0x0028: /* clrmac */
3a8a44c4
AJ
440 tcg_gen_movi_i32(cpu_mach, 0);
441 tcg_gen_movi_i32(cpu_macl, 0);
fdf9b3e8
FB
442 return;
443 case 0x0048: /* clrs */
5ed9a259 444 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
fdf9b3e8
FB
445 return;
446 case 0x0008: /* clrt */
34086945 447 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
448 return;
449 case 0x0038: /* ldtlb */
fe25591e 450 CHECK_PRIVILEGED
485d0035 451 gen_helper_ldtlb(cpu_env);
fdf9b3e8 452 return;
c5e814b2 453 case 0x002b: /* rte */
fe25591e 454 CHECK_PRIVILEGED
1000822b 455 CHECK_NOT_DELAY_SLOT
34086945 456 gen_write_sr(cpu_ssr);
1000822b 457 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
ab419fd8 458 ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
fdf9b3e8 459 ctx->delayed_pc = (uint32_t) - 1;
6f1c2af6 460 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8
FB
461 return;
462 case 0x0058: /* sets */
5ed9a259 463 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
fdf9b3e8
FB
464 return;
465 case 0x0018: /* sett */
34086945 466 tcg_gen_movi_i32(cpu_sr_t, 1);
fdf9b3e8 467 return;
24988dc2 468 case 0xfbfd: /* frchg */
61dedf2a 469 CHECK_FPSCR_PR_0
6f06939b 470 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
6f1c2af6 471 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8 472 return;
24988dc2 473 case 0xf3fd: /* fschg */
61dedf2a 474 CHECK_FPSCR_PR_0
7a64244f 475 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
6f1c2af6 476 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8 477 return;
907759f9
RH
478 case 0xf7fd: /* fpchg */
479 CHECK_SH4A
480 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
6f1c2af6 481 ctx->base.is_jmp = DISAS_STOP;
907759f9 482 return;
fdf9b3e8
FB
483 case 0x0009: /* nop */
484 return;
485 case 0x001b: /* sleep */
fe25591e 486 CHECK_PRIVILEGED
6f1c2af6 487 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
10127400 488 gen_helper_sleep(cpu_env);
fdf9b3e8
FB
489 return;
490 }
491
492 switch (ctx->opcode & 0xf000) {
493 case 0x1000: /* mov.l Rm,@(disp,Rn) */
c55497ec 494 {
a7812ae4 495 TCGv addr = tcg_temp_new();
c55497ec 496 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
4da06fb3
RH
497 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
498 MO_TEUL | UNALIGN(ctx));
c55497ec 499 }
fdf9b3e8
FB
500 return;
501 case 0x5000: /* mov.l @(disp,Rm),Rn */
c55497ec 502 {
a7812ae4 503 TCGv addr = tcg_temp_new();
c55497ec 504 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
4da06fb3
RH
505 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
506 MO_TESL | UNALIGN(ctx));
c55497ec 507 }
fdf9b3e8 508 return;
24988dc2 509 case 0xe000: /* mov #imm,Rn */
4bfa602b 510#ifdef CONFIG_USER_ONLY
ab419fd8
RH
511 /*
512 * Detect the start of a gUSA region (mov #-n, r15).
513 * If so, update envflags and end the TB. This will allow us
514 * to see the end of the region (stored in R0) in the next TB.
515 */
6f1c2af6
RH
516 if (B11_8 == 15 && B7_0s < 0 &&
517 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
ab419fd8
RH
518 ctx->envflags =
519 deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
6f1c2af6 520 ctx->base.is_jmp = DISAS_STOP;
4bfa602b
RH
521 }
522#endif
7efbe241 523 tcg_gen_movi_i32(REG(B11_8), B7_0s);
fdf9b3e8
FB
524 return;
525 case 0x9000: /* mov.w @(disp,PC),Rn */
c55497ec 526 {
950b91be 527 TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
03a0d87e
RH
528 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
529 MO_TESW | MO_ALIGN);
c55497ec 530 }
fdf9b3e8
FB
531 return;
532 case 0xd000: /* mov.l @(disp,PC),Rn */
c55497ec 533 {
950b91be 534 TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
03a0d87e
RH
535 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
536 MO_TESL | MO_ALIGN);
c55497ec 537 }
fdf9b3e8 538 return;
24988dc2 539 case 0x7000: /* add #imm,Rn */
7efbe241 540 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
fdf9b3e8
FB
541 return;
542 case 0xa000: /* bra disp */
543 CHECK_NOT_DELAY_SLOT
6f1c2af6 544 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ab419fd8 545 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
546 return;
547 case 0xb000: /* bsr disp */
548 CHECK_NOT_DELAY_SLOT
6f1c2af6
RH
549 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
550 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ab419fd8 551 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
552 return;
553 }
554
555 switch (ctx->opcode & 0xf00f) {
556 case 0x6003: /* mov Rm,Rn */
7efbe241 557 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
558 return;
559 case 0x2000: /* mov.b Rm,@Rn */
3376f415 560 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
fdf9b3e8
FB
561 return;
562 case 0x2001: /* mov.w Rm,@Rn */
4da06fb3
RH
563 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
564 MO_TEUW | UNALIGN(ctx));
fdf9b3e8
FB
565 return;
566 case 0x2002: /* mov.l Rm,@Rn */
4da06fb3
RH
567 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
568 MO_TEUL | UNALIGN(ctx));
fdf9b3e8
FB
569 return;
570 case 0x6000: /* mov.b @Rm,Rn */
3376f415 571 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
fdf9b3e8
FB
572 return;
573 case 0x6001: /* mov.w @Rm,Rn */
4da06fb3
RH
574 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
575 MO_TESW | UNALIGN(ctx));
fdf9b3e8
FB
576 return;
577 case 0x6002: /* mov.l @Rm,Rn */
4da06fb3
RH
578 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
579 MO_TESL | UNALIGN(ctx));
fdf9b3e8
FB
580 return;
581 case 0x2004: /* mov.b Rm,@-Rn */
c55497ec 582 {
a7812ae4 583 TCGv addr = tcg_temp_new();
c55497ec 584 tcg_gen_subi_i32(addr, REG(B11_8), 1);
3376f415
AJ
585 /* might cause re-execution */
586 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
3101e99c 587 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
c55497ec 588 }
fdf9b3e8
FB
589 return;
590 case 0x2005: /* mov.w Rm,@-Rn */
c55497ec 591 {
a7812ae4 592 TCGv addr = tcg_temp_new();
c55497ec 593 tcg_gen_subi_i32(addr, REG(B11_8), 2);
4da06fb3
RH
594 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
595 MO_TEUW | UNALIGN(ctx));
3101e99c 596 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 597 }
fdf9b3e8
FB
598 return;
599 case 0x2006: /* mov.l Rm,@-Rn */
c55497ec 600 {
a7812ae4 601 TCGv addr = tcg_temp_new();
c55497ec 602 tcg_gen_subi_i32(addr, REG(B11_8), 4);
4da06fb3
RH
603 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
604 MO_TEUL | UNALIGN(ctx));
3101e99c 605 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 606 }
fdf9b3e8 607 return;
eda9b09b 608 case 0x6004: /* mov.b @Rm+,Rn */
3376f415 609 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
24988dc2 610 if ( B11_8 != B7_4 )
7efbe241 611 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
fdf9b3e8
FB
612 return;
613 case 0x6005: /* mov.w @Rm+,Rn */
4da06fb3
RH
614 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
615 MO_TESW | UNALIGN(ctx));
24988dc2 616 if ( B11_8 != B7_4 )
7efbe241 617 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
fdf9b3e8
FB
618 return;
619 case 0x6006: /* mov.l @Rm+,Rn */
4da06fb3
RH
620 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
621 MO_TESL | UNALIGN(ctx));
24988dc2 622 if ( B11_8 != B7_4 )
7efbe241 623 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
fdf9b3e8
FB
624 return;
625 case 0x0004: /* mov.b Rm,@(R0,Rn) */
c55497ec 626 {
a7812ae4 627 TCGv addr = tcg_temp_new();
c55497ec 628 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 629 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
c55497ec 630 }
fdf9b3e8
FB
631 return;
632 case 0x0005: /* mov.w Rm,@(R0,Rn) */
c55497ec 633 {
a7812ae4 634 TCGv addr = tcg_temp_new();
c55497ec 635 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
4da06fb3
RH
636 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
637 MO_TEUW | UNALIGN(ctx));
c55497ec 638 }
fdf9b3e8
FB
639 return;
640 case 0x0006: /* mov.l Rm,@(R0,Rn) */
c55497ec 641 {
a7812ae4 642 TCGv addr = tcg_temp_new();
c55497ec 643 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
4da06fb3
RH
644 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
645 MO_TEUL | UNALIGN(ctx));
c55497ec 646 }
fdf9b3e8
FB
647 return;
648 case 0x000c: /* mov.b @(R0,Rm),Rn */
c55497ec 649 {
a7812ae4 650 TCGv addr = tcg_temp_new();
c55497ec 651 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 652 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
c55497ec 653 }
fdf9b3e8
FB
654 return;
655 case 0x000d: /* mov.w @(R0,Rm),Rn */
c55497ec 656 {
a7812ae4 657 TCGv addr = tcg_temp_new();
c55497ec 658 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
4da06fb3
RH
659 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
660 MO_TESW | UNALIGN(ctx));
c55497ec 661 }
fdf9b3e8
FB
662 return;
663 case 0x000e: /* mov.l @(R0,Rm),Rn */
c55497ec 664 {
a7812ae4 665 TCGv addr = tcg_temp_new();
c55497ec 666 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
4da06fb3
RH
667 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
668 MO_TESL | UNALIGN(ctx));
c55497ec 669 }
fdf9b3e8
FB
670 return;
671 case 0x6008: /* swap.b Rm,Rn */
c55497ec 672 {
3c254ab8 673 TCGv low = tcg_temp_new();
b983a0e1 674 tcg_gen_bswap16_i32(low, REG(B7_4), 0);
218fd730 675 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
c55497ec 676 }
fdf9b3e8
FB
677 return;
678 case 0x6009: /* swap.w Rm,Rn */
c53b36d2 679 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
fdf9b3e8
FB
680 return;
681 case 0x200d: /* xtrct Rm,Rn */
c55497ec
AJ
682 {
683 TCGv high, low;
a7812ae4 684 high = tcg_temp_new();
3101e99c 685 tcg_gen_shli_i32(high, REG(B7_4), 16);
a7812ae4 686 low = tcg_temp_new();
c55497ec 687 tcg_gen_shri_i32(low, REG(B11_8), 16);
c55497ec 688 tcg_gen_or_i32(REG(B11_8), high, low);
c55497ec 689 }
fdf9b3e8
FB
690 return;
691 case 0x300c: /* add Rm,Rn */
7efbe241 692 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
693 return;
694 case 0x300e: /* addc Rm,Rn */
22b88fd7 695 {
34086945 696 TCGv t0, t1;
950b91be 697 t0 = tcg_constant_tl(0);
22b88fd7 698 t1 = tcg_temp_new();
a2368e01
AJ
699 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
700 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
701 REG(B11_8), t0, t1, cpu_sr_t);
22b88fd7 702 }
fdf9b3e8
FB
703 return;
704 case 0x300f: /* addv Rm,Rn */
ad8d25a1
AJ
705 {
706 TCGv t0, t1, t2;
707 t0 = tcg_temp_new();
708 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
709 t1 = tcg_temp_new();
710 tcg_gen_xor_i32(t1, t0, REG(B11_8));
711 t2 = tcg_temp_new();
712 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
34086945 713 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
34086945 714 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
ad8d25a1 715 tcg_gen_mov_i32(REG(B7_4), t0);
ad8d25a1 716 }
fdf9b3e8
FB
717 return;
718 case 0x2009: /* and Rm,Rn */
7efbe241 719 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
720 return;
721 case 0x3000: /* cmp/eq Rm,Rn */
34086945 722 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
723 return;
724 case 0x3003: /* cmp/ge Rm,Rn */
34086945 725 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
726 return;
727 case 0x3007: /* cmp/gt Rm,Rn */
34086945 728 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
729 return;
730 case 0x3006: /* cmp/hi Rm,Rn */
34086945 731 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
732 return;
733 case 0x3002: /* cmp/hs Rm,Rn */
34086945 734 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
735 return;
736 case 0x200c: /* cmp/str Rm,Rn */
69d6275b 737 {
c5c19137
AJ
738 TCGv cmp1 = tcg_temp_new();
739 TCGv cmp2 = tcg_temp_new();
eb6ca2b4
AJ
740 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
741 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
742 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
743 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
744 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
69d6275b 745 }
fdf9b3e8
FB
746 return;
747 case 0x2007: /* div0s Rm,Rn */
1d565b21
AJ
748 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
749 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
750 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
fdf9b3e8
FB
751 return;
752 case 0x3004: /* div1 Rm,Rn */
1d565b21
AJ
753 {
754 TCGv t0 = tcg_temp_new();
755 TCGv t1 = tcg_temp_new();
756 TCGv t2 = tcg_temp_new();
950b91be 757 TCGv zero = tcg_constant_i32(0);
1d565b21
AJ
758
759 /* shift left arg1, saving the bit being pushed out and inserting
760 T on the right */
761 tcg_gen_shri_i32(t0, REG(B11_8), 31);
762 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
763 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
764
765 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
766 using 64-bit temps, we compute arg0's high part from q ^ m, so
767 that it is 0x00000000 when adding the value or 0xffffffff when
768 subtracting it. */
769 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
770 tcg_gen_subi_i32(t1, t1, 1);
771 tcg_gen_neg_i32(t2, REG(B7_4));
772 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
773 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
774
775 /* compute T and Q depending on carry */
776 tcg_gen_andi_i32(t1, t1, 1);
777 tcg_gen_xor_i32(t1, t1, t0);
778 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
779 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
1d565b21 780 }
fdf9b3e8
FB
781 return;
782 case 0x300d: /* dmuls.l Rm,Rn */
1d3b7084 783 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
784 return;
785 case 0x3005: /* dmulu.l Rm,Rn */
1d3b7084 786 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
787 return;
788 case 0x600e: /* exts.b Rm,Rn */
7efbe241 789 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
790 return;
791 case 0x600f: /* exts.w Rm,Rn */
7efbe241 792 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
793 return;
794 case 0x600c: /* extu.b Rm,Rn */
7efbe241 795 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
796 return;
797 case 0x600d: /* extu.w Rm,Rn */
7efbe241 798 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8 799 return;
24988dc2 800 case 0x000f: /* mac.l @Rm+,@Rn+ */
c55497ec
AJ
801 {
802 TCGv arg0, arg1;
a7812ae4 803 arg0 = tcg_temp_new();
03a0d87e
RH
804 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
805 MO_TESL | MO_ALIGN);
a7812ae4 806 arg1 = tcg_temp_new();
03a0d87e
RH
807 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
808 MO_TESL | MO_ALIGN);
485d0035 809 gen_helper_macl(cpu_env, arg0, arg1);
c55497ec
AJ
810 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
811 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
812 }
fdf9b3e8
FB
813 return;
814 case 0x400f: /* mac.w @Rm+,@Rn+ */
c55497ec
AJ
815 {
816 TCGv arg0, arg1;
a7812ae4 817 arg0 = tcg_temp_new();
03a0d87e
RH
818 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
819 MO_TESL | MO_ALIGN);
a7812ae4 820 arg1 = tcg_temp_new();
03a0d87e
RH
821 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
822 MO_TESL | MO_ALIGN);
485d0035 823 gen_helper_macw(cpu_env, arg0, arg1);
c55497ec
AJ
824 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
825 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
826 }
fdf9b3e8
FB
827 return;
828 case 0x0007: /* mul.l Rm,Rn */
7efbe241 829 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
830 return;
831 case 0x200f: /* muls.w Rm,Rn */
c55497ec
AJ
832 {
833 TCGv arg0, arg1;
a7812ae4 834 arg0 = tcg_temp_new();
c55497ec 835 tcg_gen_ext16s_i32(arg0, REG(B7_4));
a7812ae4 836 arg1 = tcg_temp_new();
c55497ec
AJ
837 tcg_gen_ext16s_i32(arg1, REG(B11_8));
838 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
c55497ec 839 }
fdf9b3e8
FB
840 return;
841 case 0x200e: /* mulu.w Rm,Rn */
c55497ec
AJ
842 {
843 TCGv arg0, arg1;
a7812ae4 844 arg0 = tcg_temp_new();
c55497ec 845 tcg_gen_ext16u_i32(arg0, REG(B7_4));
a7812ae4 846 arg1 = tcg_temp_new();
c55497ec
AJ
847 tcg_gen_ext16u_i32(arg1, REG(B11_8));
848 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
c55497ec 849 }
fdf9b3e8
FB
850 return;
851 case 0x600b: /* neg Rm,Rn */
7efbe241 852 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
853 return;
854 case 0x600a: /* negc Rm,Rn */
b2d9eda5 855 {
950b91be 856 TCGv t0 = tcg_constant_i32(0);
60eb27fe
AJ
857 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
858 REG(B7_4), t0, cpu_sr_t, t0);
859 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
860 t0, t0, REG(B11_8), cpu_sr_t);
861 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
b2d9eda5 862 }
fdf9b3e8
FB
863 return;
864 case 0x6007: /* not Rm,Rn */
7efbe241 865 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
866 return;
867 case 0x200b: /* or Rm,Rn */
7efbe241 868 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
869 return;
870 case 0x400c: /* shad Rm,Rn */
69d6275b 871 {
be654c83
AJ
872 TCGv t0 = tcg_temp_new();
873 TCGv t1 = tcg_temp_new();
874 TCGv t2 = tcg_temp_new();
875
876 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
877
878 /* positive case: shift to the left */
879 tcg_gen_shl_i32(t1, REG(B11_8), t0);
880
881 /* negative case: shift to the right in two steps to
882 correctly handle the -32 case */
883 tcg_gen_xori_i32(t0, t0, 0x1f);
884 tcg_gen_sar_i32(t2, REG(B11_8), t0);
885 tcg_gen_sari_i32(t2, t2, 1);
886
887 /* select between the two cases */
888 tcg_gen_movi_i32(t0, 0);
889 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
69d6275b 890 }
fdf9b3e8
FB
891 return;
892 case 0x400d: /* shld Rm,Rn */
69d6275b 893 {
57760161
AJ
894 TCGv t0 = tcg_temp_new();
895 TCGv t1 = tcg_temp_new();
896 TCGv t2 = tcg_temp_new();
897
898 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
899
900 /* positive case: shift to the left */
901 tcg_gen_shl_i32(t1, REG(B11_8), t0);
902
903 /* negative case: shift to the right in two steps to
904 correctly handle the -32 case */
905 tcg_gen_xori_i32(t0, t0, 0x1f);
906 tcg_gen_shr_i32(t2, REG(B11_8), t0);
907 tcg_gen_shri_i32(t2, t2, 1);
908
909 /* select between the two cases */
910 tcg_gen_movi_i32(t0, 0);
911 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
69d6275b 912 }
fdf9b3e8
FB
913 return;
914 case 0x3008: /* sub Rm,Rn */
7efbe241 915 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
916 return;
917 case 0x300a: /* subc Rm,Rn */
22b88fd7 918 {
d0f44a55 919 TCGv t0, t1;
950b91be 920 t0 = tcg_constant_tl(0);
22b88fd7 921 t1 = tcg_temp_new();
d0f44a55
AJ
922 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
923 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
924 REG(B11_8), t0, t1, cpu_sr_t);
925 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
22b88fd7 926 }
fdf9b3e8
FB
927 return;
928 case 0x300b: /* subv Rm,Rn */
ad8d25a1
AJ
929 {
930 TCGv t0, t1, t2;
931 t0 = tcg_temp_new();
932 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
933 t1 = tcg_temp_new();
934 tcg_gen_xor_i32(t1, t0, REG(B7_4));
935 t2 = tcg_temp_new();
936 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
937 tcg_gen_and_i32(t1, t1, t2);
34086945 938 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
ad8d25a1 939 tcg_gen_mov_i32(REG(B11_8), t0);
ad8d25a1 940 }
fdf9b3e8
FB
941 return;
942 case 0x2008: /* tst Rm,Rn */
c55497ec 943 {
a7812ae4 944 TCGv val = tcg_temp_new();
c55497ec 945 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
34086945 946 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec 947 }
fdf9b3e8
FB
948 return;
949 case 0x200a: /* xor Rm,Rn */
7efbe241 950 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8 951 return;
e67888a7 952 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 953 CHECK_FPU_ENABLED
a6215749 954 if (ctx->tbflags & FPSCR_SZ) {
bdcb3739
RH
955 int xsrc = XHACK(B7_4);
956 int xdst = XHACK(B11_8);
957 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
958 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
eda9b09b 959 } else {
7c9f7038 960 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
eda9b09b
FB
961 }
962 return;
e67888a7 963 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
f6198371 964 CHECK_FPU_ENABLED
a6215749 965 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
966 TCGv_i64 fp = tcg_temp_new_i64();
967 gen_load_fpr64(ctx, fp, XHACK(B7_4));
03a0d87e
RH
968 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
969 MO_TEUQ | MO_ALIGN);
eda9b09b 970 } else {
03a0d87e
RH
971 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
972 MO_TEUL | MO_ALIGN);
eda9b09b
FB
973 }
974 return;
e67888a7 975 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 976 CHECK_FPU_ENABLED
a6215749 977 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50 978 TCGv_i64 fp = tcg_temp_new_i64();
03a0d87e
RH
979 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
980 MO_TEUQ | MO_ALIGN);
4d57fa50 981 gen_store_fpr64(ctx, fp, XHACK(B11_8));
eda9b09b 982 } else {
03a0d87e
RH
983 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
984 MO_TEUL | MO_ALIGN);
eda9b09b
FB
985 }
986 return;
e67888a7 987 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
f6198371 988 CHECK_FPU_ENABLED
a6215749 989 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50 990 TCGv_i64 fp = tcg_temp_new_i64();
03a0d87e
RH
991 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
992 MO_TEUQ | MO_ALIGN);
4d57fa50 993 gen_store_fpr64(ctx, fp, XHACK(B11_8));
4d57fa50 994 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
eda9b09b 995 } else {
03a0d87e
RH
996 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
997 MO_TEUL | MO_ALIGN);
cc4ba6a9 998 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
eda9b09b
FB
999 }
1000 return;
e67888a7 1001 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
f6198371 1002 CHECK_FPU_ENABLED
4d57fa50
RH
1003 {
1004 TCGv addr = tcg_temp_new_i32();
1005 if (ctx->tbflags & FPSCR_SZ) {
1006 TCGv_i64 fp = tcg_temp_new_i64();
1007 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1008 tcg_gen_subi_i32(addr, REG(B11_8), 8);
03a0d87e
RH
1009 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1010 MO_TEUQ | MO_ALIGN);
4d57fa50
RH
1011 } else {
1012 tcg_gen_subi_i32(addr, REG(B11_8), 4);
03a0d87e
RH
1013 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1014 MO_TEUL | MO_ALIGN);
4d57fa50
RH
1015 }
1016 tcg_gen_mov_i32(REG(B11_8), addr);
4d57fa50 1017 }
eda9b09b 1018 return;
e67888a7 1019 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
f6198371 1020 CHECK_FPU_ENABLED
cc4ba6a9 1021 {
a7812ae4 1022 TCGv addr = tcg_temp_new_i32();
cc4ba6a9 1023 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
a6215749 1024 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50 1025 TCGv_i64 fp = tcg_temp_new_i64();
03a0d87e
RH
1026 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
1027 MO_TEUQ | MO_ALIGN);
4d57fa50 1028 gen_store_fpr64(ctx, fp, XHACK(B11_8));
cc4ba6a9 1029 } else {
03a0d87e
RH
1030 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
1031 MO_TEUL | MO_ALIGN);
cc4ba6a9 1032 }
eda9b09b
FB
1033 }
1034 return;
e67888a7 1035 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
f6198371 1036 CHECK_FPU_ENABLED
cc4ba6a9 1037 {
a7812ae4 1038 TCGv addr = tcg_temp_new();
cc4ba6a9 1039 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
a6215749 1040 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1041 TCGv_i64 fp = tcg_temp_new_i64();
1042 gen_load_fpr64(ctx, fp, XHACK(B7_4));
03a0d87e
RH
1043 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1044 MO_TEUQ | MO_ALIGN);
cc4ba6a9 1045 } else {
03a0d87e
RH
1046 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1047 MO_TEUL | MO_ALIGN);
cc4ba6a9 1048 }
eda9b09b
FB
1049 }
1050 return;
e67888a7
TS
1051 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1052 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1053 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1054 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1055 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1056 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
cc4ba6a9 1057 {
f6198371 1058 CHECK_FPU_ENABLED
a6215749 1059 if (ctx->tbflags & FPSCR_PR) {
a7812ae4
PB
1060 TCGv_i64 fp0, fp1;
1061
93dc9c89
RH
1062 if (ctx->opcode & 0x0110) {
1063 goto do_illegal;
1064 }
a7812ae4
PB
1065 fp0 = tcg_temp_new_i64();
1066 fp1 = tcg_temp_new_i64();
1e0b21d8
RH
1067 gen_load_fpr64(ctx, fp0, B11_8);
1068 gen_load_fpr64(ctx, fp1, B7_4);
a7812ae4
PB
1069 switch (ctx->opcode & 0xf00f) {
1070 case 0xf000: /* fadd Rm,Rn */
485d0035 1071 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1072 break;
1073 case 0xf001: /* fsub Rm,Rn */
485d0035 1074 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1075 break;
1076 case 0xf002: /* fmul Rm,Rn */
485d0035 1077 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1078 break;
1079 case 0xf003: /* fdiv Rm,Rn */
485d0035 1080 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1081 break;
1082 case 0xf004: /* fcmp/eq Rm,Rn */
92f1f83e 1083 gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
a7812ae4
PB
1084 return;
1085 case 0xf005: /* fcmp/gt Rm,Rn */
92f1f83e 1086 gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
a7812ae4
PB
1087 return;
1088 }
1e0b21d8 1089 gen_store_fpr64(ctx, fp0, B11_8);
a7812ae4 1090 } else {
a7812ae4
PB
1091 switch (ctx->opcode & 0xf00f) {
1092 case 0xf000: /* fadd Rm,Rn */
7c9f7038
RH
1093 gen_helper_fadd_FT(FREG(B11_8), cpu_env,
1094 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1095 break;
1096 case 0xf001: /* fsub Rm,Rn */
7c9f7038
RH
1097 gen_helper_fsub_FT(FREG(B11_8), cpu_env,
1098 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1099 break;
1100 case 0xf002: /* fmul Rm,Rn */
7c9f7038
RH
1101 gen_helper_fmul_FT(FREG(B11_8), cpu_env,
1102 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1103 break;
1104 case 0xf003: /* fdiv Rm,Rn */
7c9f7038
RH
1105 gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
1106 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1107 break;
1108 case 0xf004: /* fcmp/eq Rm,Rn */
92f1f83e 1109 gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
7c9f7038 1110 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1111 return;
1112 case 0xf005: /* fcmp/gt Rm,Rn */
92f1f83e 1113 gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
7c9f7038 1114 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1115 return;
1116 }
cc4ba6a9 1117 }
ea6cf6be
TS
1118 }
1119 return;
5b7141a1 1120 case 0xf00e: /* fmac FR0,RM,Rn */
7e9f7ca8
RH
1121 CHECK_FPU_ENABLED
1122 CHECK_FPSCR_PR_0
1123 gen_helper_fmac_FT(FREG(B11_8), cpu_env,
1124 FREG(0), FREG(B7_4), FREG(B11_8));
1125 return;
fdf9b3e8
FB
1126 }
1127
1128 switch (ctx->opcode & 0xff00) {
1129 case 0xc900: /* and #imm,R0 */
7efbe241 1130 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1131 return;
24988dc2 1132 case 0xcd00: /* and.b #imm,@(R0,GBR) */
c55497ec
AJ
1133 {
1134 TCGv addr, val;
a7812ae4 1135 addr = tcg_temp_new();
c55497ec 1136 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1137 val = tcg_temp_new();
3376f415 1138 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1139 tcg_gen_andi_i32(val, val, B7_0);
3376f415 1140 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1141 }
fdf9b3e8
FB
1142 return;
1143 case 0x8b00: /* bf label */
1144 CHECK_NOT_DELAY_SLOT
6f1c2af6 1145 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
fdf9b3e8
FB
1146 return;
1147 case 0x8f00: /* bf/s label */
1148 CHECK_NOT_DELAY_SLOT
ac9707ea 1149 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
6f1c2af6 1150 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ab419fd8 1151 ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
fdf9b3e8
FB
1152 return;
1153 case 0x8900: /* bt label */
1154 CHECK_NOT_DELAY_SLOT
6f1c2af6 1155 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
fdf9b3e8
FB
1156 return;
1157 case 0x8d00: /* bt/s label */
1158 CHECK_NOT_DELAY_SLOT
ac9707ea 1159 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
6f1c2af6 1160 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ab419fd8 1161 ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
fdf9b3e8
FB
1162 return;
1163 case 0x8800: /* cmp/eq #imm,R0 */
34086945 1164 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
fdf9b3e8
FB
1165 return;
1166 case 0xc400: /* mov.b @(disp,GBR),R0 */
c55497ec 1167 {
a7812ae4 1168 TCGv addr = tcg_temp_new();
c55497ec 1169 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1170 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec 1171 }
fdf9b3e8
FB
1172 return;
1173 case 0xc500: /* mov.w @(disp,GBR),R0 */
c55497ec 1174 {
a7812ae4 1175 TCGv addr = tcg_temp_new();
c55497ec 1176 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
03a0d87e 1177 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
c55497ec 1178 }
fdf9b3e8
FB
1179 return;
1180 case 0xc600: /* mov.l @(disp,GBR),R0 */
c55497ec 1181 {
a7812ae4 1182 TCGv addr = tcg_temp_new();
c55497ec 1183 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
03a0d87e 1184 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
c55497ec 1185 }
fdf9b3e8
FB
1186 return;
1187 case 0xc000: /* mov.b R0,@(disp,GBR) */
c55497ec 1188 {
a7812ae4 1189 TCGv addr = tcg_temp_new();
c55497ec 1190 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1191 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec 1192 }
fdf9b3e8
FB
1193 return;
1194 case 0xc100: /* mov.w R0,@(disp,GBR) */
c55497ec 1195 {
a7812ae4 1196 TCGv addr = tcg_temp_new();
c55497ec 1197 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
03a0d87e 1198 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
c55497ec 1199 }
fdf9b3e8
FB
1200 return;
1201 case 0xc200: /* mov.l R0,@(disp,GBR) */
c55497ec 1202 {
a7812ae4 1203 TCGv addr = tcg_temp_new();
c55497ec 1204 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
03a0d87e 1205 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
c55497ec 1206 }
fdf9b3e8
FB
1207 return;
1208 case 0x8000: /* mov.b R0,@(disp,Rn) */
c55497ec 1209 {
a7812ae4 1210 TCGv addr = tcg_temp_new();
c55497ec 1211 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1212 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec 1213 }
fdf9b3e8
FB
1214 return;
1215 case 0x8100: /* mov.w R0,@(disp,Rn) */
c55497ec 1216 {
a7812ae4 1217 TCGv addr = tcg_temp_new();
c55497ec 1218 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
4da06fb3
RH
1219 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
1220 MO_TEUW | UNALIGN(ctx));
c55497ec 1221 }
fdf9b3e8
FB
1222 return;
1223 case 0x8400: /* mov.b @(disp,Rn),R0 */
c55497ec 1224 {
a7812ae4 1225 TCGv addr = tcg_temp_new();
c55497ec 1226 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1227 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec 1228 }
fdf9b3e8
FB
1229 return;
1230 case 0x8500: /* mov.w @(disp,Rn),R0 */
c55497ec 1231 {
a7812ae4 1232 TCGv addr = tcg_temp_new();
c55497ec 1233 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
4da06fb3
RH
1234 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
1235 MO_TESW | UNALIGN(ctx));
c55497ec 1236 }
fdf9b3e8
FB
1237 return;
1238 case 0xc700: /* mova @(disp,PC),R0 */
6f1c2af6
RH
1239 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1240 4 + B7_0 * 4) & ~3);
fdf9b3e8
FB
1241 return;
1242 case 0xcb00: /* or #imm,R0 */
7efbe241 1243 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1244 return;
24988dc2 1245 case 0xcf00: /* or.b #imm,@(R0,GBR) */
c55497ec
AJ
1246 {
1247 TCGv addr, val;
a7812ae4 1248 addr = tcg_temp_new();
c55497ec 1249 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1250 val = tcg_temp_new();
3376f415 1251 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1252 tcg_gen_ori_i32(val, val, B7_0);
3376f415 1253 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1254 }
fdf9b3e8
FB
1255 return;
1256 case 0xc300: /* trapa #imm */
c55497ec
AJ
1257 {
1258 TCGv imm;
1259 CHECK_NOT_DELAY_SLOT
ac9707ea 1260 gen_save_cpu_state(ctx, true);
950b91be 1261 imm = tcg_constant_i32(B7_0);
485d0035 1262 gen_helper_trapa(cpu_env, imm);
6f1c2af6 1263 ctx->base.is_jmp = DISAS_NORETURN;
c55497ec 1264 }
fdf9b3e8
FB
1265 return;
1266 case 0xc800: /* tst #imm,R0 */
c55497ec 1267 {
a7812ae4 1268 TCGv val = tcg_temp_new();
c55497ec 1269 tcg_gen_andi_i32(val, REG(0), B7_0);
34086945 1270 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec 1271 }
fdf9b3e8 1272 return;
24988dc2 1273 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
c55497ec 1274 {
a7812ae4 1275 TCGv val = tcg_temp_new();
c55497ec 1276 tcg_gen_add_i32(val, REG(0), cpu_gbr);
3376f415 1277 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
c55497ec 1278 tcg_gen_andi_i32(val, val, B7_0);
34086945 1279 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec 1280 }
fdf9b3e8
FB
1281 return;
1282 case 0xca00: /* xor #imm,R0 */
7efbe241 1283 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1284 return;
24988dc2 1285 case 0xce00: /* xor.b #imm,@(R0,GBR) */
c55497ec
AJ
1286 {
1287 TCGv addr, val;
a7812ae4 1288 addr = tcg_temp_new();
c55497ec 1289 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1290 val = tcg_temp_new();
3376f415 1291 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1292 tcg_gen_xori_i32(val, val, B7_0);
3376f415 1293 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1294 }
fdf9b3e8
FB
1295 return;
1296 }
1297
1298 switch (ctx->opcode & 0xf08f) {
1299 case 0x408e: /* ldc Rm,Rn_BANK */
fe25591e 1300 CHECK_PRIVILEGED
7efbe241 1301 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
fdf9b3e8
FB
1302 return;
1303 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
fe25591e 1304 CHECK_PRIVILEGED
03a0d87e
RH
1305 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
1306 MO_TESL | MO_ALIGN);
7efbe241 1307 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
fdf9b3e8
FB
1308 return;
1309 case 0x0082: /* stc Rm_BANK,Rn */
fe25591e 1310 CHECK_PRIVILEGED
7efbe241 1311 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
fdf9b3e8
FB
1312 return;
1313 case 0x4083: /* stc.l Rm_BANK,@-Rn */
fe25591e 1314 CHECK_PRIVILEGED
c55497ec 1315 {
a7812ae4 1316 TCGv addr = tcg_temp_new();
c55497ec 1317 tcg_gen_subi_i32(addr, REG(B11_8), 4);
03a0d87e
RH
1318 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
1319 MO_TEUL | MO_ALIGN);
3101e99c 1320 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 1321 }
fdf9b3e8
FB
1322 return;
1323 }
1324
1325 switch (ctx->opcode & 0xf0ff) {
1326 case 0x0023: /* braf Rn */
7efbe241 1327 CHECK_NOT_DELAY_SLOT
6f1c2af6 1328 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
ab419fd8 1329 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
1330 ctx->delayed_pc = (uint32_t) - 1;
1331 return;
1332 case 0x0003: /* bsrf Rn */
7efbe241 1333 CHECK_NOT_DELAY_SLOT
6f1c2af6 1334 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
7efbe241 1335 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
ab419fd8 1336 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
1337 ctx->delayed_pc = (uint32_t) - 1;
1338 return;
1339 case 0x4015: /* cmp/pl Rn */
34086945 1340 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1341 return;
1342 case 0x4011: /* cmp/pz Rn */
34086945 1343 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1344 return;
1345 case 0x4010: /* dt Rn */
7efbe241 1346 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
34086945 1347 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1348 return;
1349 case 0x402b: /* jmp @Rn */
7efbe241
AJ
1350 CHECK_NOT_DELAY_SLOT
1351 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
ab419fd8 1352 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
1353 ctx->delayed_pc = (uint32_t) - 1;
1354 return;
1355 case 0x400b: /* jsr @Rn */
7efbe241 1356 CHECK_NOT_DELAY_SLOT
6f1c2af6 1357 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
7efbe241 1358 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
ab419fd8 1359 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
1360 ctx->delayed_pc = (uint32_t) - 1;
1361 return;
fe25591e
AJ
1362 case 0x400e: /* ldc Rm,SR */
1363 CHECK_PRIVILEGED
34086945
AJ
1364 {
1365 TCGv val = tcg_temp_new();
1366 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1367 gen_write_sr(val);
6f1c2af6 1368 ctx->base.is_jmp = DISAS_STOP;
34086945 1369 }
390af821 1370 return;
fe25591e
AJ
1371 case 0x4007: /* ldc.l @Rm+,SR */
1372 CHECK_PRIVILEGED
c55497ec 1373 {
a7812ae4 1374 TCGv val = tcg_temp_new();
03a0d87e
RH
1375 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1376 MO_TESL | MO_ALIGN);
34086945
AJ
1377 tcg_gen_andi_i32(val, val, 0x700083f3);
1378 gen_write_sr(val);
c55497ec 1379 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
6f1c2af6 1380 ctx->base.is_jmp = DISAS_STOP;
c55497ec 1381 }
390af821 1382 return;
fe25591e
AJ
1383 case 0x0002: /* stc SR,Rn */
1384 CHECK_PRIVILEGED
34086945 1385 gen_read_sr(REG(B11_8));
390af821 1386 return;
fe25591e
AJ
1387 case 0x4003: /* stc SR,@-Rn */
1388 CHECK_PRIVILEGED
c55497ec 1389 {
a7812ae4 1390 TCGv addr = tcg_temp_new();
34086945 1391 TCGv val = tcg_temp_new();
c55497ec 1392 tcg_gen_subi_i32(addr, REG(B11_8), 4);
34086945 1393 gen_read_sr(val);
03a0d87e 1394 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
3101e99c 1395 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 1396 }
390af821 1397 return;
8e9b0678 1398#define LD(reg,ldnum,ldpnum,prechk) \
fdf9b3e8 1399 case ldnum: \
fe25591e 1400 prechk \
7efbe241 1401 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
fdf9b3e8
FB
1402 return; \
1403 case ldpnum: \
fe25591e 1404 prechk \
03a0d87e
RH
1405 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, \
1406 MO_TESL | MO_ALIGN); \
7efbe241 1407 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
8e9b0678
AC
1408 return;
1409#define ST(reg,stnum,stpnum,prechk) \
fdf9b3e8 1410 case stnum: \
fe25591e 1411 prechk \
7efbe241 1412 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
fdf9b3e8
FB
1413 return; \
1414 case stpnum: \
fe25591e 1415 prechk \
c55497ec 1416 { \
3101e99c 1417 TCGv addr = tcg_temp_new(); \
c55497ec 1418 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
03a0d87e
RH
1419 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, \
1420 MO_TEUL | MO_ALIGN); \
3101e99c 1421 tcg_gen_mov_i32(REG(B11_8), addr); \
86e0abc7 1422 } \
fdf9b3e8 1423 return;
8e9b0678
AC
1424#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1425 LD(reg,ldnum,ldpnum,prechk) \
1426 ST(reg,stnum,stpnum,prechk)
fe25591e
AJ
1427 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1428 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1429 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1430 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
935fc175 1431 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
ccae24d4 1432 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
fe25591e
AJ
1433 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1434 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1435 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1436 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
d8299bcc 1437 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
390af821 1438 case 0x406a: /* lds Rm,FPSCR */
d8299bcc 1439 CHECK_FPU_ENABLED
485d0035 1440 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
6f1c2af6 1441 ctx->base.is_jmp = DISAS_STOP;
390af821
AJ
1442 return;
1443 case 0x4066: /* lds.l @Rm+,FPSCR */
d8299bcc 1444 CHECK_FPU_ENABLED
c55497ec 1445 {
a7812ae4 1446 TCGv addr = tcg_temp_new();
03a0d87e
RH
1447 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
1448 MO_TESL | MO_ALIGN);
c55497ec 1449 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
485d0035 1450 gen_helper_ld_fpscr(cpu_env, addr);
6f1c2af6 1451 ctx->base.is_jmp = DISAS_STOP;
c55497ec 1452 }
390af821
AJ
1453 return;
1454 case 0x006a: /* sts FPSCR,Rn */
d8299bcc 1455 CHECK_FPU_ENABLED
c55497ec 1456 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
390af821
AJ
1457 return;
1458 case 0x4062: /* sts FPSCR,@-Rn */
d8299bcc 1459 CHECK_FPU_ENABLED
c55497ec
AJ
1460 {
1461 TCGv addr, val;
a7812ae4 1462 val = tcg_temp_new();
c55497ec 1463 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
a7812ae4 1464 addr = tcg_temp_new();
c55497ec 1465 tcg_gen_subi_i32(addr, REG(B11_8), 4);
03a0d87e 1466 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
3101e99c 1467 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 1468 }
390af821 1469 return;
fdf9b3e8 1470 case 0x00c3: /* movca.l R0,@Rm */
852d481f
EI
1471 {
1472 TCGv val = tcg_temp_new();
03a0d87e
RH
1473 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1474 MO_TEUL | MO_ALIGN);
485d0035 1475 gen_helper_movcal(cpu_env, REG(B11_8), val);
03a0d87e
RH
1476 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1477 MO_TEUL | MO_ALIGN);
852d481f
EI
1478 }
1479 ctx->has_movcal = 1;
fdf9b3e8 1480 return;
143021b2 1481 case 0x40a9: /* movua.l @Rm,R0 */
ccae24d4 1482 CHECK_SH4A
143021b2 1483 /* Load non-boundary-aligned data */
ccae24d4
RH
1484 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1485 MO_TEUL | MO_UNALN);
1486 return;
143021b2 1487 case 0x40e9: /* movua.l @Rm+,R0 */
ccae24d4 1488 CHECK_SH4A
143021b2 1489 /* Load non-boundary-aligned data */
ccae24d4
RH
1490 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1491 MO_TEUL | MO_UNALN);
1492 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1493 return;
fdf9b3e8 1494 case 0x0029: /* movt Rn */
34086945 1495 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
fdf9b3e8 1496 return;
66c7c806
AJ
1497 case 0x0073:
1498 /* MOVCO.L
f85da308
RH
1499 * LDST -> T
1500 * If (T == 1) R0 -> (Rn)
1501 * 0 -> LDST
1502 *
1503 * The above description doesn't work in a parallel context.
1504 * Since we currently support no smp boards, this implies user-mode.
1505 * But we can still support the official mechanism while user-mode
1506 * is single-threaded. */
ccae24d4
RH
1507 CHECK_SH4A
1508 {
f85da308
RH
1509 TCGLabel *fail = gen_new_label();
1510 TCGLabel *done = gen_new_label();
1511
6f1c2af6 1512 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1513 TCGv tmp;
1514
1515 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1516 cpu_lock_addr, fail);
1517 tmp = tcg_temp_new();
1518 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
03a0d87e
RH
1519 REG(0), ctx->memidx,
1520 MO_TEUL | MO_ALIGN);
f85da308 1521 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
f85da308
RH
1522 } else {
1523 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
03a0d87e
RH
1524 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1525 MO_TEUL | MO_ALIGN);
f85da308
RH
1526 tcg_gen_movi_i32(cpu_sr_t, 1);
1527 }
1528 tcg_gen_br(done);
1529
1530 gen_set_label(fail);
1531 tcg_gen_movi_i32(cpu_sr_t, 0);
1532
1533 gen_set_label(done);
1534 tcg_gen_movi_i32(cpu_lock_addr, -1);
ccae24d4 1535 }
f85da308 1536 return;
66c7c806
AJ
1537 case 0x0063:
1538 /* MOVLI.L @Rm,R0
f85da308
RH
1539 * 1 -> LDST
1540 * (Rm) -> R0
1541 * When interrupt/exception
1542 * occurred 0 -> LDST
1543 *
1544 * In a parallel context, we must also save the loaded value
1545 * for use with the cmpxchg that we'll use with movco.l. */
ccae24d4 1546 CHECK_SH4A
6f1c2af6 1547 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1548 TCGv tmp = tcg_temp_new();
1549 tcg_gen_mov_i32(tmp, REG(B11_8));
03a0d87e
RH
1550 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1551 MO_TESL | MO_ALIGN);
f85da308
RH
1552 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1553 tcg_gen_mov_i32(cpu_lock_addr, tmp);
f85da308 1554 } else {
03a0d87e
RH
1555 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1556 MO_TESL | MO_ALIGN);
f85da308
RH
1557 tcg_gen_movi_i32(cpu_lock_addr, 0);
1558 }
ccae24d4 1559 return;
fdf9b3e8 1560 case 0x0093: /* ocbi @Rn */
c55497ec 1561 {
485d0035 1562 gen_helper_ocbi(cpu_env, REG(B11_8));
c55497ec 1563 }
fdf9b3e8 1564 return;
24988dc2 1565 case 0x00a3: /* ocbp @Rn */
fdf9b3e8 1566 case 0x00b3: /* ocbwb @Rn */
0cdb9554
AJ
1567 /* These instructions are supposed to do nothing in case of
1568 a cache miss. Given that we only partially emulate caches
1569 it is safe to simply ignore them. */
fdf9b3e8
FB
1570 return;
1571 case 0x0083: /* pref @Rn */
1572 return;
71968fa6 1573 case 0x00d3: /* prefi @Rn */
ccae24d4
RH
1574 CHECK_SH4A
1575 return;
71968fa6 1576 case 0x00e3: /* icbi @Rn */
ccae24d4
RH
1577 CHECK_SH4A
1578 return;
71968fa6 1579 case 0x00ab: /* synco */
ccae24d4
RH
1580 CHECK_SH4A
1581 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1582 return;
fdf9b3e8 1583 case 0x4024: /* rotcl Rn */
c55497ec 1584 {
a7812ae4 1585 TCGv tmp = tcg_temp_new();
34086945
AJ
1586 tcg_gen_mov_i32(tmp, cpu_sr_t);
1587 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
c55497ec 1588 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1589 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec 1590 }
fdf9b3e8
FB
1591 return;
1592 case 0x4025: /* rotcr Rn */
c55497ec 1593 {
a7812ae4 1594 TCGv tmp = tcg_temp_new();
34086945
AJ
1595 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1596 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
c55497ec 1597 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
34086945 1598 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec 1599 }
fdf9b3e8
FB
1600 return;
1601 case 0x4004: /* rotl Rn */
2411fde9 1602 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1603 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1604 return;
1605 case 0x4005: /* rotr Rn */
34086945 1606 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
2411fde9 1607 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1608 return;
1609 case 0x4000: /* shll Rn */
1610 case 0x4020: /* shal Rn */
34086945 1611 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
7efbe241 1612 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1613 return;
1614 case 0x4021: /* shar Rn */
34086945 1615 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1616 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1617 return;
1618 case 0x4001: /* shlr Rn */
34086945 1619 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1620 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1621 return;
1622 case 0x4008: /* shll2 Rn */
7efbe241 1623 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1624 return;
1625 case 0x4018: /* shll8 Rn */
7efbe241 1626 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1627 return;
1628 case 0x4028: /* shll16 Rn */
7efbe241 1629 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1630 return;
1631 case 0x4009: /* shlr2 Rn */
7efbe241 1632 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1633 return;
1634 case 0x4019: /* shlr8 Rn */
7efbe241 1635 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1636 return;
1637 case 0x4029: /* shlr16 Rn */
7efbe241 1638 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1639 return;
1640 case 0x401b: /* tas.b @Rn */
d3c2b2b3
RH
1641 tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
1642 tcg_constant_i32(0x80), ctx->memidx, MO_UB);
1643 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
cb32f179 1644 return;
e67888a7 1645 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
f6198371 1646 CHECK_FPU_ENABLED
7c9f7038 1647 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
eda9b09b 1648 return;
e67888a7 1649 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
f6198371 1650 CHECK_FPU_ENABLED
7c9f7038 1651 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
eda9b09b 1652 return;
e67888a7 1653 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
f6198371 1654 CHECK_FPU_ENABLED
a6215749 1655 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1656 TCGv_i64 fp;
93dc9c89
RH
1657 if (ctx->opcode & 0x0100) {
1658 goto do_illegal;
1659 }
a7812ae4 1660 fp = tcg_temp_new_i64();
485d0035 1661 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1662 gen_store_fpr64(ctx, fp, B11_8);
ea6cf6be
TS
1663 }
1664 else {
7c9f7038 1665 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
ea6cf6be
TS
1666 }
1667 return;
e67888a7 1668 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
f6198371 1669 CHECK_FPU_ENABLED
a6215749 1670 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1671 TCGv_i64 fp;
93dc9c89
RH
1672 if (ctx->opcode & 0x0100) {
1673 goto do_illegal;
1674 }
a7812ae4 1675 fp = tcg_temp_new_i64();
1e0b21d8 1676 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1677 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
ea6cf6be
TS
1678 }
1679 else {
7c9f7038 1680 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
ea6cf6be
TS
1681 }
1682 return;
24988dc2 1683 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
f6198371 1684 CHECK_FPU_ENABLED
7c9f7038 1685 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
24988dc2 1686 return;
57f5c1b0 1687 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
f6198371 1688 CHECK_FPU_ENABLED
7c9f7038 1689 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
24988dc2
AJ
1690 return;
1691 case 0xf06d: /* fsqrt FRn */
f6198371 1692 CHECK_FPU_ENABLED
a6215749 1693 if (ctx->tbflags & FPSCR_PR) {
93dc9c89
RH
1694 if (ctx->opcode & 0x0100) {
1695 goto do_illegal;
1696 }
a7812ae4 1697 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1698 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1699 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1e0b21d8 1700 gen_store_fpr64(ctx, fp, B11_8);
24988dc2 1701 } else {
7c9f7038 1702 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2
AJ
1703 }
1704 return;
1705 case 0xf07d: /* fsrra FRn */
f6198371 1706 CHECK_FPU_ENABLED
11b7aa23
RH
1707 CHECK_FPSCR_PR_0
1708 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2 1709 break;
e67888a7 1710 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
f6198371 1711 CHECK_FPU_ENABLED
7e9f7ca8
RH
1712 CHECK_FPSCR_PR_0
1713 tcg_gen_movi_i32(FREG(B11_8), 0);
1714 return;
e67888a7 1715 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
f6198371 1716 CHECK_FPU_ENABLED
7e9f7ca8
RH
1717 CHECK_FPSCR_PR_0
1718 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1719 return;
24988dc2 1720 case 0xf0ad: /* fcnvsd FPUL,DRn */
f6198371 1721 CHECK_FPU_ENABLED
cc4ba6a9 1722 {
a7812ae4 1723 TCGv_i64 fp = tcg_temp_new_i64();
485d0035 1724 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1725 gen_store_fpr64(ctx, fp, B11_8);
cc4ba6a9 1726 }
24988dc2
AJ
1727 return;
1728 case 0xf0bd: /* fcnvds DRn,FPUL */
f6198371 1729 CHECK_FPU_ENABLED
cc4ba6a9 1730 {
a7812ae4 1731 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1732 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1733 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
cc4ba6a9 1734 }
24988dc2 1735 return;
af8c2bde
AJ
1736 case 0xf0ed: /* fipr FVm,FVn */
1737 CHECK_FPU_ENABLED
7e9f7ca8
RH
1738 CHECK_FPSCR_PR_1
1739 {
950b91be
RH
1740 TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
1741 TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
485d0035 1742 gen_helper_fipr(cpu_env, m, n);
af8c2bde
AJ
1743 return;
1744 }
1745 break;
17075f10
AJ
1746 case 0xf0fd: /* ftrv XMTRX,FVn */
1747 CHECK_FPU_ENABLED
7e9f7ca8
RH
1748 CHECK_FPSCR_PR_1
1749 {
1750 if ((ctx->opcode & 0x0300) != 0x0100) {
1751 goto do_illegal;
1752 }
950b91be 1753 TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
485d0035 1754 gen_helper_ftrv(cpu_env, n);
17075f10
AJ
1755 return;
1756 }
1757 break;
fdf9b3e8 1758 }
bacc637a 1759#if 0
fdf9b3e8 1760 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
6f1c2af6 1761 ctx->opcode, ctx->base.pc_next);
bacc637a
AJ
1762 fflush(stderr);
1763#endif
6b98213d 1764 do_illegal:
ab419fd8 1765 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
dec16c6e
RH
1766 do_illegal_slot:
1767 gen_save_cpu_state(ctx, true);
485d0035 1768 gen_helper_raise_slot_illegal_instruction(cpu_env);
86865c5f 1769 } else {
dec16c6e 1770 gen_save_cpu_state(ctx, true);
485d0035 1771 gen_helper_raise_illegal_instruction(cpu_env);
86865c5f 1772 }
6f1c2af6 1773 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042
RH
1774 return;
1775
1776 do_fpu_disabled:
1777 gen_save_cpu_state(ctx, true);
ab419fd8 1778 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
dec4f042
RH
1779 gen_helper_raise_slot_fpu_disable(cpu_env);
1780 } else {
1781 gen_helper_raise_fpu_disable(cpu_env);
1782 }
6f1c2af6 1783 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042 1784 return;
823029f9
TS
1785}
1786
b1d8e52e 1787static void decode_opc(DisasContext * ctx)
823029f9 1788{
a6215749 1789 uint32_t old_flags = ctx->envflags;
823029f9
TS
1790
1791 _decode_opc(ctx);
1792
ab419fd8 1793 if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
39682608 1794 /* go out of the delay slot */
ab419fd8 1795 ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
4bfa602b
RH
1796
1797 /* When in an exclusive region, we must continue to the end
1798 for conditional branches. */
ab419fd8
RH
1799 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1800 && old_flags & TB_FLAG_DELAY_SLOT_COND) {
4bfa602b
RH
1801 gen_delayed_conditional_jump(ctx);
1802 return;
1803 }
1804 /* Otherwise this is probably an invalid gUSA region.
1805 Drop the GUSA bits so the next TB doesn't see them. */
ab419fd8 1806 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
4bfa602b 1807
ac9707ea 1808 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
ab419fd8 1809 if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
823029f9 1810 gen_delayed_conditional_jump(ctx);
be53081a 1811 } else {
823029f9
TS
1812 gen_jump(ctx);
1813 }
4bfa602b
RH
1814 }
1815}
1816
1817#ifdef CONFIG_USER_ONLY
1818/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1819 Upon an interrupt, a real kernel would simply notice magic values in
1820 the registers and reset the PC to the start of the sequence.
1821
1822 For QEMU, we cannot do this in quite the same way. Instead, we notice
1823 the normal start of such a sequence (mov #-x,r15). While we can handle
1824 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1825 sequences and transform them into atomic operations as seen by the host.
1826*/
/*
 * Attempt to recognize one "normal" gUSA restartable-atomic sequence
 * (load / optional mov / operation / store) and emit it as a single
 * host atomic operation.  On any unrecognized shape, fall through to
 * the "fail" path, which re-runs the region under the exclusive lock.
 *
 * pc .. pc_end (from tb->cs_base) delimits the region; each SH-4 insn
 * is 2 bytes.  B11_8/B7_4/B7_0s extract register/immediate fields from
 * ctx->opcode (macros defined elsewhere in this file).
 */
static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
{
    uint16_t insns[5];
    int ld_adr, ld_dst, ld_mop;          /* load: address reg, dest reg, memop */
    int op_dst, op_src, op_opc;          /* operation: regs and TCG opcode */
    int mv_src, mt_dst, st_src, st_mop;  /* optional mov, movt dest, store */
    TCGv op_arg;
    uint32_t pc = ctx->base.pc_next;
    uint32_t pc_end = ctx->base.tb->cs_base;
    int max_insns = (pc_end - pc) / 2;
    int i;

    /* The state machine below will consume only a few insns.
       If there are more than that in a region, fail now. */
    if (max_insns > ARRAY_SIZE(insns)) {
        goto fail;
    }

    /* Read all of the insns for the region. */
    for (i = 0; i < max_insns; ++i) {
        insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
    }

    /* -1 means "not seen yet" for every slot of the state machine. */
    ld_adr = ld_dst = ld_mop = -1;
    mv_src = -1;
    op_dst = op_src = op_opc = -1;
    mt_dst = -1;
    st_src = st_mop = -1;
    op_arg = NULL;
    i = 0;

#define NEXT_INSN \
    do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)

    /*
     * Expect a load to begin the region.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6000: /* mov.b @Rm,Rn */
        ld_mop = MO_SB;
        break;
    case 0x6001: /* mov.w @Rm,Rn */
        ld_mop = MO_TESW;
        break;
    case 0x6002: /* mov.l @Rm,Rn */
        ld_mop = MO_TESL;
        break;
    default:
        goto fail;
    }
    ld_adr = B7_4;
    ld_dst = B11_8;
    if (ld_adr == ld_dst) {
        goto fail;
    }
    /* Unless we see a mov, any two-operand operation must use ld_dst. */
    op_dst = ld_dst;

    /*
     * Expect an optional register move.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        /*
         * Here we want to recognize ld_dst being saved for later consumption,
         * or for another input register being copied so that ld_dst need not
         * be clobbered during the operation.
         */
        op_dst = B11_8;
        mv_src = B7_4;
        if (op_dst == ld_dst) {
            /* Overwriting the load output. */
            goto fail;
        }
        if (mv_src != ld_dst) {
            /* Copying a new input; constrain op_src to match the load. */
            op_src = ld_dst;
        }
        break;

    default:
        /* Put back and re-examine as operation. */
        --i;
    }

    /*
     * Expect the operation.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x300c: /* add Rm,Rn */
        op_opc = INDEX_op_add_i32;
        goto do_reg_op;
    case 0x2009: /* and Rm,Rn */
        op_opc = INDEX_op_and_i32;
        goto do_reg_op;
    case 0x200a: /* xor Rm,Rn */
        op_opc = INDEX_op_xor_i32;
        goto do_reg_op;
    case 0x200b: /* or Rm,Rn */
        op_opc = INDEX_op_or_i32;
    do_reg_op:
        /* The operation register should be as expected, and the
           other input cannot depend on the load. */
        if (op_dst != B11_8) {
            goto fail;
        }
        if (op_src < 0) {
            /* Unconstrainted input. */
            op_src = B7_4;
        } else if (op_src == B7_4) {
            /* Constrained input matched load.  All operations are
               commutative; "swap" them by "moving" the load output
               to the (implicit) first argument and the move source
               to the (explicit) second argument. */
            op_src = mv_src;
        } else {
            goto fail;
        }
        op_arg = REG(op_src);
        break;

    case 0x6007: /* not Rm,Rn */
        /* NOT is implemented below as XOR with -1. */
        if (ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_dst = B11_8;
        op_opc = INDEX_op_xor_i32;
        op_arg = tcg_constant_i32(-1);
        break;

    case 0x7000 ... 0x700f: /* add #imm,Rn */
        if (op_dst != B11_8 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_add_i32;
        op_arg = tcg_constant_i32(B7_0s);
        break;

    case 0x3000: /* cmp/eq Rm,Rn */
        /* Looking for the middle of a compare-and-swap sequence,
           beginning with the compare.  Operands can be either order,
           but with only one overlapping the load. */
        if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32; /* placeholder */
        op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
        op_arg = REG(op_src);

        /* The compare must be followed by a conditional branch that
           exits the region (the cmpxchg "failure" edge). */
        NEXT_INSN;
        switch (ctx->opcode & 0xff00) {
        case 0x8b00: /* bf label */
        case 0x8f00: /* bf/s label */
            if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
                goto fail;
            }
            if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
                break;
            }
            /* We're looking to unconditionally modify Rn with the
               result of the comparison, within the delay slot of
               the branch.  This is used by older gcc. */
            NEXT_INSN;
            if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
                mt_dst = B11_8;
            } else {
                goto fail;
            }
            break;

        default:
            goto fail;
        }
        break;

    case 0x2008: /* tst Rm,Rn */
        /* Looking for a compare-and-swap against zero. */
        if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32;
        op_arg = tcg_constant_i32(0);

        NEXT_INSN;
        if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
            || pc + (i + 1 + B7_0s) * 2 != pc_end) {
            goto fail;
        }
        break;

    default:
        /* Put back and re-examine as store. */
        --i;
    }

    /*
     * Expect the store.
     */
    /* The store must be the last insn. */
    if (i != max_insns - 1) {
        goto fail;
    }
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x2000: /* mov.b Rm,@Rn */
        st_mop = MO_UB;
        break;
    case 0x2001: /* mov.w Rm,@Rn */
        st_mop = MO_UW;
        break;
    case 0x2002: /* mov.l Rm,@Rn */
        st_mop = MO_UL;
        break;
    default:
        goto fail;
    }
    /* The store must match the load. */
    if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
        goto fail;
    }
    st_src = B7_4;

#undef NEXT_INSN

    /*
     * Emit the operation.
     */
    switch (op_opc) {
    case -1:
        /* No operation found.  Look for exchange pattern. */
        if (st_src == ld_dst || mv_src >= 0) {
            goto fail;
        }
        tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
                                ctx->memidx, ld_mop);
        break;

    case INDEX_op_add_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst && st_mop == MO_UL) {
            tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            if (op_dst != ld_dst) {
                /* Note that mop sizes < 4 cannot use add_fetch
                   because it won't carry into the higher bits. */
                tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
            }
        }
        break;

    case INDEX_op_and_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_or_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
            tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_xor_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_setcond_i32:
        /* Compare-and-swap: cmpxchg, then recompute T from the result. */
        if (st_src == ld_dst) {
            goto fail;
        }
        tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
                                   REG(st_src), ctx->memidx, ld_mop);
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
        if (mt_dst >= 0) {
            /* Mirror the recognized "movt Rn" from the delay slot. */
            tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* The entire region has been translated. */
    ctx->envflags &= ~TB_FLAG_GUSA_MASK;
    ctx->base.pc_next = pc_end;
    ctx->base.num_insns += max_insns - 1;
    return;

 fail:
    qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
                  pc, pc_end);

    /* Restart with the EXCLUSIVE bit set, within a TB run via
       cpu_exec_step_atomic holding the exclusive lock. */
    ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
    gen_save_cpu_state(ctx, false);
    gen_helper_exclusive(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;

    /* We're not executing an instruction, but we must report one for the
       purposes of accounting within the TB.  We might as well report the
       entire region consumed via ctx->base.pc_next so that it's immediately
       available in the disassembly dump. */
    ctx->base.pc_next = pc_end;
    ctx->base.num_insns += max_insns - 1;
}
4bfa602b 2167#endif
fdf9b3e8 2168
/*
 * TranslatorOps init hook: decode tb->flags into per-TB DisasContext
 * state and bound the number of instructions for this TB.
 */
static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUSH4State *env = cs->env_ptr;
    uint32_t tbflags;
    int bound;

    ctx->tbflags = tbflags = ctx->base.tb->flags;
    ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
    /* MMU index: 1 when SR.MD is clear, otherwise 0 (presumably
       user vs privileged mode -- confirm against cpu_mmu_index). */
    ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
    /* We don't know if the delayed pc came from a dynamic or static branch,
       so assume it is a dynamic branch.  */
    ctx->delayed_pc = -1; /* use delayed pc from env pointer */
    ctx->features = env->features;
    ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
    /* Register-bank offsets selected by SR.MD/SR.RB and FPSCR.FR. */
    ctx->gbank = ((tbflags & (1 << SR_MD)) &&
                  (tbflags & (1 << SR_RB))) * 0x10;
    ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;

#ifdef CONFIG_USER_ONLY
    if (tbflags & TB_FLAG_GUSA_MASK) {
        /* In gUSA exclusive region. */
        uint32_t pc = ctx->base.pc_next;
        uint32_t pc_end = ctx->base.tb->cs_base;
        int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
        int max_insns = (pc_end - pc) / 2;

        if (pc != pc_end + backup || max_insns < 2) {
            /* This is a malformed gUSA region.  Don't do anything special,
               since the interpreter is likely to get confused. */
            ctx->envflags &= ~TB_FLAG_GUSA_MASK;
        } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
            /* Regardless of single-stepping or the end of the page,
               we must complete execution of the gUSA region while
               holding the exclusive lock. */
            ctx->base.max_insns = max_insns;
            return;
        }
    }
#endif

    /* Since the ISA is fixed-width, we can bound by the number
       of instructions remaining on the page. */
    bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
4448a836 2215
fd1b3d38
EC
/* TranslatorOps tb_start hook: no per-TB prologue is needed for SH-4. */
static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
4bfa602b 2219
fd1b3d38
EC
/*
 * TranslatorOps insn_start hook: record pc and envflags so that they
 * can be restored on exception/restart at this instruction.
 */
static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
}
b933066a 2226
fd1b3d38
EC
/*
 * TranslatorOps translate_insn hook: fetch and translate one 16-bit
 * instruction, diverting to the gUSA recognizer for user-only builds.
 */
static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUSH4State *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

#ifdef CONFIG_USER_ONLY
    if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
        && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
        /* We're in an gUSA region, and we have not already fallen
           back on using an exclusive region.  Attempt to parse the
           region into a single supported atomic operation.  Failure
           is handled within the parser by raising an exception to
           retry using an exclusive region. */
        decode_gusa(ctx, env);
        return;
    }
#endif

    ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
    decode_opc(ctx);
    ctx->base.pc_next += 2;   /* all SH-4 insns are 2 bytes */
}
2249
2250static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2251{
2252 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2253
ab419fd8 2254 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
4bfa602b 2255 /* Ending the region of exclusivity. Clear the bits. */
ab419fd8 2256 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
4bfa602b
RH
2257 }
2258
fd1b3d38 2259 switch (ctx->base.is_jmp) {
34cf5678 2260 case DISAS_STOP:
fd1b3d38 2261 gen_save_cpu_state(ctx, true);
52df5adc 2262 tcg_gen_exit_tb(NULL, 0);
34cf5678
RH
2263 break;
2264 case DISAS_NEXT:
fd1b3d38
EC
2265 case DISAS_TOO_MANY:
2266 gen_save_cpu_state(ctx, false);
2267 gen_goto_tb(ctx, 0, ctx->base.pc_next);
34cf5678
RH
2268 break;
2269 case DISAS_NORETURN:
2270 break;
2271 default:
2272 g_assert_not_reached();
fdf9b3e8 2273 }
fd1b3d38 2274}
823029f9 2275
8eb806a7
RH
/*
 * TranslatorOps disas_log hook: dump the guest disassembly of this TB
 * to the given log file, prefixed with the nearest symbol name.
 */
static void sh4_tr_disas_log(const DisasContextBase *dcbase,
                             CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}
0a7df5da 2282
fd1b3d38
EC
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps sh4_tr_ops = {
    .init_disas_context = sh4_tr_init_disas_context,
    .tb_start           = sh4_tr_tb_start,
    .insn_start         = sh4_tr_insn_start,
    .translate_insn     = sh4_tr_translate_insn,
    .tb_stop            = sh4_tr_tb_stop,
    .disas_log          = sh4_tr_disas_log,
};
2291
/*
 * Entry point from the common translation machinery: translate the
 * guest code for one TranslationBlock via the SH-4 hook table.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
}