]> git.proxmox.com Git - mirror_qemu.git/blame - target/sh4/translate.c
qemu-print: New qemu_fprintf(), qemu_vfprintf()
[mirror_qemu.git] / target / sh4 / translate.c
CommitLineData
fdf9b3e8
FB
1/*
2 * SH4 translation
5fafdf24 3 *
fdf9b3e8
FB
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
fdf9b3e8 18 */
fdf9b3e8
FB
19
20#define DEBUG_DISAS
fdf9b3e8 21
9d4c9946 22#include "qemu/osdep.h"
fdf9b3e8 23#include "cpu.h"
76cad711 24#include "disas/disas.h"
63c91552 25#include "exec/exec-all.h"
57fec1fe 26#include "tcg-op.h"
f08b6170 27#include "exec/cpu_ldst.h"
2ef6175a
RH
28#include "exec/helper-proto.h"
29#include "exec/helper-gen.h"
4834871b 30#include "exec/translator.h"
a7e30d84 31#include "trace-tcg.h"
508127e2 32#include "exec/log.h"
a7e30d84
LV
33
34
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;    /* should stay unmodified during the TB translation */
    uint32_t envflags;   /* should stay in sync with env->flags using TCG ops */
    int memidx;          /* MMU index used for qemu_ld/st memory accesses */
    int gbank;           /* XOR offset selecting the active general register
                            bank (see REG()/ALTREG()) */
    int fbank;           /* XOR offset selecting the active FP register bank
                            (see FREG()) */
    uint32_t delayed_pc; /* static target of a pending delayed branch, or
                            (uint32_t)-1 when the target is only known at
                            run time (held in cpu_delayed_pc) */
    uint32_t features;   /* CPU feature bits, e.g. SH_FEATURE_SH4A */

    uint16_t opcode;     /* instruction word currently being translated */

    bool has_movcal;     /* a movca.l backup may still be live; cleared by
                            _decode_opc when a non-ocbi insn is seen */
} DisasContext;
50
fe25591e
AJ
51#if defined(CONFIG_USER_ONLY)
52#define IS_USER(ctx) 1
53#else
a6215749 54#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
fe25591e
AJ
55#endif
56
6f1c2af6 57/* Target-specific values for ctx->base.is_jmp. */
4834871b
RH
58/* We want to exit back to the cpu loop for some reason.
59 Usually this is to recognize interrupts immediately. */
60#define DISAS_STOP DISAS_TARGET_0
823029f9 61
1e8864f7 62/* global register indexes */
3a3bb8d2 63static TCGv cpu_gregs[32];
1d565b21
AJ
64static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
65static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
3a8a44c4 66static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
f85da308
RH
67static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
68static TCGv cpu_lock_addr, cpu_lock_value;
66ba317c 69static TCGv cpu_fregs[32];
1000822b
AJ
70
71/* internal register indexes */
47b9f4d5 72static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
1e8864f7 73
022c62cb 74#include "exec/gen-icount.h"
2e70f6ef 75
aa7408ec 76void sh4_translate_init(void)
2e70f6ef 77{
1e8864f7 78 int i;
559dd74d 79 static const char * const gregnames[24] = {
1e8864f7
AJ
80 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
81 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
82 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
83 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
84 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 };
66ba317c
AJ
86 static const char * const fregnames[32] = {
87 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
88 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
89 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
90 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
91 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
92 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
93 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
94 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
95 };
1e8864f7 96
3a3bb8d2 97 for (i = 0; i < 24; i++) {
e1ccc054 98 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
73e5716c 99 offsetof(CPUSH4State, gregs[i]),
66ba317c 100 gregnames[i]);
3a3bb8d2
RH
101 }
102 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
988d7eaa 103
e1ccc054 104 cpu_pc = tcg_global_mem_new_i32(cpu_env,
73e5716c 105 offsetof(CPUSH4State, pc), "PC");
e1ccc054 106 cpu_sr = tcg_global_mem_new_i32(cpu_env,
73e5716c 107 offsetof(CPUSH4State, sr), "SR");
e1ccc054
RH
108 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
109 offsetof(CPUSH4State, sr_m), "SR_M");
110 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
111 offsetof(CPUSH4State, sr_q), "SR_Q");
112 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
113 offsetof(CPUSH4State, sr_t), "SR_T");
114 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
73e5716c 115 offsetof(CPUSH4State, ssr), "SSR");
e1ccc054 116 cpu_spc = tcg_global_mem_new_i32(cpu_env,
73e5716c 117 offsetof(CPUSH4State, spc), "SPC");
e1ccc054 118 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
73e5716c 119 offsetof(CPUSH4State, gbr), "GBR");
e1ccc054 120 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
73e5716c 121 offsetof(CPUSH4State, vbr), "VBR");
e1ccc054 122 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
73e5716c 123 offsetof(CPUSH4State, sgr), "SGR");
e1ccc054 124 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
73e5716c 125 offsetof(CPUSH4State, dbr), "DBR");
e1ccc054 126 cpu_mach = tcg_global_mem_new_i32(cpu_env,
73e5716c 127 offsetof(CPUSH4State, mach), "MACH");
e1ccc054 128 cpu_macl = tcg_global_mem_new_i32(cpu_env,
73e5716c 129 offsetof(CPUSH4State, macl), "MACL");
e1ccc054 130 cpu_pr = tcg_global_mem_new_i32(cpu_env,
73e5716c 131 offsetof(CPUSH4State, pr), "PR");
e1ccc054 132 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
73e5716c 133 offsetof(CPUSH4State, fpscr), "FPSCR");
e1ccc054 134 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
73e5716c 135 offsetof(CPUSH4State, fpul), "FPUL");
a7812ae4 136
e1ccc054 137 cpu_flags = tcg_global_mem_new_i32(cpu_env,
73e5716c 138 offsetof(CPUSH4State, flags), "_flags_");
e1ccc054 139 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
73e5716c 140 offsetof(CPUSH4State, delayed_pc),
a7812ae4 141 "_delayed_pc_");
47b9f4d5
AJ
142 cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
143 offsetof(CPUSH4State,
144 delayed_cond),
145 "_delayed_cond_");
f85da308
RH
146 cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
147 offsetof(CPUSH4State, lock_addr),
148 "_lock_addr_");
149 cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
150 offsetof(CPUSH4State, lock_value),
151 "_lock_value_");
1000822b 152
66ba317c 153 for (i = 0; i < 32; i++)
e1ccc054 154 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
73e5716c 155 offsetof(CPUSH4State, fregs[i]),
66ba317c 156 fregnames[i]);
2e70f6ef
PB
157}
158
878096ee
AF
/* Dump the architectural register state of CS to F using CPU_FPRINTF,
   one register group per line.  FLAGS is unused here. */
void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;
    /* SR must be reassembled from its split representation. */
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    /* gregs[0..23]: both banks of R0-R7 plus R8-R15, four per line. */
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_RTE) {
        cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}
187
34086945
AJ
188static void gen_read_sr(TCGv dst)
189{
1d565b21
AJ
190 TCGv t0 = tcg_temp_new();
191 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
192 tcg_gen_or_i32(dst, dst, t0);
193 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
194 tcg_gen_or_i32(dst, dst, t0);
195 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
196 tcg_gen_or_i32(dst, cpu_sr, t0);
197 tcg_temp_free_i32(t0);
34086945
AJ
198}
199
200static void gen_write_sr(TCGv src)
201{
1d565b21
AJ
202 tcg_gen_andi_i32(cpu_sr, src,
203 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
a380f9db
AJ
204 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
205 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
206 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
34086945
AJ
207}
208
ac9707ea
AJ
/* Write the translator's cached state back to the CPU environment,
   e.g. before raising an exception or calling a helper that may do so.
   PC is stored only when requested; the delayed branch target is stored
   only when one is pending; flags are stored only when they differ from
   the value already implied by the TB flags. */
static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}
221
ec2eb22e
RH
222static inline bool use_exit_tb(DisasContext *ctx)
223{
224 return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
225}
226
90aa39a1 227static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
fdf9b3e8 228{
ec2eb22e 229 /* Use a direct jump if in same page and singlestep not enabled */
6f1c2af6 230 if (unlikely(ctx->base.singlestep_enabled || use_exit_tb(ctx))) {
4bfa602b
RH
231 return false;
232 }
90aa39a1 233#ifndef CONFIG_USER_ONLY
6f1c2af6 234 return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
90aa39a1
SF
235#else
236 return true;
237#endif
238}
fdf9b3e8 239
90aa39a1
SF
/* End the TB with a branch to DEST, using direct TB chaining (slot N)
   when permitted; otherwise store the new PC and leave via a debug
   exception (single-step), a plain exit (gUSA region), or an indirect
   TB lookup. */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
258
fdf9b3e8
FB
/* Emit the actual control transfer for a (delayed) jump.  A static
   target goes through gen_goto_tb; a dynamic one is taken from
   cpu_delayed_pc at run time. */
static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == -1) {
        /* Target is not statically known, it comes necessarily from a
           delayed jump as immediate jump are conditional jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
278
/* Immediate conditional jump (bt or bf): branch to DEST when cpu_sr_t
   matches JUMP_IF_TRUE, otherwise fall through to the next insn. */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    /* Condition under which the branch is NOT taken. */
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep. */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);           /* taken path */
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);  /* fall-through path */
    ctx->base.is_jmp = DISAS_NORETURN;
}
307
/* Delayed conditional jump (bt or bf): emitted at the end of the delay
   slot.  The branch condition was saved in cpu_delayed_cond; consume it
   and either take the delayed jump or fall through. */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    /* Copy the saved condition, then release the global. */
    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    /* Not taken: continue at the insn after the delay slot. */
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}
337
e5d8053e 338static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 339{
1e0b21d8
RH
340 /* We have already signaled illegal instruction for odd Dr. */
341 tcg_debug_assert((reg & 1) == 0);
342 reg ^= ctx->fbank;
66ba317c 343 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
cc4ba6a9
AJ
344}
345
e5d8053e 346static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 347{
1e0b21d8
RH
348 /* We have already signaled illegal instruction for odd Dr. */
349 tcg_debug_assert((reg & 1) == 0);
350 reg ^= ctx->fbank;
58d2a9ae 351 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
cc4ba6a9
AJ
352}
353
fdf9b3e8
FB
354#define B3_0 (ctx->opcode & 0xf)
355#define B6_4 ((ctx->opcode >> 4) & 0x7)
356#define B7_4 ((ctx->opcode >> 4) & 0xf)
357#define B7_0 (ctx->opcode & 0xff)
358#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
359#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
360 (ctx->opcode & 0xfff))
361#define B11_8 ((ctx->opcode >> 8) & 0xf)
362#define B15_12 ((ctx->opcode >> 12) & 0xf)
363
3a3bb8d2
RH
364#define REG(x) cpu_gregs[(x) ^ ctx->gbank]
365#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
5c13bad9 366#define FREG(x) cpu_fregs[(x) ^ ctx->fbank]
fdf9b3e8 367
f09111e0 368#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
eda9b09b 369
fdf9b3e8 370#define CHECK_NOT_DELAY_SLOT \
dec16c6e
RH
371 if (ctx->envflags & DELAY_SLOT_MASK) { \
372 goto do_illegal_slot; \
a6215749
AJ
373 }
374
6b98213d
RH
375#define CHECK_PRIVILEGED \
376 if (IS_USER(ctx)) { \
377 goto do_illegal; \
a6215749
AJ
378 }
379
dec4f042
RH
380#define CHECK_FPU_ENABLED \
381 if (ctx->tbflags & (1u << SR_FD)) { \
382 goto do_fpu_disabled; \
a6215749 383 }
d8299bcc 384
7e9f7ca8
RH
385#define CHECK_FPSCR_PR_0 \
386 if (ctx->tbflags & FPSCR_PR) { \
387 goto do_illegal; \
388 }
389
390#define CHECK_FPSCR_PR_1 \
391 if (!(ctx->tbflags & FPSCR_PR)) { \
392 goto do_illegal; \
393 }
394
ccae24d4
RH
395#define CHECK_SH4A \
396 if (!(ctx->features & SH_FEATURE_SH4A)) { \
397 goto do_illegal; \
398 }
399
b1d8e52e 400static void _decode_opc(DisasContext * ctx)
fdf9b3e8 401{
852d481f
EI
402 /* This code tries to make movcal emulation sufficiently
403 accurate for Linux purposes. This instruction writes
404 memory, and prior to that, always allocates a cache line.
405 It is used in two contexts:
406 - in memcpy, where data is copied in blocks, the first write
407 of to a block uses movca.l for performance.
408 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
409 to flush the cache. Here, the data written by movcal.l is never
410 written to memory, and the data written is just bogus.
411
412 To simulate this, we simulate movcal.l, we store the value to memory,
413 but we also remember the previous content. If we see ocbi, we check
414 if movcal.l for that address was done previously. If so, the write should
415 not have hit the memory, so we restore the previous content.
416 When we see an instruction that is neither movca.l
417 nor ocbi, the previous content is discarded.
418
419 To optimize, we only try to flush stores when we're at the start of
420 TB, or if we already saw movca.l in this TB and did not flush stores
421 yet. */
422 if (ctx->has_movcal)
423 {
424 int opcode = ctx->opcode & 0xf0ff;
425 if (opcode != 0x0093 /* ocbi */
426 && opcode != 0x00c3 /* movca.l */)
427 {
485d0035 428 gen_helper_discard_movcal_backup(cpu_env);
852d481f
EI
429 ctx->has_movcal = 0;
430 }
431 }
432
fdf9b3e8
FB
433#if 0
434 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
435#endif
f6198371 436
fdf9b3e8
FB
437 switch (ctx->opcode) {
438 case 0x0019: /* div0u */
1d565b21
AJ
439 tcg_gen_movi_i32(cpu_sr_m, 0);
440 tcg_gen_movi_i32(cpu_sr_q, 0);
34086945 441 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
442 return;
443 case 0x000b: /* rts */
1000822b
AJ
444 CHECK_NOT_DELAY_SLOT
445 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
a6215749 446 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
447 ctx->delayed_pc = (uint32_t) - 1;
448 return;
449 case 0x0028: /* clrmac */
3a8a44c4
AJ
450 tcg_gen_movi_i32(cpu_mach, 0);
451 tcg_gen_movi_i32(cpu_macl, 0);
fdf9b3e8
FB
452 return;
453 case 0x0048: /* clrs */
5ed9a259 454 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
fdf9b3e8
FB
455 return;
456 case 0x0008: /* clrt */
34086945 457 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
458 return;
459 case 0x0038: /* ldtlb */
fe25591e 460 CHECK_PRIVILEGED
485d0035 461 gen_helper_ldtlb(cpu_env);
fdf9b3e8 462 return;
c5e814b2 463 case 0x002b: /* rte */
fe25591e 464 CHECK_PRIVILEGED
1000822b 465 CHECK_NOT_DELAY_SLOT
34086945 466 gen_write_sr(cpu_ssr);
1000822b 467 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
be53081a 468 ctx->envflags |= DELAY_SLOT_RTE;
fdf9b3e8 469 ctx->delayed_pc = (uint32_t) - 1;
6f1c2af6 470 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8
FB
471 return;
472 case 0x0058: /* sets */
5ed9a259 473 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
fdf9b3e8
FB
474 return;
475 case 0x0018: /* sett */
34086945 476 tcg_gen_movi_i32(cpu_sr_t, 1);
fdf9b3e8 477 return;
24988dc2 478 case 0xfbfd: /* frchg */
61dedf2a 479 CHECK_FPSCR_PR_0
6f06939b 480 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
6f1c2af6 481 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8 482 return;
24988dc2 483 case 0xf3fd: /* fschg */
61dedf2a 484 CHECK_FPSCR_PR_0
7a64244f 485 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
6f1c2af6 486 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8 487 return;
907759f9
RH
488 case 0xf7fd: /* fpchg */
489 CHECK_SH4A
490 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
6f1c2af6 491 ctx->base.is_jmp = DISAS_STOP;
907759f9 492 return;
fdf9b3e8
FB
493 case 0x0009: /* nop */
494 return;
495 case 0x001b: /* sleep */
fe25591e 496 CHECK_PRIVILEGED
6f1c2af6 497 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
10127400 498 gen_helper_sleep(cpu_env);
fdf9b3e8
FB
499 return;
500 }
501
502 switch (ctx->opcode & 0xf000) {
503 case 0x1000: /* mov.l Rm,@(disp,Rn) */
c55497ec 504 {
a7812ae4 505 TCGv addr = tcg_temp_new();
c55497ec 506 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
3376f415 507 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
c55497ec
AJ
508 tcg_temp_free(addr);
509 }
fdf9b3e8
FB
510 return;
511 case 0x5000: /* mov.l @(disp,Rm),Rn */
c55497ec 512 {
a7812ae4 513 TCGv addr = tcg_temp_new();
c55497ec 514 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
3376f415 515 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
516 tcg_temp_free(addr);
517 }
fdf9b3e8 518 return;
24988dc2 519 case 0xe000: /* mov #imm,Rn */
4bfa602b
RH
520#ifdef CONFIG_USER_ONLY
521 /* Detect the start of a gUSA region. If so, update envflags
522 and end the TB. This will allow us to see the end of the
523 region (stored in R0) in the next TB. */
6f1c2af6
RH
524 if (B11_8 == 15 && B7_0s < 0 &&
525 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
4bfa602b 526 ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
6f1c2af6 527 ctx->base.is_jmp = DISAS_STOP;
4bfa602b
RH
528 }
529#endif
7efbe241 530 tcg_gen_movi_i32(REG(B11_8), B7_0s);
fdf9b3e8
FB
531 return;
532 case 0x9000: /* mov.w @(disp,PC),Rn */
c55497ec 533 {
6f1c2af6 534 TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
3376f415 535 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
536 tcg_temp_free(addr);
537 }
fdf9b3e8
FB
538 return;
539 case 0xd000: /* mov.l @(disp,PC),Rn */
c55497ec 540 {
6f1c2af6 541 TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
3376f415 542 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
543 tcg_temp_free(addr);
544 }
fdf9b3e8 545 return;
24988dc2 546 case 0x7000: /* add #imm,Rn */
7efbe241 547 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
fdf9b3e8
FB
548 return;
549 case 0xa000: /* bra disp */
550 CHECK_NOT_DELAY_SLOT
6f1c2af6 551 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
a6215749 552 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
553 return;
554 case 0xb000: /* bsr disp */
555 CHECK_NOT_DELAY_SLOT
6f1c2af6
RH
556 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
557 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
a6215749 558 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
559 return;
560 }
561
562 switch (ctx->opcode & 0xf00f) {
563 case 0x6003: /* mov Rm,Rn */
7efbe241 564 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
565 return;
566 case 0x2000: /* mov.b Rm,@Rn */
3376f415 567 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
fdf9b3e8
FB
568 return;
569 case 0x2001: /* mov.w Rm,@Rn */
3376f415 570 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
fdf9b3e8
FB
571 return;
572 case 0x2002: /* mov.l Rm,@Rn */
3376f415 573 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
fdf9b3e8
FB
574 return;
575 case 0x6000: /* mov.b @Rm,Rn */
3376f415 576 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
fdf9b3e8
FB
577 return;
578 case 0x6001: /* mov.w @Rm,Rn */
3376f415 579 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
fdf9b3e8
FB
580 return;
581 case 0x6002: /* mov.l @Rm,Rn */
3376f415 582 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
fdf9b3e8
FB
583 return;
584 case 0x2004: /* mov.b Rm,@-Rn */
c55497ec 585 {
a7812ae4 586 TCGv addr = tcg_temp_new();
c55497ec 587 tcg_gen_subi_i32(addr, REG(B11_8), 1);
3376f415
AJ
588 /* might cause re-execution */
589 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
3101e99c 590 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
c55497ec
AJ
591 tcg_temp_free(addr);
592 }
fdf9b3e8
FB
593 return;
594 case 0x2005: /* mov.w Rm,@-Rn */
c55497ec 595 {
a7812ae4 596 TCGv addr = tcg_temp_new();
c55497ec 597 tcg_gen_subi_i32(addr, REG(B11_8), 2);
3376f415 598 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
3101e99c 599 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec
AJ
600 tcg_temp_free(addr);
601 }
fdf9b3e8
FB
602 return;
603 case 0x2006: /* mov.l Rm,@-Rn */
c55497ec 604 {
a7812ae4 605 TCGv addr = tcg_temp_new();
c55497ec 606 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 607 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
3101e99c 608 tcg_gen_mov_i32(REG(B11_8), addr);
e691e0ed 609 tcg_temp_free(addr);
c55497ec 610 }
fdf9b3e8 611 return;
eda9b09b 612 case 0x6004: /* mov.b @Rm+,Rn */
3376f415 613 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
24988dc2 614 if ( B11_8 != B7_4 )
7efbe241 615 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
fdf9b3e8
FB
616 return;
617 case 0x6005: /* mov.w @Rm+,Rn */
3376f415 618 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
24988dc2 619 if ( B11_8 != B7_4 )
7efbe241 620 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
fdf9b3e8
FB
621 return;
622 case 0x6006: /* mov.l @Rm+,Rn */
3376f415 623 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
24988dc2 624 if ( B11_8 != B7_4 )
7efbe241 625 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
fdf9b3e8
FB
626 return;
627 case 0x0004: /* mov.b Rm,@(R0,Rn) */
c55497ec 628 {
a7812ae4 629 TCGv addr = tcg_temp_new();
c55497ec 630 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 631 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
c55497ec
AJ
632 tcg_temp_free(addr);
633 }
fdf9b3e8
FB
634 return;
635 case 0x0005: /* mov.w Rm,@(R0,Rn) */
c55497ec 636 {
a7812ae4 637 TCGv addr = tcg_temp_new();
c55497ec 638 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 639 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
c55497ec
AJ
640 tcg_temp_free(addr);
641 }
fdf9b3e8
FB
642 return;
643 case 0x0006: /* mov.l Rm,@(R0,Rn) */
c55497ec 644 {
a7812ae4 645 TCGv addr = tcg_temp_new();
c55497ec 646 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 647 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
c55497ec
AJ
648 tcg_temp_free(addr);
649 }
fdf9b3e8
FB
650 return;
651 case 0x000c: /* mov.b @(R0,Rm),Rn */
c55497ec 652 {
a7812ae4 653 TCGv addr = tcg_temp_new();
c55497ec 654 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 655 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
c55497ec
AJ
656 tcg_temp_free(addr);
657 }
fdf9b3e8
FB
658 return;
659 case 0x000d: /* mov.w @(R0,Rm),Rn */
c55497ec 660 {
a7812ae4 661 TCGv addr = tcg_temp_new();
c55497ec 662 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 663 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
664 tcg_temp_free(addr);
665 }
fdf9b3e8
FB
666 return;
667 case 0x000e: /* mov.l @(R0,Rm),Rn */
c55497ec 668 {
a7812ae4 669 TCGv addr = tcg_temp_new();
c55497ec 670 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 671 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
672 tcg_temp_free(addr);
673 }
fdf9b3e8
FB
674 return;
675 case 0x6008: /* swap.b Rm,Rn */
c55497ec 676 {
3c254ab8 677 TCGv low = tcg_temp_new();
3101e99c
AJ
678 tcg_gen_ext16u_i32(low, REG(B7_4));
679 tcg_gen_bswap16_i32(low, low);
218fd730 680 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
c55497ec 681 tcg_temp_free(low);
c55497ec 682 }
fdf9b3e8
FB
683 return;
684 case 0x6009: /* swap.w Rm,Rn */
c53b36d2 685 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
fdf9b3e8
FB
686 return;
687 case 0x200d: /* xtrct Rm,Rn */
c55497ec
AJ
688 {
689 TCGv high, low;
a7812ae4 690 high = tcg_temp_new();
3101e99c 691 tcg_gen_shli_i32(high, REG(B7_4), 16);
a7812ae4 692 low = tcg_temp_new();
c55497ec 693 tcg_gen_shri_i32(low, REG(B11_8), 16);
c55497ec
AJ
694 tcg_gen_or_i32(REG(B11_8), high, low);
695 tcg_temp_free(low);
696 tcg_temp_free(high);
697 }
fdf9b3e8
FB
698 return;
699 case 0x300c: /* add Rm,Rn */
7efbe241 700 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
701 return;
702 case 0x300e: /* addc Rm,Rn */
22b88fd7 703 {
34086945 704 TCGv t0, t1;
a2368e01 705 t0 = tcg_const_tl(0);
22b88fd7 706 t1 = tcg_temp_new();
a2368e01
AJ
707 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
708 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
709 REG(B11_8), t0, t1, cpu_sr_t);
22b88fd7 710 tcg_temp_free(t0);
34086945 711 tcg_temp_free(t1);
22b88fd7 712 }
fdf9b3e8
FB
713 return;
714 case 0x300f: /* addv Rm,Rn */
ad8d25a1
AJ
715 {
716 TCGv t0, t1, t2;
717 t0 = tcg_temp_new();
718 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
719 t1 = tcg_temp_new();
720 tcg_gen_xor_i32(t1, t0, REG(B11_8));
721 t2 = tcg_temp_new();
722 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
34086945 723 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
ad8d25a1 724 tcg_temp_free(t2);
34086945 725 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
ad8d25a1
AJ
726 tcg_temp_free(t1);
727 tcg_gen_mov_i32(REG(B7_4), t0);
728 tcg_temp_free(t0);
729 }
fdf9b3e8
FB
730 return;
731 case 0x2009: /* and Rm,Rn */
7efbe241 732 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
733 return;
734 case 0x3000: /* cmp/eq Rm,Rn */
34086945 735 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
736 return;
737 case 0x3003: /* cmp/ge Rm,Rn */
34086945 738 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
739 return;
740 case 0x3007: /* cmp/gt Rm,Rn */
34086945 741 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
742 return;
743 case 0x3006: /* cmp/hi Rm,Rn */
34086945 744 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
745 return;
746 case 0x3002: /* cmp/hs Rm,Rn */
34086945 747 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
748 return;
749 case 0x200c: /* cmp/str Rm,Rn */
69d6275b 750 {
c5c19137
AJ
751 TCGv cmp1 = tcg_temp_new();
752 TCGv cmp2 = tcg_temp_new();
eb6ca2b4
AJ
753 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
754 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
755 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
756 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
757 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
c55497ec
AJ
758 tcg_temp_free(cmp2);
759 tcg_temp_free(cmp1);
69d6275b 760 }
fdf9b3e8
FB
761 return;
762 case 0x2007: /* div0s Rm,Rn */
1d565b21
AJ
763 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
764 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
765 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
fdf9b3e8
FB
766 return;
767 case 0x3004: /* div1 Rm,Rn */
1d565b21
AJ
768 {
769 TCGv t0 = tcg_temp_new();
770 TCGv t1 = tcg_temp_new();
771 TCGv t2 = tcg_temp_new();
772 TCGv zero = tcg_const_i32(0);
773
774 /* shift left arg1, saving the bit being pushed out and inserting
775 T on the right */
776 tcg_gen_shri_i32(t0, REG(B11_8), 31);
777 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
778 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
779
780 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
781 using 64-bit temps, we compute arg0's high part from q ^ m, so
782 that it is 0x00000000 when adding the value or 0xffffffff when
783 subtracting it. */
784 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
785 tcg_gen_subi_i32(t1, t1, 1);
786 tcg_gen_neg_i32(t2, REG(B7_4));
787 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
788 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
789
790 /* compute T and Q depending on carry */
791 tcg_gen_andi_i32(t1, t1, 1);
792 tcg_gen_xor_i32(t1, t1, t0);
793 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
794 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
795
796 tcg_temp_free(zero);
797 tcg_temp_free(t2);
798 tcg_temp_free(t1);
799 tcg_temp_free(t0);
800 }
fdf9b3e8
FB
801 return;
802 case 0x300d: /* dmuls.l Rm,Rn */
1d3b7084 803 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
804 return;
805 case 0x3005: /* dmulu.l Rm,Rn */
1d3b7084 806 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
807 return;
808 case 0x600e: /* exts.b Rm,Rn */
7efbe241 809 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
810 return;
811 case 0x600f: /* exts.w Rm,Rn */
7efbe241 812 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
813 return;
814 case 0x600c: /* extu.b Rm,Rn */
7efbe241 815 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
816 return;
817 case 0x600d: /* extu.w Rm,Rn */
7efbe241 818 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8 819 return;
24988dc2 820 case 0x000f: /* mac.l @Rm+,@Rn+ */
c55497ec
AJ
821 {
822 TCGv arg0, arg1;
a7812ae4 823 arg0 = tcg_temp_new();
3376f415 824 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
a7812ae4 825 arg1 = tcg_temp_new();
3376f415 826 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
485d0035 827 gen_helper_macl(cpu_env, arg0, arg1);
c55497ec
AJ
828 tcg_temp_free(arg1);
829 tcg_temp_free(arg0);
830 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
831 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
832 }
fdf9b3e8
FB
833 return;
834 case 0x400f: /* mac.w @Rm+,@Rn+ */
c55497ec
AJ
835 {
836 TCGv arg0, arg1;
a7812ae4 837 arg0 = tcg_temp_new();
3376f415 838 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
a7812ae4 839 arg1 = tcg_temp_new();
3376f415 840 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
485d0035 841 gen_helper_macw(cpu_env, arg0, arg1);
c55497ec
AJ
842 tcg_temp_free(arg1);
843 tcg_temp_free(arg0);
844 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
845 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
846 }
fdf9b3e8
FB
847 return;
848 case 0x0007: /* mul.l Rm,Rn */
7efbe241 849 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
850 return;
851 case 0x200f: /* muls.w Rm,Rn */
c55497ec
AJ
852 {
853 TCGv arg0, arg1;
a7812ae4 854 arg0 = tcg_temp_new();
c55497ec 855 tcg_gen_ext16s_i32(arg0, REG(B7_4));
a7812ae4 856 arg1 = tcg_temp_new();
c55497ec
AJ
857 tcg_gen_ext16s_i32(arg1, REG(B11_8));
858 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
859 tcg_temp_free(arg1);
860 tcg_temp_free(arg0);
861 }
fdf9b3e8
FB
862 return;
863 case 0x200e: /* mulu.w Rm,Rn */
c55497ec
AJ
864 {
865 TCGv arg0, arg1;
a7812ae4 866 arg0 = tcg_temp_new();
c55497ec 867 tcg_gen_ext16u_i32(arg0, REG(B7_4));
a7812ae4 868 arg1 = tcg_temp_new();
c55497ec
AJ
869 tcg_gen_ext16u_i32(arg1, REG(B11_8));
870 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
871 tcg_temp_free(arg1);
872 tcg_temp_free(arg0);
873 }
fdf9b3e8
FB
874 return;
875 case 0x600b: /* neg Rm,Rn */
7efbe241 876 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
877 return;
878 case 0x600a: /* negc Rm,Rn */
b2d9eda5 879 {
60eb27fe
AJ
880 TCGv t0 = tcg_const_i32(0);
881 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
882 REG(B7_4), t0, cpu_sr_t, t0);
883 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
884 t0, t0, REG(B11_8), cpu_sr_t);
885 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
b2d9eda5 886 tcg_temp_free(t0);
b2d9eda5 887 }
fdf9b3e8
FB
888 return;
889 case 0x6007: /* not Rm,Rn */
7efbe241 890 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
891 return;
892 case 0x200b: /* or Rm,Rn */
7efbe241 893 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
894 return;
895 case 0x400c: /* shad Rm,Rn */
69d6275b 896 {
be654c83
AJ
897 TCGv t0 = tcg_temp_new();
898 TCGv t1 = tcg_temp_new();
899 TCGv t2 = tcg_temp_new();
900
901 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
902
903 /* positive case: shift to the left */
904 tcg_gen_shl_i32(t1, REG(B11_8), t0);
905
906 /* negative case: shift to the right in two steps to
907 correctly handle the -32 case */
908 tcg_gen_xori_i32(t0, t0, 0x1f);
909 tcg_gen_sar_i32(t2, REG(B11_8), t0);
910 tcg_gen_sari_i32(t2, t2, 1);
911
912 /* select between the two cases */
913 tcg_gen_movi_i32(t0, 0);
914 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
915
916 tcg_temp_free(t0);
917 tcg_temp_free(t1);
918 tcg_temp_free(t2);
69d6275b 919 }
fdf9b3e8
FB
920 return;
921 case 0x400d: /* shld Rm,Rn */
69d6275b 922 {
57760161
AJ
923 TCGv t0 = tcg_temp_new();
924 TCGv t1 = tcg_temp_new();
925 TCGv t2 = tcg_temp_new();
926
927 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
928
929 /* positive case: shift to the left */
930 tcg_gen_shl_i32(t1, REG(B11_8), t0);
931
932 /* negative case: shift to the right in two steps to
933 correctly handle the -32 case */
934 tcg_gen_xori_i32(t0, t0, 0x1f);
935 tcg_gen_shr_i32(t2, REG(B11_8), t0);
936 tcg_gen_shri_i32(t2, t2, 1);
937
938 /* select between the two cases */
939 tcg_gen_movi_i32(t0, 0);
940 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
941
942 tcg_temp_free(t0);
943 tcg_temp_free(t1);
944 tcg_temp_free(t2);
69d6275b 945 }
fdf9b3e8
FB
946 return;
947 case 0x3008: /* sub Rm,Rn */
7efbe241 948 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
949 return;
950 case 0x300a: /* subc Rm,Rn */
22b88fd7 951 {
d0f44a55
AJ
952 TCGv t0, t1;
953 t0 = tcg_const_tl(0);
22b88fd7 954 t1 = tcg_temp_new();
d0f44a55
AJ
955 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
956 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
957 REG(B11_8), t0, t1, cpu_sr_t);
958 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
22b88fd7 959 tcg_temp_free(t0);
d0f44a55 960 tcg_temp_free(t1);
22b88fd7 961 }
fdf9b3e8
FB
962 return;
963 case 0x300b: /* subv Rm,Rn */
ad8d25a1
AJ
964 {
965 TCGv t0, t1, t2;
966 t0 = tcg_temp_new();
967 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
968 t1 = tcg_temp_new();
969 tcg_gen_xor_i32(t1, t0, REG(B7_4));
970 t2 = tcg_temp_new();
971 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
972 tcg_gen_and_i32(t1, t1, t2);
973 tcg_temp_free(t2);
34086945 974 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
ad8d25a1
AJ
975 tcg_temp_free(t1);
976 tcg_gen_mov_i32(REG(B11_8), t0);
977 tcg_temp_free(t0);
978 }
fdf9b3e8
FB
979 return;
980 case 0x2008: /* tst Rm,Rn */
c55497ec 981 {
a7812ae4 982 TCGv val = tcg_temp_new();
c55497ec 983 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
34086945 984 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec
AJ
985 tcg_temp_free(val);
986 }
fdf9b3e8
FB
987 return;
988 case 0x200a: /* xor Rm,Rn */
7efbe241 989 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8 990 return;
e67888a7 991 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 992 CHECK_FPU_ENABLED
a6215749 993 if (ctx->tbflags & FPSCR_SZ) {
bdcb3739
RH
994 int xsrc = XHACK(B7_4);
995 int xdst = XHACK(B11_8);
996 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
997 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
eda9b09b 998 } else {
7c9f7038 999 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
eda9b09b
FB
1000 }
1001 return;
e67888a7 1002 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
f6198371 1003 CHECK_FPU_ENABLED
a6215749 1004 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1005 TCGv_i64 fp = tcg_temp_new_i64();
1006 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1007 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
1008 tcg_temp_free_i64(fp);
eda9b09b 1009 } else {
7c9f7038 1010 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
eda9b09b
FB
1011 }
1012 return;
e67888a7 1013 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 1014 CHECK_FPU_ENABLED
a6215749 1015 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1016 TCGv_i64 fp = tcg_temp_new_i64();
1017 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1018 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1019 tcg_temp_free_i64(fp);
eda9b09b 1020 } else {
7c9f7038 1021 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
eda9b09b
FB
1022 }
1023 return;
e67888a7 1024 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
f6198371 1025 CHECK_FPU_ENABLED
a6215749 1026 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1027 TCGv_i64 fp = tcg_temp_new_i64();
1028 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1029 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1030 tcg_temp_free_i64(fp);
1031 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
eda9b09b 1032 } else {
7c9f7038 1033 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
cc4ba6a9 1034 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
eda9b09b
FB
1035 }
1036 return;
e67888a7 1037 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
f6198371 1038 CHECK_FPU_ENABLED
4d57fa50
RH
1039 {
1040 TCGv addr = tcg_temp_new_i32();
1041 if (ctx->tbflags & FPSCR_SZ) {
1042 TCGv_i64 fp = tcg_temp_new_i64();
1043 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1044 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1045 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1046 tcg_temp_free_i64(fp);
1047 } else {
1048 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1049 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1050 }
1051 tcg_gen_mov_i32(REG(B11_8), addr);
1052 tcg_temp_free(addr);
1053 }
eda9b09b 1054 return;
e67888a7 1055 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
f6198371 1056 CHECK_FPU_ENABLED
cc4ba6a9 1057 {
a7812ae4 1058 TCGv addr = tcg_temp_new_i32();
cc4ba6a9 1059 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
a6215749 1060 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1061 TCGv_i64 fp = tcg_temp_new_i64();
1062 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
1063 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1064 tcg_temp_free_i64(fp);
cc4ba6a9 1065 } else {
7c9f7038 1066 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
cc4ba6a9
AJ
1067 }
1068 tcg_temp_free(addr);
eda9b09b
FB
1069 }
1070 return;
e67888a7 1071 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
f6198371 1072 CHECK_FPU_ENABLED
cc4ba6a9 1073 {
a7812ae4 1074 TCGv addr = tcg_temp_new();
cc4ba6a9 1075 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
a6215749 1076 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1077 TCGv_i64 fp = tcg_temp_new_i64();
1078 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1079 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1080 tcg_temp_free_i64(fp);
cc4ba6a9 1081 } else {
7c9f7038 1082 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
cc4ba6a9
AJ
1083 }
1084 tcg_temp_free(addr);
eda9b09b
FB
1085 }
1086 return;
e67888a7
TS
1087 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1088 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1089 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1090 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1091 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1092 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
cc4ba6a9 1093 {
f6198371 1094 CHECK_FPU_ENABLED
a6215749 1095 if (ctx->tbflags & FPSCR_PR) {
a7812ae4
PB
1096 TCGv_i64 fp0, fp1;
1097
93dc9c89
RH
1098 if (ctx->opcode & 0x0110) {
1099 goto do_illegal;
1100 }
a7812ae4
PB
1101 fp0 = tcg_temp_new_i64();
1102 fp1 = tcg_temp_new_i64();
1e0b21d8
RH
1103 gen_load_fpr64(ctx, fp0, B11_8);
1104 gen_load_fpr64(ctx, fp1, B7_4);
a7812ae4
PB
1105 switch (ctx->opcode & 0xf00f) {
1106 case 0xf000: /* fadd Rm,Rn */
485d0035 1107 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1108 break;
1109 case 0xf001: /* fsub Rm,Rn */
485d0035 1110 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1111 break;
1112 case 0xf002: /* fmul Rm,Rn */
485d0035 1113 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1114 break;
1115 case 0xf003: /* fdiv Rm,Rn */
485d0035 1116 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1117 break;
1118 case 0xf004: /* fcmp/eq Rm,Rn */
92f1f83e 1119 gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
a7812ae4
PB
1120 return;
1121 case 0xf005: /* fcmp/gt Rm,Rn */
92f1f83e 1122 gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
a7812ae4
PB
1123 return;
1124 }
1e0b21d8 1125 gen_store_fpr64(ctx, fp0, B11_8);
a7812ae4
PB
1126 tcg_temp_free_i64(fp0);
1127 tcg_temp_free_i64(fp1);
1128 } else {
a7812ae4
PB
1129 switch (ctx->opcode & 0xf00f) {
1130 case 0xf000: /* fadd Rm,Rn */
7c9f7038
RH
1131 gen_helper_fadd_FT(FREG(B11_8), cpu_env,
1132 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1133 break;
1134 case 0xf001: /* fsub Rm,Rn */
7c9f7038
RH
1135 gen_helper_fsub_FT(FREG(B11_8), cpu_env,
1136 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1137 break;
1138 case 0xf002: /* fmul Rm,Rn */
7c9f7038
RH
1139 gen_helper_fmul_FT(FREG(B11_8), cpu_env,
1140 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1141 break;
1142 case 0xf003: /* fdiv Rm,Rn */
7c9f7038
RH
1143 gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
1144 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1145 break;
1146 case 0xf004: /* fcmp/eq Rm,Rn */
92f1f83e 1147 gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
7c9f7038 1148 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1149 return;
1150 case 0xf005: /* fcmp/gt Rm,Rn */
92f1f83e 1151 gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
7c9f7038 1152 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1153 return;
1154 }
cc4ba6a9 1155 }
ea6cf6be
TS
1156 }
1157 return;
5b7141a1 1158 case 0xf00e: /* fmac FR0,RM,Rn */
7e9f7ca8
RH
1159 CHECK_FPU_ENABLED
1160 CHECK_FPSCR_PR_0
1161 gen_helper_fmac_FT(FREG(B11_8), cpu_env,
1162 FREG(0), FREG(B7_4), FREG(B11_8));
1163 return;
fdf9b3e8
FB
1164 }
1165
1166 switch (ctx->opcode & 0xff00) {
1167 case 0xc900: /* and #imm,R0 */
7efbe241 1168 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1169 return;
24988dc2 1170 case 0xcd00: /* and.b #imm,@(R0,GBR) */
c55497ec
AJ
1171 {
1172 TCGv addr, val;
a7812ae4 1173 addr = tcg_temp_new();
c55497ec 1174 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1175 val = tcg_temp_new();
3376f415 1176 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1177 tcg_gen_andi_i32(val, val, B7_0);
3376f415 1178 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec
AJ
1179 tcg_temp_free(val);
1180 tcg_temp_free(addr);
1181 }
fdf9b3e8
FB
1182 return;
1183 case 0x8b00: /* bf label */
1184 CHECK_NOT_DELAY_SLOT
6f1c2af6 1185 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
fdf9b3e8
FB
1186 return;
1187 case 0x8f00: /* bf/s label */
1188 CHECK_NOT_DELAY_SLOT
ac9707ea 1189 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
6f1c2af6 1190 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
a6215749 1191 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
fdf9b3e8
FB
1192 return;
1193 case 0x8900: /* bt label */
1194 CHECK_NOT_DELAY_SLOT
6f1c2af6 1195 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
fdf9b3e8
FB
1196 return;
1197 case 0x8d00: /* bt/s label */
1198 CHECK_NOT_DELAY_SLOT
ac9707ea 1199 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
6f1c2af6 1200 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
a6215749 1201 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
fdf9b3e8
FB
1202 return;
1203 case 0x8800: /* cmp/eq #imm,R0 */
34086945 1204 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
fdf9b3e8
FB
1205 return;
1206 case 0xc400: /* mov.b @(disp,GBR),R0 */
c55497ec 1207 {
a7812ae4 1208 TCGv addr = tcg_temp_new();
c55497ec 1209 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1210 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec
AJ
1211 tcg_temp_free(addr);
1212 }
fdf9b3e8
FB
1213 return;
1214 case 0xc500: /* mov.w @(disp,GBR),R0 */
c55497ec 1215 {
a7812ae4 1216 TCGv addr = tcg_temp_new();
c55497ec 1217 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
3376f415 1218 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
1219 tcg_temp_free(addr);
1220 }
fdf9b3e8
FB
1221 return;
1222 case 0xc600: /* mov.l @(disp,GBR),R0 */
c55497ec 1223 {
a7812ae4 1224 TCGv addr = tcg_temp_new();
c55497ec 1225 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
3376f415 1226 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
1227 tcg_temp_free(addr);
1228 }
fdf9b3e8
FB
1229 return;
1230 case 0xc000: /* mov.b R0,@(disp,GBR) */
c55497ec 1231 {
a7812ae4 1232 TCGv addr = tcg_temp_new();
c55497ec 1233 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1234 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec
AJ
1235 tcg_temp_free(addr);
1236 }
fdf9b3e8
FB
1237 return;
1238 case 0xc100: /* mov.w R0,@(disp,GBR) */
c55497ec 1239 {
a7812ae4 1240 TCGv addr = tcg_temp_new();
c55497ec 1241 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
3376f415 1242 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
c55497ec
AJ
1243 tcg_temp_free(addr);
1244 }
fdf9b3e8
FB
1245 return;
1246 case 0xc200: /* mov.l R0,@(disp,GBR) */
c55497ec 1247 {
a7812ae4 1248 TCGv addr = tcg_temp_new();
c55497ec 1249 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
3376f415 1250 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
c55497ec
AJ
1251 tcg_temp_free(addr);
1252 }
fdf9b3e8
FB
1253 return;
1254 case 0x8000: /* mov.b R0,@(disp,Rn) */
c55497ec 1255 {
a7812ae4 1256 TCGv addr = tcg_temp_new();
c55497ec 1257 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1258 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec
AJ
1259 tcg_temp_free(addr);
1260 }
fdf9b3e8
FB
1261 return;
1262 case 0x8100: /* mov.w R0,@(disp,Rn) */
c55497ec 1263 {
a7812ae4 1264 TCGv addr = tcg_temp_new();
c55497ec 1265 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
3376f415 1266 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
c55497ec
AJ
1267 tcg_temp_free(addr);
1268 }
fdf9b3e8
FB
1269 return;
1270 case 0x8400: /* mov.b @(disp,Rn),R0 */
c55497ec 1271 {
a7812ae4 1272 TCGv addr = tcg_temp_new();
c55497ec 1273 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1274 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec
AJ
1275 tcg_temp_free(addr);
1276 }
fdf9b3e8
FB
1277 return;
1278 case 0x8500: /* mov.w @(disp,Rn),R0 */
c55497ec 1279 {
a7812ae4 1280 TCGv addr = tcg_temp_new();
c55497ec 1281 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
3376f415 1282 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
1283 tcg_temp_free(addr);
1284 }
fdf9b3e8
FB
1285 return;
1286 case 0xc700: /* mova @(disp,PC),R0 */
6f1c2af6
RH
1287 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1288 4 + B7_0 * 4) & ~3);
fdf9b3e8
FB
1289 return;
1290 case 0xcb00: /* or #imm,R0 */
7efbe241 1291 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1292 return;
24988dc2 1293 case 0xcf00: /* or.b #imm,@(R0,GBR) */
c55497ec
AJ
1294 {
1295 TCGv addr, val;
a7812ae4 1296 addr = tcg_temp_new();
c55497ec 1297 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1298 val = tcg_temp_new();
3376f415 1299 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1300 tcg_gen_ori_i32(val, val, B7_0);
3376f415 1301 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec
AJ
1302 tcg_temp_free(val);
1303 tcg_temp_free(addr);
1304 }
fdf9b3e8
FB
1305 return;
1306 case 0xc300: /* trapa #imm */
c55497ec
AJ
1307 {
1308 TCGv imm;
1309 CHECK_NOT_DELAY_SLOT
ac9707ea 1310 gen_save_cpu_state(ctx, true);
c55497ec 1311 imm = tcg_const_i32(B7_0);
485d0035 1312 gen_helper_trapa(cpu_env, imm);
c55497ec 1313 tcg_temp_free(imm);
6f1c2af6 1314 ctx->base.is_jmp = DISAS_NORETURN;
c55497ec 1315 }
fdf9b3e8
FB
1316 return;
1317 case 0xc800: /* tst #imm,R0 */
c55497ec 1318 {
a7812ae4 1319 TCGv val = tcg_temp_new();
c55497ec 1320 tcg_gen_andi_i32(val, REG(0), B7_0);
34086945 1321 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec
AJ
1322 tcg_temp_free(val);
1323 }
fdf9b3e8 1324 return;
24988dc2 1325 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
c55497ec 1326 {
a7812ae4 1327 TCGv val = tcg_temp_new();
c55497ec 1328 tcg_gen_add_i32(val, REG(0), cpu_gbr);
3376f415 1329 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
c55497ec 1330 tcg_gen_andi_i32(val, val, B7_0);
34086945 1331 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec
AJ
1332 tcg_temp_free(val);
1333 }
fdf9b3e8
FB
1334 return;
1335 case 0xca00: /* xor #imm,R0 */
7efbe241 1336 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1337 return;
24988dc2 1338 case 0xce00: /* xor.b #imm,@(R0,GBR) */
c55497ec
AJ
1339 {
1340 TCGv addr, val;
a7812ae4 1341 addr = tcg_temp_new();
c55497ec 1342 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1343 val = tcg_temp_new();
3376f415 1344 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1345 tcg_gen_xori_i32(val, val, B7_0);
3376f415 1346 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec
AJ
1347 tcg_temp_free(val);
1348 tcg_temp_free(addr);
1349 }
fdf9b3e8
FB
1350 return;
1351 }
1352
1353 switch (ctx->opcode & 0xf08f) {
1354 case 0x408e: /* ldc Rm,Rn_BANK */
fe25591e 1355 CHECK_PRIVILEGED
7efbe241 1356 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
fdf9b3e8
FB
1357 return;
1358 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
fe25591e 1359 CHECK_PRIVILEGED
3376f415 1360 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
7efbe241 1361 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
fdf9b3e8
FB
1362 return;
1363 case 0x0082: /* stc Rm_BANK,Rn */
fe25591e 1364 CHECK_PRIVILEGED
7efbe241 1365 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
fdf9b3e8
FB
1366 return;
1367 case 0x4083: /* stc.l Rm_BANK,@-Rn */
fe25591e 1368 CHECK_PRIVILEGED
c55497ec 1369 {
a7812ae4 1370 TCGv addr = tcg_temp_new();
c55497ec 1371 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 1372 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
3101e99c 1373 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 1374 tcg_temp_free(addr);
c55497ec 1375 }
fdf9b3e8
FB
1376 return;
1377 }
1378
1379 switch (ctx->opcode & 0xf0ff) {
1380 case 0x0023: /* braf Rn */
7efbe241 1381 CHECK_NOT_DELAY_SLOT
6f1c2af6 1382 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
a6215749 1383 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1384 ctx->delayed_pc = (uint32_t) - 1;
1385 return;
1386 case 0x0003: /* bsrf Rn */
7efbe241 1387 CHECK_NOT_DELAY_SLOT
6f1c2af6 1388 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
7efbe241 1389 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
a6215749 1390 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1391 ctx->delayed_pc = (uint32_t) - 1;
1392 return;
1393 case 0x4015: /* cmp/pl Rn */
34086945 1394 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1395 return;
1396 case 0x4011: /* cmp/pz Rn */
34086945 1397 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1398 return;
1399 case 0x4010: /* dt Rn */
7efbe241 1400 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
34086945 1401 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1402 return;
1403 case 0x402b: /* jmp @Rn */
7efbe241
AJ
1404 CHECK_NOT_DELAY_SLOT
1405 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
a6215749 1406 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1407 ctx->delayed_pc = (uint32_t) - 1;
1408 return;
1409 case 0x400b: /* jsr @Rn */
7efbe241 1410 CHECK_NOT_DELAY_SLOT
6f1c2af6 1411 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
7efbe241 1412 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
a6215749 1413 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1414 ctx->delayed_pc = (uint32_t) - 1;
1415 return;
fe25591e
AJ
1416 case 0x400e: /* ldc Rm,SR */
1417 CHECK_PRIVILEGED
34086945
AJ
1418 {
1419 TCGv val = tcg_temp_new();
1420 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1421 gen_write_sr(val);
1422 tcg_temp_free(val);
6f1c2af6 1423 ctx->base.is_jmp = DISAS_STOP;
34086945 1424 }
390af821 1425 return;
fe25591e
AJ
1426 case 0x4007: /* ldc.l @Rm+,SR */
1427 CHECK_PRIVILEGED
c55497ec 1428 {
a7812ae4 1429 TCGv val = tcg_temp_new();
3376f415 1430 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
34086945
AJ
1431 tcg_gen_andi_i32(val, val, 0x700083f3);
1432 gen_write_sr(val);
c55497ec
AJ
1433 tcg_temp_free(val);
1434 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
6f1c2af6 1435 ctx->base.is_jmp = DISAS_STOP;
c55497ec 1436 }
390af821 1437 return;
fe25591e
AJ
1438 case 0x0002: /* stc SR,Rn */
1439 CHECK_PRIVILEGED
34086945 1440 gen_read_sr(REG(B11_8));
390af821 1441 return;
fe25591e
AJ
1442 case 0x4003: /* stc SR,@-Rn */
1443 CHECK_PRIVILEGED
c55497ec 1444 {
a7812ae4 1445 TCGv addr = tcg_temp_new();
34086945 1446 TCGv val = tcg_temp_new();
c55497ec 1447 tcg_gen_subi_i32(addr, REG(B11_8), 4);
34086945
AJ
1448 gen_read_sr(val);
1449 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
3101e99c 1450 tcg_gen_mov_i32(REG(B11_8), addr);
34086945 1451 tcg_temp_free(val);
c55497ec 1452 tcg_temp_free(addr);
c55497ec 1453 }
390af821 1454 return;
8e9b0678 1455#define LD(reg,ldnum,ldpnum,prechk) \
fdf9b3e8 1456 case ldnum: \
fe25591e 1457 prechk \
7efbe241 1458 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
fdf9b3e8
FB
1459 return; \
1460 case ldpnum: \
fe25591e 1461 prechk \
3376f415 1462 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
7efbe241 1463 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
8e9b0678
AC
1464 return;
1465#define ST(reg,stnum,stpnum,prechk) \
fdf9b3e8 1466 case stnum: \
fe25591e 1467 prechk \
7efbe241 1468 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
fdf9b3e8
FB
1469 return; \
1470 case stpnum: \
fe25591e 1471 prechk \
c55497ec 1472 { \
3101e99c 1473 TCGv addr = tcg_temp_new(); \
c55497ec 1474 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
3376f415 1475 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
3101e99c 1476 tcg_gen_mov_i32(REG(B11_8), addr); \
c55497ec 1477 tcg_temp_free(addr); \
86e0abc7 1478 } \
fdf9b3e8 1479 return;
8e9b0678
AC
1480#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1481 LD(reg,ldnum,ldpnum,prechk) \
1482 ST(reg,stnum,stpnum,prechk)
fe25591e
AJ
1483 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1484 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1485 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1486 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
935fc175 1487 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
ccae24d4 1488 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
fe25591e
AJ
1489 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1490 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1491 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1492 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
d8299bcc 1493 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
390af821 1494 case 0x406a: /* lds Rm,FPSCR */
d8299bcc 1495 CHECK_FPU_ENABLED
485d0035 1496 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
6f1c2af6 1497 ctx->base.is_jmp = DISAS_STOP;
390af821
AJ
1498 return;
1499 case 0x4066: /* lds.l @Rm+,FPSCR */
d8299bcc 1500 CHECK_FPU_ENABLED
c55497ec 1501 {
a7812ae4 1502 TCGv addr = tcg_temp_new();
3376f415 1503 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
c55497ec 1504 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
485d0035 1505 gen_helper_ld_fpscr(cpu_env, addr);
c55497ec 1506 tcg_temp_free(addr);
6f1c2af6 1507 ctx->base.is_jmp = DISAS_STOP;
c55497ec 1508 }
390af821
AJ
1509 return;
1510 case 0x006a: /* sts FPSCR,Rn */
d8299bcc 1511 CHECK_FPU_ENABLED
c55497ec 1512 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
390af821
AJ
1513 return;
1514 case 0x4062: /* sts FPSCR,@-Rn */
d8299bcc 1515 CHECK_FPU_ENABLED
c55497ec
AJ
1516 {
1517 TCGv addr, val;
a7812ae4 1518 val = tcg_temp_new();
c55497ec 1519 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
a7812ae4 1520 addr = tcg_temp_new();
c55497ec 1521 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 1522 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
3101e99c 1523 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec
AJ
1524 tcg_temp_free(addr);
1525 tcg_temp_free(val);
c55497ec 1526 }
390af821 1527 return;
fdf9b3e8 1528 case 0x00c3: /* movca.l R0,@Rm */
852d481f
EI
1529 {
1530 TCGv val = tcg_temp_new();
3376f415 1531 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
485d0035 1532 gen_helper_movcal(cpu_env, REG(B11_8), val);
3376f415 1533 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
e691e0ed 1534 tcg_temp_free(val);
852d481f
EI
1535 }
1536 ctx->has_movcal = 1;
fdf9b3e8 1537 return;
143021b2 1538 case 0x40a9: /* movua.l @Rm,R0 */
ccae24d4 1539 CHECK_SH4A
143021b2 1540 /* Load non-boundary-aligned data */
ccae24d4
RH
1541 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1542 MO_TEUL | MO_UNALN);
1543 return;
143021b2
AJ
1544 break;
1545 case 0x40e9: /* movua.l @Rm+,R0 */
ccae24d4 1546 CHECK_SH4A
143021b2 1547 /* Load non-boundary-aligned data */
ccae24d4
RH
1548 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1549 MO_TEUL | MO_UNALN);
1550 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1551 return;
143021b2 1552 break;
fdf9b3e8 1553 case 0x0029: /* movt Rn */
34086945 1554 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
fdf9b3e8 1555 return;
66c7c806
AJ
1556 case 0x0073:
1557 /* MOVCO.L
f85da308
RH
1558 * LDST -> T
1559 * If (T == 1) R0 -> (Rn)
1560 * 0 -> LDST
1561 *
1562 * The above description doesn't work in a parallel context.
1563 * Since we currently support no smp boards, this implies user-mode.
1564 * But we can still support the official mechanism while user-mode
1565 * is single-threaded. */
ccae24d4
RH
1566 CHECK_SH4A
1567 {
f85da308
RH
1568 TCGLabel *fail = gen_new_label();
1569 TCGLabel *done = gen_new_label();
1570
6f1c2af6 1571 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1572 TCGv tmp;
1573
1574 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1575 cpu_lock_addr, fail);
1576 tmp = tcg_temp_new();
1577 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1578 REG(0), ctx->memidx, MO_TEUL);
1579 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1580 tcg_temp_free(tmp);
1581 } else {
1582 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1583 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1584 tcg_gen_movi_i32(cpu_sr_t, 1);
1585 }
1586 tcg_gen_br(done);
1587
1588 gen_set_label(fail);
1589 tcg_gen_movi_i32(cpu_sr_t, 0);
1590
1591 gen_set_label(done);
1592 tcg_gen_movi_i32(cpu_lock_addr, -1);
ccae24d4 1593 }
f85da308 1594 return;
66c7c806
AJ
1595 case 0x0063:
1596 /* MOVLI.L @Rm,R0
f85da308
RH
1597 * 1 -> LDST
1598 * (Rm) -> R0
1599 * When interrupt/exception
1600 * occurred 0 -> LDST
1601 *
1602 * In a parallel context, we must also save the loaded value
1603 * for use with the cmpxchg that we'll use with movco.l. */
ccae24d4 1604 CHECK_SH4A
6f1c2af6 1605 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1606 TCGv tmp = tcg_temp_new();
1607 tcg_gen_mov_i32(tmp, REG(B11_8));
1608 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1609 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1610 tcg_gen_mov_i32(cpu_lock_addr, tmp);
1611 tcg_temp_free(tmp);
1612 } else {
1613 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1614 tcg_gen_movi_i32(cpu_lock_addr, 0);
1615 }
ccae24d4 1616 return;
fdf9b3e8 1617 case 0x0093: /* ocbi @Rn */
c55497ec 1618 {
485d0035 1619 gen_helper_ocbi(cpu_env, REG(B11_8));
c55497ec 1620 }
fdf9b3e8 1621 return;
24988dc2 1622 case 0x00a3: /* ocbp @Rn */
fdf9b3e8 1623 case 0x00b3: /* ocbwb @Rn */
0cdb9554
AJ
1624 /* These instructions are supposed to do nothing in case of
1625 a cache miss. Given that we only partially emulate caches
1626 it is safe to simply ignore them. */
fdf9b3e8
FB
1627 return;
1628 case 0x0083: /* pref @Rn */
1629 return;
71968fa6 1630 case 0x00d3: /* prefi @Rn */
ccae24d4
RH
1631 CHECK_SH4A
1632 return;
71968fa6 1633 case 0x00e3: /* icbi @Rn */
ccae24d4
RH
1634 CHECK_SH4A
1635 return;
71968fa6 1636 case 0x00ab: /* synco */
ccae24d4
RH
1637 CHECK_SH4A
1638 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1639 return;
aa351317 1640 break;
fdf9b3e8 1641 case 0x4024: /* rotcl Rn */
c55497ec 1642 {
a7812ae4 1643 TCGv tmp = tcg_temp_new();
34086945
AJ
1644 tcg_gen_mov_i32(tmp, cpu_sr_t);
1645 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
c55497ec 1646 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1647 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec
AJ
1648 tcg_temp_free(tmp);
1649 }
fdf9b3e8
FB
1650 return;
1651 case 0x4025: /* rotcr Rn */
c55497ec 1652 {
a7812ae4 1653 TCGv tmp = tcg_temp_new();
34086945
AJ
1654 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1655 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
c55497ec 1656 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
34086945 1657 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec
AJ
1658 tcg_temp_free(tmp);
1659 }
fdf9b3e8
FB
1660 return;
1661 case 0x4004: /* rotl Rn */
2411fde9 1662 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1663 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1664 return;
1665 case 0x4005: /* rotr Rn */
34086945 1666 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
2411fde9 1667 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1668 return;
1669 case 0x4000: /* shll Rn */
1670 case 0x4020: /* shal Rn */
34086945 1671 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
7efbe241 1672 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1673 return;
1674 case 0x4021: /* shar Rn */
34086945 1675 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1676 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1677 return;
1678 case 0x4001: /* shlr Rn */
34086945 1679 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1680 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1681 return;
1682 case 0x4008: /* shll2 Rn */
7efbe241 1683 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1684 return;
1685 case 0x4018: /* shll8 Rn */
7efbe241 1686 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1687 return;
1688 case 0x4028: /* shll16 Rn */
7efbe241 1689 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1690 return;
1691 case 0x4009: /* shlr2 Rn */
7efbe241 1692 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1693 return;
1694 case 0x4019: /* shlr8 Rn */
7efbe241 1695 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1696 return;
1697 case 0x4029: /* shlr16 Rn */
7efbe241 1698 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1699 return;
1700 case 0x401b: /* tas.b @Rn */
cb32f179
AJ
1701 {
1702 TCGv val = tcg_const_i32(0x80);
1703 tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1704 ctx->memidx, MO_UB);
34086945 1705 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
cb32f179
AJ
1706 tcg_temp_free(val);
1707 }
1708 return;
e67888a7 1709 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
f6198371 1710 CHECK_FPU_ENABLED
7c9f7038 1711 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
eda9b09b 1712 return;
e67888a7 1713 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
f6198371 1714 CHECK_FPU_ENABLED
7c9f7038 1715 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
eda9b09b 1716 return;
e67888a7 1717 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
f6198371 1718 CHECK_FPU_ENABLED
a6215749 1719 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1720 TCGv_i64 fp;
93dc9c89
RH
1721 if (ctx->opcode & 0x0100) {
1722 goto do_illegal;
1723 }
a7812ae4 1724 fp = tcg_temp_new_i64();
485d0035 1725 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1726 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1727 tcg_temp_free_i64(fp);
ea6cf6be
TS
1728 }
1729 else {
7c9f7038 1730 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
ea6cf6be
TS
1731 }
1732 return;
e67888a7 1733 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
f6198371 1734 CHECK_FPU_ENABLED
a6215749 1735 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1736 TCGv_i64 fp;
93dc9c89
RH
1737 if (ctx->opcode & 0x0100) {
1738 goto do_illegal;
1739 }
a7812ae4 1740 fp = tcg_temp_new_i64();
1e0b21d8 1741 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1742 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
a7812ae4 1743 tcg_temp_free_i64(fp);
ea6cf6be
TS
1744 }
1745 else {
7c9f7038 1746 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
ea6cf6be
TS
1747 }
1748 return;
24988dc2 1749 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
f6198371 1750 CHECK_FPU_ENABLED
7c9f7038 1751 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
24988dc2 1752 return;
57f5c1b0 1753 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
f6198371 1754 CHECK_FPU_ENABLED
7c9f7038 1755 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
24988dc2
AJ
1756 return;
1757 case 0xf06d: /* fsqrt FRn */
f6198371 1758 CHECK_FPU_ENABLED
a6215749 1759 if (ctx->tbflags & FPSCR_PR) {
93dc9c89
RH
1760 if (ctx->opcode & 0x0100) {
1761 goto do_illegal;
1762 }
a7812ae4 1763 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1764 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1765 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1e0b21d8 1766 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1767 tcg_temp_free_i64(fp);
24988dc2 1768 } else {
7c9f7038 1769 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2
AJ
1770 }
1771 return;
1772 case 0xf07d: /* fsrra FRn */
f6198371 1773 CHECK_FPU_ENABLED
11b7aa23
RH
1774 CHECK_FPSCR_PR_0
1775 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2 1776 break;
e67888a7 1777 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
f6198371 1778 CHECK_FPU_ENABLED
7e9f7ca8
RH
1779 CHECK_FPSCR_PR_0
1780 tcg_gen_movi_i32(FREG(B11_8), 0);
1781 return;
e67888a7 1782 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
f6198371 1783 CHECK_FPU_ENABLED
7e9f7ca8
RH
1784 CHECK_FPSCR_PR_0
1785 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1786 return;
24988dc2 1787 case 0xf0ad: /* fcnvsd FPUL,DRn */
f6198371 1788 CHECK_FPU_ENABLED
cc4ba6a9 1789 {
a7812ae4 1790 TCGv_i64 fp = tcg_temp_new_i64();
485d0035 1791 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1792 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1793 tcg_temp_free_i64(fp);
cc4ba6a9 1794 }
24988dc2
AJ
1795 return;
1796 case 0xf0bd: /* fcnvds DRn,FPUL */
f6198371 1797 CHECK_FPU_ENABLED
cc4ba6a9 1798 {
a7812ae4 1799 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1800 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1801 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
a7812ae4 1802 tcg_temp_free_i64(fp);
cc4ba6a9 1803 }
24988dc2 1804 return;
af8c2bde
AJ
1805 case 0xf0ed: /* fipr FVm,FVn */
1806 CHECK_FPU_ENABLED
7e9f7ca8
RH
1807 CHECK_FPSCR_PR_1
1808 {
1809 TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1810 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
485d0035 1811 gen_helper_fipr(cpu_env, m, n);
af8c2bde
AJ
1812 tcg_temp_free(m);
1813 tcg_temp_free(n);
1814 return;
1815 }
1816 break;
17075f10
AJ
1817 case 0xf0fd: /* ftrv XMTRX,FVn */
1818 CHECK_FPU_ENABLED
7e9f7ca8
RH
1819 CHECK_FPSCR_PR_1
1820 {
1821 if ((ctx->opcode & 0x0300) != 0x0100) {
1822 goto do_illegal;
1823 }
1824 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
485d0035 1825 gen_helper_ftrv(cpu_env, n);
17075f10
AJ
1826 tcg_temp_free(n);
1827 return;
1828 }
1829 break;
fdf9b3e8 1830 }
bacc637a 1831#if 0
fdf9b3e8 1832 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
6f1c2af6 1833 ctx->opcode, ctx->base.pc_next);
bacc637a
AJ
1834 fflush(stderr);
1835#endif
6b98213d 1836 do_illegal:
9a562ae7 1837 if (ctx->envflags & DELAY_SLOT_MASK) {
dec16c6e
RH
1838 do_illegal_slot:
1839 gen_save_cpu_state(ctx, true);
485d0035 1840 gen_helper_raise_slot_illegal_instruction(cpu_env);
86865c5f 1841 } else {
dec16c6e 1842 gen_save_cpu_state(ctx, true);
485d0035 1843 gen_helper_raise_illegal_instruction(cpu_env);
86865c5f 1844 }
6f1c2af6 1845 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042
RH
1846 return;
1847
1848 do_fpu_disabled:
1849 gen_save_cpu_state(ctx, true);
1850 if (ctx->envflags & DELAY_SLOT_MASK) {
1851 gen_helper_raise_slot_fpu_disable(cpu_env);
1852 } else {
1853 gen_helper_raise_fpu_disable(cpu_env);
1854 }
6f1c2af6 1855 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042 1856 return;
823029f9
TS
1857}
1858
b1d8e52e 1859static void decode_opc(DisasContext * ctx)
823029f9 1860{
a6215749 1861 uint32_t old_flags = ctx->envflags;
823029f9
TS
1862
1863 _decode_opc(ctx);
1864
9a562ae7 1865 if (old_flags & DELAY_SLOT_MASK) {
39682608 1866 /* go out of the delay slot */
9a562ae7 1867 ctx->envflags &= ~DELAY_SLOT_MASK;
4bfa602b
RH
1868
1869 /* When in an exclusive region, we must continue to the end
1870 for conditional branches. */
1871 if (ctx->tbflags & GUSA_EXCLUSIVE
1872 && old_flags & DELAY_SLOT_CONDITIONAL) {
1873 gen_delayed_conditional_jump(ctx);
1874 return;
1875 }
1876 /* Otherwise this is probably an invalid gUSA region.
1877 Drop the GUSA bits so the next TB doesn't see them. */
1878 ctx->envflags &= ~GUSA_MASK;
1879
ac9707ea 1880 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
823029f9
TS
1881 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1882 gen_delayed_conditional_jump(ctx);
be53081a 1883 } else {
823029f9
TS
1884 gen_jump(ctx);
1885 }
4bfa602b
RH
1886 }
1887}
1888
1889#ifdef CONFIG_USER_ONLY
1890/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1891 Upon an interrupt, a real kernel would simply notice magic values in
1892 the registers and reset the PC to the start of the sequence.
1893
1894 For QEMU, we cannot do this in quite the same way. Instead, we notice
1895 the normal start of such a sequence (mov #-x,r15). While we can handle
1896 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1897 sequences and transform them into atomic operations as seen by the host.
1898*/
be0e3d7a 1899static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
4bfa602b 1900{
d6a6cffd
RH
1901 uint16_t insns[5];
1902 int ld_adr, ld_dst, ld_mop;
1903 int op_dst, op_src, op_opc;
1904 int mv_src, mt_dst, st_src, st_mop;
1905 TCGv op_arg;
6f1c2af6
RH
1906 uint32_t pc = ctx->base.pc_next;
1907 uint32_t pc_end = ctx->base.tb->cs_base;
4bfa602b 1908 int max_insns = (pc_end - pc) / 2;
d6a6cffd 1909 int i;
4bfa602b 1910
d6a6cffd
RH
1911 /* The state machine below will consume only a few insns.
1912 If there are more than that in a region, fail now. */
1913 if (max_insns > ARRAY_SIZE(insns)) {
1914 goto fail;
1915 }
1916
1917 /* Read all of the insns for the region. */
1918 for (i = 0; i < max_insns; ++i) {
1919 insns[i] = cpu_lduw_code(env, pc + i * 2);
1920 }
1921
1922 ld_adr = ld_dst = ld_mop = -1;
1923 mv_src = -1;
1924 op_dst = op_src = op_opc = -1;
1925 mt_dst = -1;
1926 st_src = st_mop = -1;
f764718d 1927 op_arg = NULL;
d6a6cffd
RH
1928 i = 0;
1929
1930#define NEXT_INSN \
1931 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1932
1933 /*
1934 * Expect a load to begin the region.
1935 */
1936 NEXT_INSN;
1937 switch (ctx->opcode & 0xf00f) {
1938 case 0x6000: /* mov.b @Rm,Rn */
1939 ld_mop = MO_SB;
1940 break;
1941 case 0x6001: /* mov.w @Rm,Rn */
1942 ld_mop = MO_TESW;
1943 break;
1944 case 0x6002: /* mov.l @Rm,Rn */
1945 ld_mop = MO_TESL;
1946 break;
1947 default:
1948 goto fail;
1949 }
1950 ld_adr = B7_4;
1951 ld_dst = B11_8;
1952 if (ld_adr == ld_dst) {
1953 goto fail;
1954 }
1955 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1956 op_dst = ld_dst;
1957
1958 /*
1959 * Expect an optional register move.
1960 */
1961 NEXT_INSN;
1962 switch (ctx->opcode & 0xf00f) {
1963 case 0x6003: /* mov Rm,Rn */
1964 /* Here we want to recognize ld_dst being saved for later consumtion,
1965 or for another input register being copied so that ld_dst need not
1966 be clobbered during the operation. */
1967 op_dst = B11_8;
1968 mv_src = B7_4;
1969 if (op_dst == ld_dst) {
1970 /* Overwriting the load output. */
1971 goto fail;
1972 }
1973 if (mv_src != ld_dst) {
1974 /* Copying a new input; constrain op_src to match the load. */
1975 op_src = ld_dst;
1976 }
1977 break;
1978
1979 default:
1980 /* Put back and re-examine as operation. */
1981 --i;
1982 }
1983
1984 /*
1985 * Expect the operation.
1986 */
1987 NEXT_INSN;
1988 switch (ctx->opcode & 0xf00f) {
1989 case 0x300c: /* add Rm,Rn */
1990 op_opc = INDEX_op_add_i32;
1991 goto do_reg_op;
1992 case 0x2009: /* and Rm,Rn */
1993 op_opc = INDEX_op_and_i32;
1994 goto do_reg_op;
1995 case 0x200a: /* xor Rm,Rn */
1996 op_opc = INDEX_op_xor_i32;
1997 goto do_reg_op;
1998 case 0x200b: /* or Rm,Rn */
1999 op_opc = INDEX_op_or_i32;
2000 do_reg_op:
2001 /* The operation register should be as expected, and the
2002 other input cannot depend on the load. */
2003 if (op_dst != B11_8) {
2004 goto fail;
2005 }
2006 if (op_src < 0) {
2007 /* Unconstrainted input. */
2008 op_src = B7_4;
2009 } else if (op_src == B7_4) {
2010 /* Constrained input matched load. All operations are
2011 commutative; "swap" them by "moving" the load output
2012 to the (implicit) first argument and the move source
2013 to the (explicit) second argument. */
2014 op_src = mv_src;
2015 } else {
2016 goto fail;
2017 }
2018 op_arg = REG(op_src);
2019 break;
2020
2021 case 0x6007: /* not Rm,Rn */
2022 if (ld_dst != B7_4 || mv_src >= 0) {
2023 goto fail;
2024 }
2025 op_dst = B11_8;
2026 op_opc = INDEX_op_xor_i32;
2027 op_arg = tcg_const_i32(-1);
2028 break;
2029
2030 case 0x7000 ... 0x700f: /* add #imm,Rn */
2031 if (op_dst != B11_8 || mv_src >= 0) {
2032 goto fail;
2033 }
2034 op_opc = INDEX_op_add_i32;
2035 op_arg = tcg_const_i32(B7_0s);
2036 break;
2037
2038 case 0x3000: /* cmp/eq Rm,Rn */
2039 /* Looking for the middle of a compare-and-swap sequence,
2040 beginning with the compare. Operands can be either order,
2041 but with only one overlapping the load. */
2042 if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
2043 goto fail;
2044 }
2045 op_opc = INDEX_op_setcond_i32; /* placeholder */
2046 op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2047 op_arg = REG(op_src);
2048
2049 NEXT_INSN;
2050 switch (ctx->opcode & 0xff00) {
2051 case 0x8b00: /* bf label */
2052 case 0x8f00: /* bf/s label */
2053 if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2054 goto fail;
2055 }
2056 if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2057 break;
2058 }
2059 /* We're looking to unconditionally modify Rn with the
2060 result of the comparison, within the delay slot of
2061 the branch. This is used by older gcc. */
2062 NEXT_INSN;
2063 if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2064 mt_dst = B11_8;
2065 } else {
2066 goto fail;
2067 }
2068 break;
2069
2070 default:
2071 goto fail;
2072 }
2073 break;
2074
2075 case 0x2008: /* tst Rm,Rn */
2076 /* Looking for a compare-and-swap against zero. */
2077 if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2078 goto fail;
2079 }
2080 op_opc = INDEX_op_setcond_i32;
2081 op_arg = tcg_const_i32(0);
2082
2083 NEXT_INSN;
2084 if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2085 || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2086 goto fail;
2087 }
2088 break;
2089
2090 default:
2091 /* Put back and re-examine as store. */
2092 --i;
2093 }
2094
2095 /*
2096 * Expect the store.
2097 */
2098 /* The store must be the last insn. */
2099 if (i != max_insns - 1) {
2100 goto fail;
2101 }
2102 NEXT_INSN;
2103 switch (ctx->opcode & 0xf00f) {
2104 case 0x2000: /* mov.b Rm,@Rn */
2105 st_mop = MO_UB;
2106 break;
2107 case 0x2001: /* mov.w Rm,@Rn */
2108 st_mop = MO_UW;
2109 break;
2110 case 0x2002: /* mov.l Rm,@Rn */
2111 st_mop = MO_UL;
2112 break;
2113 default:
2114 goto fail;
2115 }
2116 /* The store must match the load. */
2117 if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2118 goto fail;
2119 }
2120 st_src = B7_4;
2121
2122#undef NEXT_INSN
2123
2124 /*
2125 * Emit the operation.
2126 */
d6a6cffd
RH
2127 switch (op_opc) {
2128 case -1:
2129 /* No operation found. Look for exchange pattern. */
2130 if (st_src == ld_dst || mv_src >= 0) {
2131 goto fail;
2132 }
2133 tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2134 ctx->memidx, ld_mop);
2135 break;
2136
2137 case INDEX_op_add_i32:
2138 if (op_dst != st_src) {
2139 goto fail;
2140 }
2141 if (op_dst == ld_dst && st_mop == MO_UL) {
2142 tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2143 op_arg, ctx->memidx, ld_mop);
2144 } else {
2145 tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2146 op_arg, ctx->memidx, ld_mop);
2147 if (op_dst != ld_dst) {
2148 /* Note that mop sizes < 4 cannot use add_fetch
2149 because it won't carry into the higher bits. */
2150 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2151 }
2152 }
2153 break;
2154
2155 case INDEX_op_and_i32:
2156 if (op_dst != st_src) {
2157 goto fail;
2158 }
2159 if (op_dst == ld_dst) {
2160 tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2161 op_arg, ctx->memidx, ld_mop);
2162 } else {
2163 tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2164 op_arg, ctx->memidx, ld_mop);
2165 tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2166 }
2167 break;
2168
2169 case INDEX_op_or_i32:
2170 if (op_dst != st_src) {
2171 goto fail;
2172 }
2173 if (op_dst == ld_dst) {
2174 tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2175 op_arg, ctx->memidx, ld_mop);
2176 } else {
2177 tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2178 op_arg, ctx->memidx, ld_mop);
2179 tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2180 }
2181 break;
2182
2183 case INDEX_op_xor_i32:
2184 if (op_dst != st_src) {
2185 goto fail;
2186 }
2187 if (op_dst == ld_dst) {
2188 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2189 op_arg, ctx->memidx, ld_mop);
2190 } else {
2191 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2192 op_arg, ctx->memidx, ld_mop);
2193 tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2194 }
2195 break;
2196
2197 case INDEX_op_setcond_i32:
2198 if (st_src == ld_dst) {
2199 goto fail;
2200 }
2201 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2202 REG(st_src), ctx->memidx, ld_mop);
2203 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2204 if (mt_dst >= 0) {
2205 tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2206 }
2207 break;
2208
2209 default:
2210 g_assert_not_reached();
2211 }
2212
2213 /* If op_src is not a valid register, then op_arg was a constant. */
f764718d 2214 if (op_src < 0 && op_arg) {
d6a6cffd
RH
2215 tcg_temp_free_i32(op_arg);
2216 }
2217
2218 /* The entire region has been translated. */
2219 ctx->envflags &= ~GUSA_MASK;
6f1c2af6 2220 ctx->base.pc_next = pc_end;
be0e3d7a
RH
2221 ctx->base.num_insns += max_insns - 1;
2222 return;
d6a6cffd
RH
2223
2224 fail:
4bfa602b
RH
2225 qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2226 pc, pc_end);
2227
2228 /* Restart with the EXCLUSIVE bit set, within a TB run via
2229 cpu_exec_step_atomic holding the exclusive lock. */
4bfa602b
RH
2230 ctx->envflags |= GUSA_EXCLUSIVE;
2231 gen_save_cpu_state(ctx, false);
2232 gen_helper_exclusive(cpu_env);
6f1c2af6 2233 ctx->base.is_jmp = DISAS_NORETURN;
4bfa602b
RH
2234
2235 /* We're not executing an instruction, but we must report one for the
2236 purposes of accounting within the TB. We might as well report the
6f1c2af6
RH
2237 entire region consumed via ctx->base.pc_next so that it's immediately
2238 available in the disassembly dump. */
2239 ctx->base.pc_next = pc_end;
be0e3d7a 2240 ctx->base.num_insns += max_insns - 1;
fdf9b3e8 2241}
4bfa602b 2242#endif
fdf9b3e8 2243
fd1b3d38 2244static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
fdf9b3e8 2245{
fd1b3d38 2246 DisasContext *ctx = container_of(dcbase, DisasContext, base);
9c489ea6 2247 CPUSH4State *env = cs->env_ptr;
be0e3d7a 2248 uint32_t tbflags;
fd1b3d38
EC
2249 int bound;
2250
be0e3d7a
RH
2251 ctx->tbflags = tbflags = ctx->base.tb->flags;
2252 ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
2253 ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
9854bc46
PB
2254 /* We don't know if the delayed pc came from a dynamic or static branch,
2255 so assume it is a dynamic branch. */
fd1b3d38
EC
2256 ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2257 ctx->features = env->features;
be0e3d7a
RH
2258 ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2259 ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2260 (tbflags & (1 << SR_RB))) * 0x10;
2261 ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2262
2263 if (tbflags & GUSA_MASK) {
2264 uint32_t pc = ctx->base.pc_next;
2265 uint32_t pc_end = ctx->base.tb->cs_base;
2266 int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
2267 int max_insns = (pc_end - pc) / 2;
2268
2269 if (pc != pc_end + backup || max_insns < 2) {
2270 /* This is a malformed gUSA region. Don't do anything special,
2271 since the interpreter is likely to get confused. */
2272 ctx->envflags &= ~GUSA_MASK;
2273 } else if (tbflags & GUSA_EXCLUSIVE) {
2274 /* Regardless of single-stepping or the end of the page,
2275 we must complete execution of the gUSA region while
2276 holding the exclusive lock. */
2277 ctx->base.max_insns = max_insns;
2278 return;
2279 }
2280 }
4448a836
RH
2281
2282 /* Since the ISA is fixed-width, we can bound by the number
2283 of instructions remaining on the page. */
fd1b3d38
EC
2284 bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2285 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2286}
4448a836 2287
fd1b3d38
EC
static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    /* Nothing to do at the start of a TB for SH4.  */
}
4bfa602b 2291
fd1b3d38
EC
2292static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2293{
2294 DisasContext *ctx = container_of(dcbase, DisasContext, base);
667b8e29 2295
fd1b3d38
EC
2296 tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2297}
b933066a 2298
fd1b3d38
EC
/*
 * Guest breakpoint hit: sync CPU state, raise the debug exception and
 * end the TB.  Returns true to tell the translator the bp was handled.
 */
static bool sh4_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                    const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* We have hit a breakpoint - make sure PC is up-to-date */
    gen_save_cpu_state(ctx, true);
    gen_helper_debug(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing. */
    ctx->base.pc_next += 2;
    return true;
}
2315
/*
 * Translate the single 16-bit instruction at base.pc_next.  In user-only
 * mode, a gUSA restartable-atomic region is instead handed whole to
 * decode_gusa().
 */
static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    CPUSH4State *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

#ifdef CONFIG_USER_ONLY
    if (unlikely(ctx->envflags & GUSA_MASK)
        && !(ctx->envflags & GUSA_EXCLUSIVE)) {
        /* We're in an gUSA region, and we have not already fallen
           back on using an exclusive region.  Attempt to parse the
           region into a single supported atomic operation.  Failure
           is handled within the parser by raising an exception to
           retry using an exclusive region.  */
        decode_gusa(ctx, env);
        return;
    }
#endif

    ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
    decode_opc(ctx);
    ctx->base.pc_next += 2;  /* SH4 instructions are fixed 2 bytes wide */
}
2338
/* Finish the TB: emit the exit path matching how translation ended.  */
static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* Ending the region of exclusivity.  Clear the bits.  */
        ctx->envflags &= ~GUSA_MASK;
    }

    switch (ctx->base.is_jmp) {
    case DISAS_STOP:
        /* CPU state changed mid-TB; either trap to the debugger when
           single-stepping or exit to the main loop.  */
        gen_save_cpu_state(ctx, true);
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        /* Fell off the end of the TB; chain to the next pc.  */
        gen_save_cpu_state(ctx, false);
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        break;
    case DISAS_NORETURN:
        /* An exception or indirect jump already ended the TB.  */
        break;
    default:
        g_assert_not_reached();
    }
}
823029f9 2368
fd1b3d38
EC
/* Dump the guest instructions of this TB to the qemu log.  */
static void sh4_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}
0a7df5da 2374
fd1b3d38
EC
/* Hooks invoked by the generic translator_loop() driver.  */
static const TranslatorOps sh4_tr_ops = {
    .init_disas_context = sh4_tr_init_disas_context,
    .tb_start = sh4_tr_tb_start,
    .insn_start = sh4_tr_insn_start,
    .breakpoint_check = sh4_tr_breakpoint_check,
    .translate_insn = sh4_tr_translate_insn,
    .tb_stop = sh4_tr_tb_stop,
    .disas_log = sh4_tr_disas_log,
};
2384
/* Entry point from common code: translate one TB for SH4.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext ctx;

    translator_loop(&sh4_tr_ops, &ctx.base, cs, tb);
}
2391
bad729e2
RH
2392void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
2393 target_ulong *data)
d2856f1a 2394{
bad729e2
RH
2395 env->pc = data[0];
2396 env->flags = data[1];
ac9707ea
AJ
2397 /* Theoretically delayed_pc should also be restored. In practice the
2398 branch instruction is re-executed after exception, so the delayed
2399 branch target will be recomputed. */
d2856f1a 2400}