]> git.proxmox.com Git - mirror_qemu.git/blame - target/sh4/translate.c
i386/cpu: Consolidate die-id validity in smp context
[mirror_qemu.git] / target / sh4 / translate.c
CommitLineData
fdf9b3e8
FB
1/*
2 * SH4 translation
5fafdf24 3 *
fdf9b3e8
FB
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
6faf2b6c 9 * version 2.1 of the License, or (at your option) any later version.
fdf9b3e8
FB
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
fdf9b3e8 18 */
fdf9b3e8
FB
19
20#define DEBUG_DISAS
fdf9b3e8 21
9d4c9946 22#include "qemu/osdep.h"
fdf9b3e8 23#include "cpu.h"
76cad711 24#include "disas/disas.h"
63c91552 25#include "exec/exec-all.h"
57fec1fe 26#include "tcg-op.h"
f08b6170 27#include "exec/cpu_ldst.h"
2ef6175a
RH
28#include "exec/helper-proto.h"
29#include "exec/helper-gen.h"
4834871b 30#include "exec/translator.h"
a7e30d84 31#include "trace-tcg.h"
508127e2 32#include "exec/log.h"
90c84c56 33#include "qemu/qemu-print.h"
a7e30d84
LV
34
35
/* Per-translation-block state for the SH4 front end. */
typedef struct DisasContext {
    DisasContextBase base;

    uint32_t tbflags;    /* should stay unmodified during the TB translation */
    uint32_t envflags;   /* should stay in sync with env->flags using TCG ops */
    int memidx;          /* MMU index used for qemu_ld/st accesses */
    int gbank;           /* XOR mask selecting the active general-register bank
                            (see REG()/ALTREG()) */
    int fbank;           /* XOR mask selecting the active FP-register bank
                            (see FREG()) */
    uint32_t delayed_pc; /* static delayed-branch target, or (uint32_t)-1
                            when the target is only known at run time */
    uint32_t features;   /* CPU feature bits, e.g. SH_FEATURE_SH4A */

    uint16_t opcode;     /* 16-bit instruction word being decoded */

    bool has_movcal;     /* a movca.l backup may still be pending; cleared
                            once a non-movca.l/ocbi insn is seen */
} DisasContext;
51
fe25591e
AJ
52#if defined(CONFIG_USER_ONLY)
53#define IS_USER(ctx) 1
54#else
a6215749 55#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
fe25591e
AJ
56#endif
57
6f1c2af6 58/* Target-specific values for ctx->base.is_jmp. */
4834871b
RH
59/* We want to exit back to the cpu loop for some reason.
60 Usually this is to recognize interrupts immediately. */
61#define DISAS_STOP DISAS_TARGET_0
823029f9 62
1e8864f7 63/* global register indexes */
3a3bb8d2 64static TCGv cpu_gregs[32];
1d565b21
AJ
65static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
66static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
3a8a44c4 67static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
f85da308
RH
68static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
69static TCGv cpu_lock_addr, cpu_lock_value;
66ba317c 70static TCGv cpu_fregs[32];
1000822b
AJ
71
72/* internal register indexes */
47b9f4d5 73static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
1e8864f7 74
022c62cb 75#include "exec/gen-icount.h"
2e70f6ef 76
/*
 * Allocate the TCG globals that shadow the CPUSH4State fields.
 * Called once when the SH4 translator is initialized.
 */
void sh4_translate_init(void)
{
    int i;
    /* Only 24 distinct storage slots exist: R0-R7 of both banks plus the
       shared R8-R15.  cpu_gregs has 32 entries; 24-31 alias R8-R15 via the
       memcpy below so that bank selection by XOR (REG/ALTREG) works. */
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
         "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
         "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
         "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
         "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
         "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
         "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
         "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
         "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    /* Alias entries 24-31 onto R8-R15 (shared between both banks). */
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    /* SR.Q, SR.M and SR.T are kept in dedicated globals; cpu_sr holds the
       remaining bits (see gen_read_sr/gen_write_sr). */
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    /* Internal (non-architectural) translator state, named _xxx_. */
    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_lock_addr = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUSH4State, lock_addr),
                                           "_lock_addr_");
    cpu_lock_value = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, lock_value),
                                            "_lock_value_");

    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
}
159
90c84c56 160void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
fdf9b3e8 161{
878096ee
AF
162 SuperHCPU *cpu = SUPERH_CPU(cs);
163 CPUSH4State *env = &cpu->env;
fdf9b3e8 164 int i;
90c84c56
MA
165
166 qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
167 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
168 qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
169 env->spc, env->ssr, env->gbr, env->vbr);
170 qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
171 env->sgr, env->dbr, env->delayed_pc, env->fpul);
fdf9b3e8 172 for (i = 0; i < 24; i += 4) {
90c84c56 173 qemu_printf("r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
fdf9b3e8
FB
174 i, env->gregs[i], i + 1, env->gregs[i + 1],
175 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
176 }
177 if (env->flags & DELAY_SLOT) {
90c84c56 178 qemu_printf("in delay slot (delayed_pc=0x%08x)\n",
fdf9b3e8
FB
179 env->delayed_pc);
180 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
90c84c56 181 qemu_printf("in conditional delay slot (delayed_pc=0x%08x)\n",
fdf9b3e8 182 env->delayed_pc);
be53081a 183 } else if (env->flags & DELAY_SLOT_RTE) {
90c84c56
MA
184 qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
185 env->delayed_pc);
fdf9b3e8
FB
186 }
187}
188
34086945
AJ
189static void gen_read_sr(TCGv dst)
190{
1d565b21
AJ
191 TCGv t0 = tcg_temp_new();
192 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
193 tcg_gen_or_i32(dst, dst, t0);
194 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
195 tcg_gen_or_i32(dst, dst, t0);
196 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
197 tcg_gen_or_i32(dst, cpu_sr, t0);
198 tcg_temp_free_i32(t0);
34086945
AJ
199}
200
/*
 * Scatter an architectural SR value from @src into the split storage:
 * the Q, M and T bits go to their dedicated globals, everything else
 * stays in cpu_sr (with Q/M/T masked out).
 */
static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}
209
ac9707ea
AJ
/*
 * Flush lazily-tracked translation state back to the CPU globals.
 * When @save_pc is set, also synchronize cpu_pc with the current
 * translation position (ctx->base.pc_next).
 */
static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    }
    /* (uint32_t)-1 means "no statically known delayed branch target". */
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    /* Only emit a store when env->flags would actually change. */
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}
222
ec2eb22e
RH
223static inline bool use_exit_tb(DisasContext *ctx)
224{
225 return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
226}
227
/* Decide whether a chained goto_tb to @dest is permissible. */
static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    /* Use a direct jump if in same page and singlestep not enabled */
    if (unlikely(ctx->base.singlestep_enabled || use_exit_tb(ctx))) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    /* System mode: only chain within the same guest page, since a
       cross-page target may be mapped differently. */
    return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
fdf9b3e8 240
90aa39a1
SF
/*
 * Emit a jump to @dest using the cheapest permissible mechanism:
 * a chained goto_tb (slot @n), or otherwise a store to cpu_pc followed
 * by a debug trap (single-stepping), a full exit (gUSA region), or an
 * indirect TB lookup.  Always ends the TB (DISAS_NORETURN).
 */
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->base.singlestep_enabled) {
            /* Raise a debug exception so the debugger observes the step. */
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    ctx->base.is_jmp = DISAS_NORETURN;
}
259
fdf9b3e8
FB
/* Emit the jump for a (delayed) branch whose slot has been translated. */
static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == -1) {
        /* Target is not statically known, it comes necessarily from a
           delayed jump as immediate jumps are conditional jumps */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else if (use_exit_tb(ctx)) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        ctx->base.is_jmp = DISAS_NORETURN;
    } else {
        /* Statically known target: may be chained via goto_tb. */
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
279
280/* Immediate conditional jump (bt or bf) */
4bfa602b
RH
/*
 * Immediate conditional jump (bt or bf): branch to @dest when SR.T
 * equals @jump_if_true, otherwise fall through to the next insn.
 */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    /* cond_not_taken is the SR.T comparison that skips the branch. */
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep. */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    /* Not-taken path: continue at the next instruction. */
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    ctx->base.is_jmp = DISAS_NORETURN;
}
308
309/* Delayed conditional jump (bt or bf) */
/*
 * Delayed conditional jump (bt/s or bf/s): the branch condition was
 * latched into cpu_delayed_cond before the delay slot executed; branch
 * to the delayed target when it is non-zero, else fall through.
 */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    /* Consume the latched condition so it is dead after this insn. */
    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        ctx->base.is_jmp = DISAS_NEXT;
        return;
    }

    /* Not taken: continue at the next instruction; taken: jump. */
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}
338
/*
 * Read double-precision register pair Dr (@reg must be even) into the
 * 64-bit temp @t, honouring the active FP bank.  fregs[reg + 1] forms
 * the low word and fregs[reg] the high word of the result.
 */
static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
346
/*
 * Write the 64-bit temp @t into double-precision register pair Dr
 * (@reg must be even), honouring the active FP bank.  The low word
 * goes to fregs[reg + 1], the high word to fregs[reg].
 */
static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}
354
fdf9b3e8
FB
/* Operand field extractors for the 16-bit instruction word. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
/* 8-bit immediate, sign-extended. */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, sign-extended. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access through the active bank (XOR selects the bank);
   ALTREG addresses the same register in the inactive bank.  FREG does the
   bank selection for FP registers. */
#define REG(x) cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x) cpu_fregs[(x) ^ ctx->fbank]

/* Swaps bit 0 of a register number into bit 4 — presumably remapping
   odd single registers onto the XD/second-bank encoding; confirm against
   the FP move handling in _decode_opc. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))

/* Decode-time guards; each bails out to an error label in _decode_opc. */
#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;               \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                     \
        goto do_illegal;                    \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {          \
        goto do_illegal;                    \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {       \
        goto do_illegal;                    \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }
400
b1d8e52e 401static void _decode_opc(DisasContext * ctx)
fdf9b3e8 402{
852d481f
EI
403 /* This code tries to make movcal emulation sufficiently
404 accurate for Linux purposes. This instruction writes
405 memory, and prior to that, always allocates a cache line.
406 It is used in two contexts:
407 - in memcpy, where data is copied in blocks, the first write
408 of to a block uses movca.l for performance.
409 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
410 to flush the cache. Here, the data written by movcal.l is never
411 written to memory, and the data written is just bogus.
412
413 To simulate this, we simulate movcal.l, we store the value to memory,
414 but we also remember the previous content. If we see ocbi, we check
415 if movcal.l for that address was done previously. If so, the write should
416 not have hit the memory, so we restore the previous content.
417 When we see an instruction that is neither movca.l
418 nor ocbi, the previous content is discarded.
419
420 To optimize, we only try to flush stores when we're at the start of
421 TB, or if we already saw movca.l in this TB and did not flush stores
422 yet. */
423 if (ctx->has_movcal)
424 {
425 int opcode = ctx->opcode & 0xf0ff;
426 if (opcode != 0x0093 /* ocbi */
427 && opcode != 0x00c3 /* movca.l */)
428 {
485d0035 429 gen_helper_discard_movcal_backup(cpu_env);
852d481f
EI
430 ctx->has_movcal = 0;
431 }
432 }
433
fdf9b3e8
FB
434#if 0
435 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
436#endif
f6198371 437
fdf9b3e8
FB
438 switch (ctx->opcode) {
439 case 0x0019: /* div0u */
1d565b21
AJ
440 tcg_gen_movi_i32(cpu_sr_m, 0);
441 tcg_gen_movi_i32(cpu_sr_q, 0);
34086945 442 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
443 return;
444 case 0x000b: /* rts */
1000822b
AJ
445 CHECK_NOT_DELAY_SLOT
446 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
a6215749 447 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
448 ctx->delayed_pc = (uint32_t) - 1;
449 return;
450 case 0x0028: /* clrmac */
3a8a44c4
AJ
451 tcg_gen_movi_i32(cpu_mach, 0);
452 tcg_gen_movi_i32(cpu_macl, 0);
fdf9b3e8
FB
453 return;
454 case 0x0048: /* clrs */
5ed9a259 455 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
fdf9b3e8
FB
456 return;
457 case 0x0008: /* clrt */
34086945 458 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
459 return;
460 case 0x0038: /* ldtlb */
fe25591e 461 CHECK_PRIVILEGED
485d0035 462 gen_helper_ldtlb(cpu_env);
fdf9b3e8 463 return;
c5e814b2 464 case 0x002b: /* rte */
fe25591e 465 CHECK_PRIVILEGED
1000822b 466 CHECK_NOT_DELAY_SLOT
34086945 467 gen_write_sr(cpu_ssr);
1000822b 468 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
be53081a 469 ctx->envflags |= DELAY_SLOT_RTE;
fdf9b3e8 470 ctx->delayed_pc = (uint32_t) - 1;
6f1c2af6 471 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8
FB
472 return;
473 case 0x0058: /* sets */
5ed9a259 474 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
fdf9b3e8
FB
475 return;
476 case 0x0018: /* sett */
34086945 477 tcg_gen_movi_i32(cpu_sr_t, 1);
fdf9b3e8 478 return;
24988dc2 479 case 0xfbfd: /* frchg */
61dedf2a 480 CHECK_FPSCR_PR_0
6f06939b 481 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
6f1c2af6 482 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8 483 return;
24988dc2 484 case 0xf3fd: /* fschg */
61dedf2a 485 CHECK_FPSCR_PR_0
7a64244f 486 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
6f1c2af6 487 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8 488 return;
907759f9
RH
489 case 0xf7fd: /* fpchg */
490 CHECK_SH4A
491 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
6f1c2af6 492 ctx->base.is_jmp = DISAS_STOP;
907759f9 493 return;
fdf9b3e8
FB
494 case 0x0009: /* nop */
495 return;
496 case 0x001b: /* sleep */
fe25591e 497 CHECK_PRIVILEGED
6f1c2af6 498 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
10127400 499 gen_helper_sleep(cpu_env);
fdf9b3e8
FB
500 return;
501 }
502
503 switch (ctx->opcode & 0xf000) {
504 case 0x1000: /* mov.l Rm,@(disp,Rn) */
c55497ec 505 {
a7812ae4 506 TCGv addr = tcg_temp_new();
c55497ec 507 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
3376f415 508 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
c55497ec
AJ
509 tcg_temp_free(addr);
510 }
fdf9b3e8
FB
511 return;
512 case 0x5000: /* mov.l @(disp,Rm),Rn */
c55497ec 513 {
a7812ae4 514 TCGv addr = tcg_temp_new();
c55497ec 515 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
3376f415 516 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
517 tcg_temp_free(addr);
518 }
fdf9b3e8 519 return;
24988dc2 520 case 0xe000: /* mov #imm,Rn */
4bfa602b
RH
521#ifdef CONFIG_USER_ONLY
522 /* Detect the start of a gUSA region. If so, update envflags
523 and end the TB. This will allow us to see the end of the
524 region (stored in R0) in the next TB. */
6f1c2af6
RH
525 if (B11_8 == 15 && B7_0s < 0 &&
526 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
4bfa602b 527 ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
6f1c2af6 528 ctx->base.is_jmp = DISAS_STOP;
4bfa602b
RH
529 }
530#endif
7efbe241 531 tcg_gen_movi_i32(REG(B11_8), B7_0s);
fdf9b3e8
FB
532 return;
533 case 0x9000: /* mov.w @(disp,PC),Rn */
c55497ec 534 {
6f1c2af6 535 TCGv addr = tcg_const_i32(ctx->base.pc_next + 4 + B7_0 * 2);
3376f415 536 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
537 tcg_temp_free(addr);
538 }
fdf9b3e8
FB
539 return;
540 case 0xd000: /* mov.l @(disp,PC),Rn */
c55497ec 541 {
6f1c2af6 542 TCGv addr = tcg_const_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
3376f415 543 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
544 tcg_temp_free(addr);
545 }
fdf9b3e8 546 return;
24988dc2 547 case 0x7000: /* add #imm,Rn */
7efbe241 548 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
fdf9b3e8
FB
549 return;
550 case 0xa000: /* bra disp */
551 CHECK_NOT_DELAY_SLOT
6f1c2af6 552 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
a6215749 553 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
554 return;
555 case 0xb000: /* bsr disp */
556 CHECK_NOT_DELAY_SLOT
6f1c2af6
RH
557 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
558 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
a6215749 559 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
560 return;
561 }
562
563 switch (ctx->opcode & 0xf00f) {
564 case 0x6003: /* mov Rm,Rn */
7efbe241 565 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
566 return;
567 case 0x2000: /* mov.b Rm,@Rn */
3376f415 568 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
fdf9b3e8
FB
569 return;
570 case 0x2001: /* mov.w Rm,@Rn */
3376f415 571 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
fdf9b3e8
FB
572 return;
573 case 0x2002: /* mov.l Rm,@Rn */
3376f415 574 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
fdf9b3e8
FB
575 return;
576 case 0x6000: /* mov.b @Rm,Rn */
3376f415 577 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
fdf9b3e8
FB
578 return;
579 case 0x6001: /* mov.w @Rm,Rn */
3376f415 580 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
fdf9b3e8
FB
581 return;
582 case 0x6002: /* mov.l @Rm,Rn */
3376f415 583 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
fdf9b3e8
FB
584 return;
585 case 0x2004: /* mov.b Rm,@-Rn */
c55497ec 586 {
a7812ae4 587 TCGv addr = tcg_temp_new();
c55497ec 588 tcg_gen_subi_i32(addr, REG(B11_8), 1);
3376f415
AJ
589 /* might cause re-execution */
590 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
3101e99c 591 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
c55497ec
AJ
592 tcg_temp_free(addr);
593 }
fdf9b3e8
FB
594 return;
595 case 0x2005: /* mov.w Rm,@-Rn */
c55497ec 596 {
a7812ae4 597 TCGv addr = tcg_temp_new();
c55497ec 598 tcg_gen_subi_i32(addr, REG(B11_8), 2);
3376f415 599 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
3101e99c 600 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec
AJ
601 tcg_temp_free(addr);
602 }
fdf9b3e8
FB
603 return;
604 case 0x2006: /* mov.l Rm,@-Rn */
c55497ec 605 {
a7812ae4 606 TCGv addr = tcg_temp_new();
c55497ec 607 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 608 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
3101e99c 609 tcg_gen_mov_i32(REG(B11_8), addr);
e691e0ed 610 tcg_temp_free(addr);
c55497ec 611 }
fdf9b3e8 612 return;
eda9b09b 613 case 0x6004: /* mov.b @Rm+,Rn */
3376f415 614 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
24988dc2 615 if ( B11_8 != B7_4 )
7efbe241 616 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
fdf9b3e8
FB
617 return;
618 case 0x6005: /* mov.w @Rm+,Rn */
3376f415 619 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
24988dc2 620 if ( B11_8 != B7_4 )
7efbe241 621 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
fdf9b3e8
FB
622 return;
623 case 0x6006: /* mov.l @Rm+,Rn */
3376f415 624 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
24988dc2 625 if ( B11_8 != B7_4 )
7efbe241 626 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
fdf9b3e8
FB
627 return;
628 case 0x0004: /* mov.b Rm,@(R0,Rn) */
c55497ec 629 {
a7812ae4 630 TCGv addr = tcg_temp_new();
c55497ec 631 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 632 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
c55497ec
AJ
633 tcg_temp_free(addr);
634 }
fdf9b3e8
FB
635 return;
636 case 0x0005: /* mov.w Rm,@(R0,Rn) */
c55497ec 637 {
a7812ae4 638 TCGv addr = tcg_temp_new();
c55497ec 639 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 640 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
c55497ec
AJ
641 tcg_temp_free(addr);
642 }
fdf9b3e8
FB
643 return;
644 case 0x0006: /* mov.l Rm,@(R0,Rn) */
c55497ec 645 {
a7812ae4 646 TCGv addr = tcg_temp_new();
c55497ec 647 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 648 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
c55497ec
AJ
649 tcg_temp_free(addr);
650 }
fdf9b3e8
FB
651 return;
652 case 0x000c: /* mov.b @(R0,Rm),Rn */
c55497ec 653 {
a7812ae4 654 TCGv addr = tcg_temp_new();
c55497ec 655 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 656 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
c55497ec
AJ
657 tcg_temp_free(addr);
658 }
fdf9b3e8
FB
659 return;
660 case 0x000d: /* mov.w @(R0,Rm),Rn */
c55497ec 661 {
a7812ae4 662 TCGv addr = tcg_temp_new();
c55497ec 663 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 664 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
665 tcg_temp_free(addr);
666 }
fdf9b3e8
FB
667 return;
668 case 0x000e: /* mov.l @(R0,Rm),Rn */
c55497ec 669 {
a7812ae4 670 TCGv addr = tcg_temp_new();
c55497ec 671 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 672 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
673 tcg_temp_free(addr);
674 }
fdf9b3e8
FB
675 return;
676 case 0x6008: /* swap.b Rm,Rn */
c55497ec 677 {
3c254ab8 678 TCGv low = tcg_temp_new();
3101e99c
AJ
679 tcg_gen_ext16u_i32(low, REG(B7_4));
680 tcg_gen_bswap16_i32(low, low);
218fd730 681 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
c55497ec 682 tcg_temp_free(low);
c55497ec 683 }
fdf9b3e8
FB
684 return;
685 case 0x6009: /* swap.w Rm,Rn */
c53b36d2 686 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
fdf9b3e8
FB
687 return;
688 case 0x200d: /* xtrct Rm,Rn */
c55497ec
AJ
689 {
690 TCGv high, low;
a7812ae4 691 high = tcg_temp_new();
3101e99c 692 tcg_gen_shli_i32(high, REG(B7_4), 16);
a7812ae4 693 low = tcg_temp_new();
c55497ec 694 tcg_gen_shri_i32(low, REG(B11_8), 16);
c55497ec
AJ
695 tcg_gen_or_i32(REG(B11_8), high, low);
696 tcg_temp_free(low);
697 tcg_temp_free(high);
698 }
fdf9b3e8
FB
699 return;
700 case 0x300c: /* add Rm,Rn */
7efbe241 701 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
702 return;
703 case 0x300e: /* addc Rm,Rn */
22b88fd7 704 {
34086945 705 TCGv t0, t1;
a2368e01 706 t0 = tcg_const_tl(0);
22b88fd7 707 t1 = tcg_temp_new();
a2368e01
AJ
708 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
709 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
710 REG(B11_8), t0, t1, cpu_sr_t);
22b88fd7 711 tcg_temp_free(t0);
34086945 712 tcg_temp_free(t1);
22b88fd7 713 }
fdf9b3e8
FB
714 return;
715 case 0x300f: /* addv Rm,Rn */
ad8d25a1
AJ
716 {
717 TCGv t0, t1, t2;
718 t0 = tcg_temp_new();
719 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
720 t1 = tcg_temp_new();
721 tcg_gen_xor_i32(t1, t0, REG(B11_8));
722 t2 = tcg_temp_new();
723 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
34086945 724 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
ad8d25a1 725 tcg_temp_free(t2);
34086945 726 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
ad8d25a1
AJ
727 tcg_temp_free(t1);
728 tcg_gen_mov_i32(REG(B7_4), t0);
729 tcg_temp_free(t0);
730 }
fdf9b3e8
FB
731 return;
732 case 0x2009: /* and Rm,Rn */
7efbe241 733 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
734 return;
735 case 0x3000: /* cmp/eq Rm,Rn */
34086945 736 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
737 return;
738 case 0x3003: /* cmp/ge Rm,Rn */
34086945 739 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
740 return;
741 case 0x3007: /* cmp/gt Rm,Rn */
34086945 742 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
743 return;
744 case 0x3006: /* cmp/hi Rm,Rn */
34086945 745 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
746 return;
747 case 0x3002: /* cmp/hs Rm,Rn */
34086945 748 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
749 return;
750 case 0x200c: /* cmp/str Rm,Rn */
69d6275b 751 {
c5c19137
AJ
752 TCGv cmp1 = tcg_temp_new();
753 TCGv cmp2 = tcg_temp_new();
eb6ca2b4
AJ
754 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
755 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
756 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
757 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
758 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
c55497ec
AJ
759 tcg_temp_free(cmp2);
760 tcg_temp_free(cmp1);
69d6275b 761 }
fdf9b3e8
FB
762 return;
763 case 0x2007: /* div0s Rm,Rn */
1d565b21
AJ
764 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
765 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
766 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
fdf9b3e8
FB
767 return;
768 case 0x3004: /* div1 Rm,Rn */
1d565b21
AJ
769 {
770 TCGv t0 = tcg_temp_new();
771 TCGv t1 = tcg_temp_new();
772 TCGv t2 = tcg_temp_new();
773 TCGv zero = tcg_const_i32(0);
774
775 /* shift left arg1, saving the bit being pushed out and inserting
776 T on the right */
777 tcg_gen_shri_i32(t0, REG(B11_8), 31);
778 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
779 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
780
781 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
782 using 64-bit temps, we compute arg0's high part from q ^ m, so
783 that it is 0x00000000 when adding the value or 0xffffffff when
784 subtracting it. */
785 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
786 tcg_gen_subi_i32(t1, t1, 1);
787 tcg_gen_neg_i32(t2, REG(B7_4));
788 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
789 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
790
791 /* compute T and Q depending on carry */
792 tcg_gen_andi_i32(t1, t1, 1);
793 tcg_gen_xor_i32(t1, t1, t0);
794 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
795 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
796
797 tcg_temp_free(zero);
798 tcg_temp_free(t2);
799 tcg_temp_free(t1);
800 tcg_temp_free(t0);
801 }
fdf9b3e8
FB
802 return;
803 case 0x300d: /* dmuls.l Rm,Rn */
1d3b7084 804 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
805 return;
806 case 0x3005: /* dmulu.l Rm,Rn */
1d3b7084 807 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
808 return;
809 case 0x600e: /* exts.b Rm,Rn */
7efbe241 810 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
811 return;
812 case 0x600f: /* exts.w Rm,Rn */
7efbe241 813 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
814 return;
815 case 0x600c: /* extu.b Rm,Rn */
7efbe241 816 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
817 return;
818 case 0x600d: /* extu.w Rm,Rn */
7efbe241 819 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8 820 return;
24988dc2 821 case 0x000f: /* mac.l @Rm+,@Rn+ */
c55497ec
AJ
822 {
823 TCGv arg0, arg1;
a7812ae4 824 arg0 = tcg_temp_new();
3376f415 825 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
a7812ae4 826 arg1 = tcg_temp_new();
3376f415 827 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
485d0035 828 gen_helper_macl(cpu_env, arg0, arg1);
c55497ec
AJ
829 tcg_temp_free(arg1);
830 tcg_temp_free(arg0);
831 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
832 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
833 }
fdf9b3e8
FB
834 return;
835 case 0x400f: /* mac.w @Rm+,@Rn+ */
c55497ec
AJ
836 {
837 TCGv arg0, arg1;
a7812ae4 838 arg0 = tcg_temp_new();
3376f415 839 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
a7812ae4 840 arg1 = tcg_temp_new();
3376f415 841 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
485d0035 842 gen_helper_macw(cpu_env, arg0, arg1);
c55497ec
AJ
843 tcg_temp_free(arg1);
844 tcg_temp_free(arg0);
845 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
846 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
847 }
fdf9b3e8
FB
848 return;
849 case 0x0007: /* mul.l Rm,Rn */
7efbe241 850 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
851 return;
852 case 0x200f: /* muls.w Rm,Rn */
c55497ec
AJ
853 {
854 TCGv arg0, arg1;
a7812ae4 855 arg0 = tcg_temp_new();
c55497ec 856 tcg_gen_ext16s_i32(arg0, REG(B7_4));
a7812ae4 857 arg1 = tcg_temp_new();
c55497ec
AJ
858 tcg_gen_ext16s_i32(arg1, REG(B11_8));
859 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
860 tcg_temp_free(arg1);
861 tcg_temp_free(arg0);
862 }
fdf9b3e8
FB
863 return;
864 case 0x200e: /* mulu.w Rm,Rn */
c55497ec
AJ
865 {
866 TCGv arg0, arg1;
a7812ae4 867 arg0 = tcg_temp_new();
c55497ec 868 tcg_gen_ext16u_i32(arg0, REG(B7_4));
a7812ae4 869 arg1 = tcg_temp_new();
c55497ec
AJ
870 tcg_gen_ext16u_i32(arg1, REG(B11_8));
871 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
872 tcg_temp_free(arg1);
873 tcg_temp_free(arg0);
874 }
fdf9b3e8
FB
875 return;
876 case 0x600b: /* neg Rm,Rn */
7efbe241 877 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
878 return;
879 case 0x600a: /* negc Rm,Rn */
b2d9eda5 880 {
60eb27fe
AJ
881 TCGv t0 = tcg_const_i32(0);
882 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
883 REG(B7_4), t0, cpu_sr_t, t0);
884 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
885 t0, t0, REG(B11_8), cpu_sr_t);
886 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
b2d9eda5 887 tcg_temp_free(t0);
b2d9eda5 888 }
fdf9b3e8
FB
889 return;
890 case 0x6007: /* not Rm,Rn */
7efbe241 891 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
892 return;
893 case 0x200b: /* or Rm,Rn */
7efbe241 894 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
895 return;
896 case 0x400c: /* shad Rm,Rn */
69d6275b 897 {
be654c83
AJ
898 TCGv t0 = tcg_temp_new();
899 TCGv t1 = tcg_temp_new();
900 TCGv t2 = tcg_temp_new();
901
902 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
903
904 /* positive case: shift to the left */
905 tcg_gen_shl_i32(t1, REG(B11_8), t0);
906
907 /* negative case: shift to the right in two steps to
908 correctly handle the -32 case */
909 tcg_gen_xori_i32(t0, t0, 0x1f);
910 tcg_gen_sar_i32(t2, REG(B11_8), t0);
911 tcg_gen_sari_i32(t2, t2, 1);
912
913 /* select between the two cases */
914 tcg_gen_movi_i32(t0, 0);
915 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
916
917 tcg_temp_free(t0);
918 tcg_temp_free(t1);
919 tcg_temp_free(t2);
69d6275b 920 }
fdf9b3e8
FB
921 return;
922 case 0x400d: /* shld Rm,Rn */
69d6275b 923 {
57760161
AJ
924 TCGv t0 = tcg_temp_new();
925 TCGv t1 = tcg_temp_new();
926 TCGv t2 = tcg_temp_new();
927
928 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
929
930 /* positive case: shift to the left */
931 tcg_gen_shl_i32(t1, REG(B11_8), t0);
932
933 /* negative case: shift to the right in two steps to
934 correctly handle the -32 case */
935 tcg_gen_xori_i32(t0, t0, 0x1f);
936 tcg_gen_shr_i32(t2, REG(B11_8), t0);
937 tcg_gen_shri_i32(t2, t2, 1);
938
939 /* select between the two cases */
940 tcg_gen_movi_i32(t0, 0);
941 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
942
943 tcg_temp_free(t0);
944 tcg_temp_free(t1);
945 tcg_temp_free(t2);
69d6275b 946 }
fdf9b3e8
FB
947 return;
948 case 0x3008: /* sub Rm,Rn */
7efbe241 949 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
950 return;
951 case 0x300a: /* subc Rm,Rn */
22b88fd7 952 {
d0f44a55
AJ
953 TCGv t0, t1;
954 t0 = tcg_const_tl(0);
22b88fd7 955 t1 = tcg_temp_new();
d0f44a55
AJ
956 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
957 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
958 REG(B11_8), t0, t1, cpu_sr_t);
959 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
22b88fd7 960 tcg_temp_free(t0);
d0f44a55 961 tcg_temp_free(t1);
22b88fd7 962 }
fdf9b3e8
FB
963 return;
964 case 0x300b: /* subv Rm,Rn */
ad8d25a1
AJ
965 {
966 TCGv t0, t1, t2;
967 t0 = tcg_temp_new();
968 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
969 t1 = tcg_temp_new();
970 tcg_gen_xor_i32(t1, t0, REG(B7_4));
971 t2 = tcg_temp_new();
972 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
973 tcg_gen_and_i32(t1, t1, t2);
974 tcg_temp_free(t2);
34086945 975 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
ad8d25a1
AJ
976 tcg_temp_free(t1);
977 tcg_gen_mov_i32(REG(B11_8), t0);
978 tcg_temp_free(t0);
979 }
fdf9b3e8
FB
980 return;
981 case 0x2008: /* tst Rm,Rn */
c55497ec 982 {
a7812ae4 983 TCGv val = tcg_temp_new();
c55497ec 984 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
34086945 985 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec
AJ
986 tcg_temp_free(val);
987 }
fdf9b3e8
FB
988 return;
989 case 0x200a: /* xor Rm,Rn */
7efbe241 990 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8 991 return;
e67888a7 992 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 993 CHECK_FPU_ENABLED
a6215749 994 if (ctx->tbflags & FPSCR_SZ) {
bdcb3739
RH
995 int xsrc = XHACK(B7_4);
996 int xdst = XHACK(B11_8);
997 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
998 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
eda9b09b 999 } else {
7c9f7038 1000 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
eda9b09b
FB
1001 }
1002 return;
e67888a7 1003 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
f6198371 1004 CHECK_FPU_ENABLED
a6215749 1005 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1006 TCGv_i64 fp = tcg_temp_new_i64();
1007 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1008 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
1009 tcg_temp_free_i64(fp);
eda9b09b 1010 } else {
7c9f7038 1011 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
eda9b09b
FB
1012 }
1013 return;
e67888a7 1014 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 1015 CHECK_FPU_ENABLED
a6215749 1016 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1017 TCGv_i64 fp = tcg_temp_new_i64();
1018 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1019 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1020 tcg_temp_free_i64(fp);
eda9b09b 1021 } else {
7c9f7038 1022 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
eda9b09b
FB
1023 }
1024 return;
e67888a7 1025 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
f6198371 1026 CHECK_FPU_ENABLED
a6215749 1027 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1028 TCGv_i64 fp = tcg_temp_new_i64();
1029 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1030 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1031 tcg_temp_free_i64(fp);
1032 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
eda9b09b 1033 } else {
7c9f7038 1034 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
cc4ba6a9 1035 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
eda9b09b
FB
1036 }
1037 return;
e67888a7 1038 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
f6198371 1039 CHECK_FPU_ENABLED
4d57fa50
RH
1040 {
1041 TCGv addr = tcg_temp_new_i32();
1042 if (ctx->tbflags & FPSCR_SZ) {
1043 TCGv_i64 fp = tcg_temp_new_i64();
1044 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1045 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1046 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1047 tcg_temp_free_i64(fp);
1048 } else {
1049 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1050 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1051 }
1052 tcg_gen_mov_i32(REG(B11_8), addr);
1053 tcg_temp_free(addr);
1054 }
eda9b09b 1055 return;
e67888a7 1056 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
f6198371 1057 CHECK_FPU_ENABLED
cc4ba6a9 1058 {
a7812ae4 1059 TCGv addr = tcg_temp_new_i32();
cc4ba6a9 1060 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
a6215749 1061 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1062 TCGv_i64 fp = tcg_temp_new_i64();
1063 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
1064 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1065 tcg_temp_free_i64(fp);
cc4ba6a9 1066 } else {
7c9f7038 1067 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
cc4ba6a9
AJ
1068 }
1069 tcg_temp_free(addr);
eda9b09b
FB
1070 }
1071 return;
e67888a7 1072 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
f6198371 1073 CHECK_FPU_ENABLED
cc4ba6a9 1074 {
a7812ae4 1075 TCGv addr = tcg_temp_new();
cc4ba6a9 1076 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
a6215749 1077 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1078 TCGv_i64 fp = tcg_temp_new_i64();
1079 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1080 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1081 tcg_temp_free_i64(fp);
cc4ba6a9 1082 } else {
7c9f7038 1083 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
cc4ba6a9
AJ
1084 }
1085 tcg_temp_free(addr);
eda9b09b
FB
1086 }
1087 return;
e67888a7
TS
1088 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1089 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1090 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1091 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1092 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1093 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
cc4ba6a9 1094 {
f6198371 1095 CHECK_FPU_ENABLED
a6215749 1096 if (ctx->tbflags & FPSCR_PR) {
a7812ae4
PB
1097 TCGv_i64 fp0, fp1;
1098
93dc9c89
RH
1099 if (ctx->opcode & 0x0110) {
1100 goto do_illegal;
1101 }
a7812ae4
PB
1102 fp0 = tcg_temp_new_i64();
1103 fp1 = tcg_temp_new_i64();
1e0b21d8
RH
1104 gen_load_fpr64(ctx, fp0, B11_8);
1105 gen_load_fpr64(ctx, fp1, B7_4);
a7812ae4
PB
1106 switch (ctx->opcode & 0xf00f) {
1107 case 0xf000: /* fadd Rm,Rn */
485d0035 1108 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1109 break;
1110 case 0xf001: /* fsub Rm,Rn */
485d0035 1111 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1112 break;
1113 case 0xf002: /* fmul Rm,Rn */
485d0035 1114 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1115 break;
1116 case 0xf003: /* fdiv Rm,Rn */
485d0035 1117 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1118 break;
1119 case 0xf004: /* fcmp/eq Rm,Rn */
92f1f83e 1120 gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
a7812ae4
PB
1121 return;
1122 case 0xf005: /* fcmp/gt Rm,Rn */
92f1f83e 1123 gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
a7812ae4
PB
1124 return;
1125 }
1e0b21d8 1126 gen_store_fpr64(ctx, fp0, B11_8);
a7812ae4
PB
1127 tcg_temp_free_i64(fp0);
1128 tcg_temp_free_i64(fp1);
1129 } else {
a7812ae4
PB
1130 switch (ctx->opcode & 0xf00f) {
1131 case 0xf000: /* fadd Rm,Rn */
7c9f7038
RH
1132 gen_helper_fadd_FT(FREG(B11_8), cpu_env,
1133 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1134 break;
1135 case 0xf001: /* fsub Rm,Rn */
7c9f7038
RH
1136 gen_helper_fsub_FT(FREG(B11_8), cpu_env,
1137 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1138 break;
1139 case 0xf002: /* fmul Rm,Rn */
7c9f7038
RH
1140 gen_helper_fmul_FT(FREG(B11_8), cpu_env,
1141 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1142 break;
1143 case 0xf003: /* fdiv Rm,Rn */
7c9f7038
RH
1144 gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
1145 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1146 break;
1147 case 0xf004: /* fcmp/eq Rm,Rn */
92f1f83e 1148 gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
7c9f7038 1149 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1150 return;
1151 case 0xf005: /* fcmp/gt Rm,Rn */
92f1f83e 1152 gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
7c9f7038 1153 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1154 return;
1155 }
cc4ba6a9 1156 }
ea6cf6be
TS
1157 }
1158 return;
5b7141a1 1159 case 0xf00e: /* fmac FR0,RM,Rn */
7e9f7ca8
RH
1160 CHECK_FPU_ENABLED
1161 CHECK_FPSCR_PR_0
1162 gen_helper_fmac_FT(FREG(B11_8), cpu_env,
1163 FREG(0), FREG(B7_4), FREG(B11_8));
1164 return;
fdf9b3e8
FB
1165 }
1166
1167 switch (ctx->opcode & 0xff00) {
1168 case 0xc900: /* and #imm,R0 */
7efbe241 1169 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1170 return;
24988dc2 1171 case 0xcd00: /* and.b #imm,@(R0,GBR) */
c55497ec
AJ
1172 {
1173 TCGv addr, val;
a7812ae4 1174 addr = tcg_temp_new();
c55497ec 1175 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1176 val = tcg_temp_new();
3376f415 1177 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1178 tcg_gen_andi_i32(val, val, B7_0);
3376f415 1179 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec
AJ
1180 tcg_temp_free(val);
1181 tcg_temp_free(addr);
1182 }
fdf9b3e8
FB
1183 return;
1184 case 0x8b00: /* bf label */
1185 CHECK_NOT_DELAY_SLOT
6f1c2af6 1186 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
fdf9b3e8
FB
1187 return;
1188 case 0x8f00: /* bf/s label */
1189 CHECK_NOT_DELAY_SLOT
ac9707ea 1190 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
6f1c2af6 1191 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
a6215749 1192 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
fdf9b3e8
FB
1193 return;
1194 case 0x8900: /* bt label */
1195 CHECK_NOT_DELAY_SLOT
6f1c2af6 1196 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
fdf9b3e8
FB
1197 return;
1198 case 0x8d00: /* bt/s label */
1199 CHECK_NOT_DELAY_SLOT
ac9707ea 1200 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
6f1c2af6 1201 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
a6215749 1202 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
fdf9b3e8
FB
1203 return;
1204 case 0x8800: /* cmp/eq #imm,R0 */
34086945 1205 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
fdf9b3e8
FB
1206 return;
1207 case 0xc400: /* mov.b @(disp,GBR),R0 */
c55497ec 1208 {
a7812ae4 1209 TCGv addr = tcg_temp_new();
c55497ec 1210 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1211 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec
AJ
1212 tcg_temp_free(addr);
1213 }
fdf9b3e8
FB
1214 return;
1215 case 0xc500: /* mov.w @(disp,GBR),R0 */
c55497ec 1216 {
a7812ae4 1217 TCGv addr = tcg_temp_new();
c55497ec 1218 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
3376f415 1219 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
1220 tcg_temp_free(addr);
1221 }
fdf9b3e8
FB
1222 return;
1223 case 0xc600: /* mov.l @(disp,GBR),R0 */
c55497ec 1224 {
a7812ae4 1225 TCGv addr = tcg_temp_new();
c55497ec 1226 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
3376f415 1227 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
1228 tcg_temp_free(addr);
1229 }
fdf9b3e8
FB
1230 return;
1231 case 0xc000: /* mov.b R0,@(disp,GBR) */
c55497ec 1232 {
a7812ae4 1233 TCGv addr = tcg_temp_new();
c55497ec 1234 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1235 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec
AJ
1236 tcg_temp_free(addr);
1237 }
fdf9b3e8
FB
1238 return;
1239 case 0xc100: /* mov.w R0,@(disp,GBR) */
c55497ec 1240 {
a7812ae4 1241 TCGv addr = tcg_temp_new();
c55497ec 1242 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
3376f415 1243 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
c55497ec
AJ
1244 tcg_temp_free(addr);
1245 }
fdf9b3e8
FB
1246 return;
1247 case 0xc200: /* mov.l R0,@(disp,GBR) */
c55497ec 1248 {
a7812ae4 1249 TCGv addr = tcg_temp_new();
c55497ec 1250 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
3376f415 1251 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
c55497ec
AJ
1252 tcg_temp_free(addr);
1253 }
fdf9b3e8
FB
1254 return;
1255 case 0x8000: /* mov.b R0,@(disp,Rn) */
c55497ec 1256 {
a7812ae4 1257 TCGv addr = tcg_temp_new();
c55497ec 1258 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1259 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec
AJ
1260 tcg_temp_free(addr);
1261 }
fdf9b3e8
FB
1262 return;
1263 case 0x8100: /* mov.w R0,@(disp,Rn) */
c55497ec 1264 {
a7812ae4 1265 TCGv addr = tcg_temp_new();
c55497ec 1266 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
3376f415 1267 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
c55497ec
AJ
1268 tcg_temp_free(addr);
1269 }
fdf9b3e8
FB
1270 return;
1271 case 0x8400: /* mov.b @(disp,Rn),R0 */
c55497ec 1272 {
a7812ae4 1273 TCGv addr = tcg_temp_new();
c55497ec 1274 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1275 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec
AJ
1276 tcg_temp_free(addr);
1277 }
fdf9b3e8
FB
1278 return;
1279 case 0x8500: /* mov.w @(disp,Rn),R0 */
c55497ec 1280 {
a7812ae4 1281 TCGv addr = tcg_temp_new();
c55497ec 1282 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
3376f415 1283 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
1284 tcg_temp_free(addr);
1285 }
fdf9b3e8
FB
1286 return;
1287 case 0xc700: /* mova @(disp,PC),R0 */
6f1c2af6
RH
1288 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1289 4 + B7_0 * 4) & ~3);
fdf9b3e8
FB
1290 return;
1291 case 0xcb00: /* or #imm,R0 */
7efbe241 1292 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1293 return;
24988dc2 1294 case 0xcf00: /* or.b #imm,@(R0,GBR) */
c55497ec
AJ
1295 {
1296 TCGv addr, val;
a7812ae4 1297 addr = tcg_temp_new();
c55497ec 1298 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1299 val = tcg_temp_new();
3376f415 1300 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1301 tcg_gen_ori_i32(val, val, B7_0);
3376f415 1302 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec
AJ
1303 tcg_temp_free(val);
1304 tcg_temp_free(addr);
1305 }
fdf9b3e8
FB
1306 return;
1307 case 0xc300: /* trapa #imm */
c55497ec
AJ
1308 {
1309 TCGv imm;
1310 CHECK_NOT_DELAY_SLOT
ac9707ea 1311 gen_save_cpu_state(ctx, true);
c55497ec 1312 imm = tcg_const_i32(B7_0);
485d0035 1313 gen_helper_trapa(cpu_env, imm);
c55497ec 1314 tcg_temp_free(imm);
6f1c2af6 1315 ctx->base.is_jmp = DISAS_NORETURN;
c55497ec 1316 }
fdf9b3e8
FB
1317 return;
1318 case 0xc800: /* tst #imm,R0 */
c55497ec 1319 {
a7812ae4 1320 TCGv val = tcg_temp_new();
c55497ec 1321 tcg_gen_andi_i32(val, REG(0), B7_0);
34086945 1322 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec
AJ
1323 tcg_temp_free(val);
1324 }
fdf9b3e8 1325 return;
24988dc2 1326 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
c55497ec 1327 {
a7812ae4 1328 TCGv val = tcg_temp_new();
c55497ec 1329 tcg_gen_add_i32(val, REG(0), cpu_gbr);
3376f415 1330 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
c55497ec 1331 tcg_gen_andi_i32(val, val, B7_0);
34086945 1332 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec
AJ
1333 tcg_temp_free(val);
1334 }
fdf9b3e8
FB
1335 return;
1336 case 0xca00: /* xor #imm,R0 */
7efbe241 1337 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1338 return;
24988dc2 1339 case 0xce00: /* xor.b #imm,@(R0,GBR) */
c55497ec
AJ
1340 {
1341 TCGv addr, val;
a7812ae4 1342 addr = tcg_temp_new();
c55497ec 1343 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1344 val = tcg_temp_new();
3376f415 1345 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1346 tcg_gen_xori_i32(val, val, B7_0);
3376f415 1347 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec
AJ
1348 tcg_temp_free(val);
1349 tcg_temp_free(addr);
1350 }
fdf9b3e8
FB
1351 return;
1352 }
1353
1354 switch (ctx->opcode & 0xf08f) {
1355 case 0x408e: /* ldc Rm,Rn_BANK */
fe25591e 1356 CHECK_PRIVILEGED
7efbe241 1357 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
fdf9b3e8
FB
1358 return;
1359 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
fe25591e 1360 CHECK_PRIVILEGED
3376f415 1361 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
7efbe241 1362 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
fdf9b3e8
FB
1363 return;
1364 case 0x0082: /* stc Rm_BANK,Rn */
fe25591e 1365 CHECK_PRIVILEGED
7efbe241 1366 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
fdf9b3e8
FB
1367 return;
1368 case 0x4083: /* stc.l Rm_BANK,@-Rn */
fe25591e 1369 CHECK_PRIVILEGED
c55497ec 1370 {
a7812ae4 1371 TCGv addr = tcg_temp_new();
c55497ec 1372 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 1373 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
3101e99c 1374 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 1375 tcg_temp_free(addr);
c55497ec 1376 }
fdf9b3e8
FB
1377 return;
1378 }
1379
1380 switch (ctx->opcode & 0xf0ff) {
1381 case 0x0023: /* braf Rn */
7efbe241 1382 CHECK_NOT_DELAY_SLOT
6f1c2af6 1383 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
a6215749 1384 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1385 ctx->delayed_pc = (uint32_t) - 1;
1386 return;
1387 case 0x0003: /* bsrf Rn */
7efbe241 1388 CHECK_NOT_DELAY_SLOT
6f1c2af6 1389 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
7efbe241 1390 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
a6215749 1391 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1392 ctx->delayed_pc = (uint32_t) - 1;
1393 return;
1394 case 0x4015: /* cmp/pl Rn */
34086945 1395 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1396 return;
1397 case 0x4011: /* cmp/pz Rn */
34086945 1398 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1399 return;
1400 case 0x4010: /* dt Rn */
7efbe241 1401 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
34086945 1402 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1403 return;
1404 case 0x402b: /* jmp @Rn */
7efbe241
AJ
1405 CHECK_NOT_DELAY_SLOT
1406 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
a6215749 1407 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1408 ctx->delayed_pc = (uint32_t) - 1;
1409 return;
1410 case 0x400b: /* jsr @Rn */
7efbe241 1411 CHECK_NOT_DELAY_SLOT
6f1c2af6 1412 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
7efbe241 1413 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
a6215749 1414 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1415 ctx->delayed_pc = (uint32_t) - 1;
1416 return;
fe25591e
AJ
1417 case 0x400e: /* ldc Rm,SR */
1418 CHECK_PRIVILEGED
34086945
AJ
1419 {
1420 TCGv val = tcg_temp_new();
1421 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1422 gen_write_sr(val);
1423 tcg_temp_free(val);
6f1c2af6 1424 ctx->base.is_jmp = DISAS_STOP;
34086945 1425 }
390af821 1426 return;
fe25591e
AJ
1427 case 0x4007: /* ldc.l @Rm+,SR */
1428 CHECK_PRIVILEGED
c55497ec 1429 {
a7812ae4 1430 TCGv val = tcg_temp_new();
3376f415 1431 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
34086945
AJ
1432 tcg_gen_andi_i32(val, val, 0x700083f3);
1433 gen_write_sr(val);
c55497ec
AJ
1434 tcg_temp_free(val);
1435 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
6f1c2af6 1436 ctx->base.is_jmp = DISAS_STOP;
c55497ec 1437 }
390af821 1438 return;
fe25591e
AJ
1439 case 0x0002: /* stc SR,Rn */
1440 CHECK_PRIVILEGED
34086945 1441 gen_read_sr(REG(B11_8));
390af821 1442 return;
fe25591e
AJ
1443 case 0x4003: /* stc SR,@-Rn */
1444 CHECK_PRIVILEGED
c55497ec 1445 {
a7812ae4 1446 TCGv addr = tcg_temp_new();
34086945 1447 TCGv val = tcg_temp_new();
c55497ec 1448 tcg_gen_subi_i32(addr, REG(B11_8), 4);
34086945
AJ
1449 gen_read_sr(val);
1450 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
3101e99c 1451 tcg_gen_mov_i32(REG(B11_8), addr);
34086945 1452 tcg_temp_free(val);
c55497ec 1453 tcg_temp_free(addr);
c55497ec 1454 }
390af821 1455 return;
8e9b0678 1456#define LD(reg,ldnum,ldpnum,prechk) \
fdf9b3e8 1457 case ldnum: \
fe25591e 1458 prechk \
7efbe241 1459 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
fdf9b3e8
FB
1460 return; \
1461 case ldpnum: \
fe25591e 1462 prechk \
3376f415 1463 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
7efbe241 1464 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
8e9b0678
AC
1465 return;
1466#define ST(reg,stnum,stpnum,prechk) \
fdf9b3e8 1467 case stnum: \
fe25591e 1468 prechk \
7efbe241 1469 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
fdf9b3e8
FB
1470 return; \
1471 case stpnum: \
fe25591e 1472 prechk \
c55497ec 1473 { \
3101e99c 1474 TCGv addr = tcg_temp_new(); \
c55497ec 1475 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
3376f415 1476 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
3101e99c 1477 tcg_gen_mov_i32(REG(B11_8), addr); \
c55497ec 1478 tcg_temp_free(addr); \
86e0abc7 1479 } \
fdf9b3e8 1480 return;
8e9b0678
AC
1481#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1482 LD(reg,ldnum,ldpnum,prechk) \
1483 ST(reg,stnum,stpnum,prechk)
fe25591e
AJ
1484 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1485 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1486 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1487 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
935fc175 1488 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
ccae24d4 1489 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
fe25591e
AJ
1490 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1491 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1492 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1493 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
d8299bcc 1494 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
390af821 1495 case 0x406a: /* lds Rm,FPSCR */
d8299bcc 1496 CHECK_FPU_ENABLED
485d0035 1497 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
6f1c2af6 1498 ctx->base.is_jmp = DISAS_STOP;
390af821
AJ
1499 return;
1500 case 0x4066: /* lds.l @Rm+,FPSCR */
d8299bcc 1501 CHECK_FPU_ENABLED
c55497ec 1502 {
a7812ae4 1503 TCGv addr = tcg_temp_new();
3376f415 1504 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
c55497ec 1505 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
485d0035 1506 gen_helper_ld_fpscr(cpu_env, addr);
c55497ec 1507 tcg_temp_free(addr);
6f1c2af6 1508 ctx->base.is_jmp = DISAS_STOP;
c55497ec 1509 }
390af821
AJ
1510 return;
1511 case 0x006a: /* sts FPSCR,Rn */
d8299bcc 1512 CHECK_FPU_ENABLED
c55497ec 1513 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
390af821
AJ
1514 return;
1515 case 0x4062: /* sts FPSCR,@-Rn */
d8299bcc 1516 CHECK_FPU_ENABLED
c55497ec
AJ
1517 {
1518 TCGv addr, val;
a7812ae4 1519 val = tcg_temp_new();
c55497ec 1520 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
a7812ae4 1521 addr = tcg_temp_new();
c55497ec 1522 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 1523 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
3101e99c 1524 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec
AJ
1525 tcg_temp_free(addr);
1526 tcg_temp_free(val);
c55497ec 1527 }
390af821 1528 return;
fdf9b3e8 1529 case 0x00c3: /* movca.l R0,@Rm */
852d481f
EI
1530 {
1531 TCGv val = tcg_temp_new();
3376f415 1532 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
485d0035 1533 gen_helper_movcal(cpu_env, REG(B11_8), val);
3376f415 1534 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
e691e0ed 1535 tcg_temp_free(val);
852d481f
EI
1536 }
1537 ctx->has_movcal = 1;
fdf9b3e8 1538 return;
143021b2 1539 case 0x40a9: /* movua.l @Rm,R0 */
ccae24d4 1540 CHECK_SH4A
143021b2 1541 /* Load non-boundary-aligned data */
ccae24d4
RH
1542 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1543 MO_TEUL | MO_UNALN);
1544 return;
143021b2
AJ
1545 break;
1546 case 0x40e9: /* movua.l @Rm+,R0 */
ccae24d4 1547 CHECK_SH4A
143021b2 1548 /* Load non-boundary-aligned data */
ccae24d4
RH
1549 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1550 MO_TEUL | MO_UNALN);
1551 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1552 return;
143021b2 1553 break;
fdf9b3e8 1554 case 0x0029: /* movt Rn */
34086945 1555 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
fdf9b3e8 1556 return;
66c7c806
AJ
1557 case 0x0073:
1558 /* MOVCO.L
f85da308
RH
1559 * LDST -> T
1560 * If (T == 1) R0 -> (Rn)
1561 * 0 -> LDST
1562 *
1563 * The above description doesn't work in a parallel context.
1564 * Since we currently support no smp boards, this implies user-mode.
1565 * But we can still support the official mechanism while user-mode
1566 * is single-threaded. */
ccae24d4
RH
1567 CHECK_SH4A
1568 {
f85da308
RH
1569 TCGLabel *fail = gen_new_label();
1570 TCGLabel *done = gen_new_label();
1571
6f1c2af6 1572 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1573 TCGv tmp;
1574
1575 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1576 cpu_lock_addr, fail);
1577 tmp = tcg_temp_new();
1578 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
1579 REG(0), ctx->memidx, MO_TEUL);
1580 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
1581 tcg_temp_free(tmp);
1582 } else {
1583 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
1584 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1585 tcg_gen_movi_i32(cpu_sr_t, 1);
1586 }
1587 tcg_gen_br(done);
1588
1589 gen_set_label(fail);
1590 tcg_gen_movi_i32(cpu_sr_t, 0);
1591
1592 gen_set_label(done);
1593 tcg_gen_movi_i32(cpu_lock_addr, -1);
ccae24d4 1594 }
f85da308 1595 return;
66c7c806
AJ
1596 case 0x0063:
1597 /* MOVLI.L @Rm,R0
f85da308
RH
1598 * 1 -> LDST
1599 * (Rm) -> R0
1600 * When interrupt/exception
1601 * occurred 0 -> LDST
1602 *
1603 * In a parallel context, we must also save the loaded value
1604 * for use with the cmpxchg that we'll use with movco.l. */
ccae24d4 1605 CHECK_SH4A
6f1c2af6 1606 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1607 TCGv tmp = tcg_temp_new();
1608 tcg_gen_mov_i32(tmp, REG(B11_8));
1609 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1610 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1611 tcg_gen_mov_i32(cpu_lock_addr, tmp);
1612 tcg_temp_free(tmp);
1613 } else {
1614 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1615 tcg_gen_movi_i32(cpu_lock_addr, 0);
1616 }
ccae24d4 1617 return;
fdf9b3e8 1618 case 0x0093: /* ocbi @Rn */
c55497ec 1619 {
485d0035 1620 gen_helper_ocbi(cpu_env, REG(B11_8));
c55497ec 1621 }
fdf9b3e8 1622 return;
24988dc2 1623 case 0x00a3: /* ocbp @Rn */
fdf9b3e8 1624 case 0x00b3: /* ocbwb @Rn */
0cdb9554
AJ
1625 /* These instructions are supposed to do nothing in case of
1626 a cache miss. Given that we only partially emulate caches
1627 it is safe to simply ignore them. */
fdf9b3e8
FB
1628 return;
1629 case 0x0083: /* pref @Rn */
1630 return;
71968fa6 1631 case 0x00d3: /* prefi @Rn */
ccae24d4
RH
1632 CHECK_SH4A
1633 return;
71968fa6 1634 case 0x00e3: /* icbi @Rn */
ccae24d4
RH
1635 CHECK_SH4A
1636 return;
71968fa6 1637 case 0x00ab: /* synco */
ccae24d4
RH
1638 CHECK_SH4A
1639 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1640 return;
aa351317 1641 break;
fdf9b3e8 1642 case 0x4024: /* rotcl Rn */
c55497ec 1643 {
a7812ae4 1644 TCGv tmp = tcg_temp_new();
34086945
AJ
1645 tcg_gen_mov_i32(tmp, cpu_sr_t);
1646 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
c55497ec 1647 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1648 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec
AJ
1649 tcg_temp_free(tmp);
1650 }
fdf9b3e8
FB
1651 return;
1652 case 0x4025: /* rotcr Rn */
c55497ec 1653 {
a7812ae4 1654 TCGv tmp = tcg_temp_new();
34086945
AJ
1655 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1656 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
c55497ec 1657 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
34086945 1658 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec
AJ
1659 tcg_temp_free(tmp);
1660 }
fdf9b3e8
FB
1661 return;
1662 case 0x4004: /* rotl Rn */
2411fde9 1663 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1664 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1665 return;
1666 case 0x4005: /* rotr Rn */
34086945 1667 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 0);
2411fde9 1668 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1669 return;
1670 case 0x4000: /* shll Rn */
1671 case 0x4020: /* shal Rn */
34086945 1672 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
7efbe241 1673 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1674 return;
1675 case 0x4021: /* shar Rn */
34086945 1676 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1677 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1678 return;
1679 case 0x4001: /* shlr Rn */
34086945 1680 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1681 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1682 return;
1683 case 0x4008: /* shll2 Rn */
7efbe241 1684 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1685 return;
1686 case 0x4018: /* shll8 Rn */
7efbe241 1687 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1688 return;
1689 case 0x4028: /* shll16 Rn */
7efbe241 1690 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1691 return;
1692 case 0x4009: /* shlr2 Rn */
7efbe241 1693 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1694 return;
1695 case 0x4019: /* shlr8 Rn */
7efbe241 1696 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1697 return;
1698 case 0x4029: /* shlr16 Rn */
7efbe241 1699 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1700 return;
1701 case 0x401b: /* tas.b @Rn */
cb32f179
AJ
1702 {
1703 TCGv val = tcg_const_i32(0x80);
1704 tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1705 ctx->memidx, MO_UB);
34086945 1706 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
cb32f179
AJ
1707 tcg_temp_free(val);
1708 }
1709 return;
e67888a7 1710 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
f6198371 1711 CHECK_FPU_ENABLED
7c9f7038 1712 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
eda9b09b 1713 return;
e67888a7 1714 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
f6198371 1715 CHECK_FPU_ENABLED
7c9f7038 1716 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
eda9b09b 1717 return;
e67888a7 1718 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
f6198371 1719 CHECK_FPU_ENABLED
a6215749 1720 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1721 TCGv_i64 fp;
93dc9c89
RH
1722 if (ctx->opcode & 0x0100) {
1723 goto do_illegal;
1724 }
a7812ae4 1725 fp = tcg_temp_new_i64();
485d0035 1726 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1727 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1728 tcg_temp_free_i64(fp);
ea6cf6be
TS
1729 }
1730 else {
7c9f7038 1731 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
ea6cf6be
TS
1732 }
1733 return;
e67888a7 1734 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
f6198371 1735 CHECK_FPU_ENABLED
a6215749 1736 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1737 TCGv_i64 fp;
93dc9c89
RH
1738 if (ctx->opcode & 0x0100) {
1739 goto do_illegal;
1740 }
a7812ae4 1741 fp = tcg_temp_new_i64();
1e0b21d8 1742 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1743 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
a7812ae4 1744 tcg_temp_free_i64(fp);
ea6cf6be
TS
1745 }
1746 else {
7c9f7038 1747 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
ea6cf6be
TS
1748 }
1749 return;
24988dc2 1750 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
f6198371 1751 CHECK_FPU_ENABLED
7c9f7038 1752 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
24988dc2 1753 return;
57f5c1b0 1754 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
f6198371 1755 CHECK_FPU_ENABLED
7c9f7038 1756 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
24988dc2
AJ
1757 return;
1758 case 0xf06d: /* fsqrt FRn */
f6198371 1759 CHECK_FPU_ENABLED
a6215749 1760 if (ctx->tbflags & FPSCR_PR) {
93dc9c89
RH
1761 if (ctx->opcode & 0x0100) {
1762 goto do_illegal;
1763 }
a7812ae4 1764 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1765 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1766 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1e0b21d8 1767 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1768 tcg_temp_free_i64(fp);
24988dc2 1769 } else {
7c9f7038 1770 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2
AJ
1771 }
1772 return;
1773 case 0xf07d: /* fsrra FRn */
f6198371 1774 CHECK_FPU_ENABLED
11b7aa23
RH
1775 CHECK_FPSCR_PR_0
1776 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2 1777 break;
e67888a7 1778 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
f6198371 1779 CHECK_FPU_ENABLED
7e9f7ca8
RH
1780 CHECK_FPSCR_PR_0
1781 tcg_gen_movi_i32(FREG(B11_8), 0);
1782 return;
e67888a7 1783 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
f6198371 1784 CHECK_FPU_ENABLED
7e9f7ca8
RH
1785 CHECK_FPSCR_PR_0
1786 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1787 return;
24988dc2 1788 case 0xf0ad: /* fcnvsd FPUL,DRn */
f6198371 1789 CHECK_FPU_ENABLED
cc4ba6a9 1790 {
a7812ae4 1791 TCGv_i64 fp = tcg_temp_new_i64();
485d0035 1792 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1793 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1794 tcg_temp_free_i64(fp);
cc4ba6a9 1795 }
24988dc2
AJ
1796 return;
1797 case 0xf0bd: /* fcnvds DRn,FPUL */
f6198371 1798 CHECK_FPU_ENABLED
cc4ba6a9 1799 {
a7812ae4 1800 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1801 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1802 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
a7812ae4 1803 tcg_temp_free_i64(fp);
cc4ba6a9 1804 }
24988dc2 1805 return;
af8c2bde
AJ
1806 case 0xf0ed: /* fipr FVm,FVn */
1807 CHECK_FPU_ENABLED
7e9f7ca8
RH
1808 CHECK_FPSCR_PR_1
1809 {
1810 TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1811 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
485d0035 1812 gen_helper_fipr(cpu_env, m, n);
af8c2bde
AJ
1813 tcg_temp_free(m);
1814 tcg_temp_free(n);
1815 return;
1816 }
1817 break;
17075f10
AJ
1818 case 0xf0fd: /* ftrv XMTRX,FVn */
1819 CHECK_FPU_ENABLED
7e9f7ca8
RH
1820 CHECK_FPSCR_PR_1
1821 {
1822 if ((ctx->opcode & 0x0300) != 0x0100) {
1823 goto do_illegal;
1824 }
1825 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
485d0035 1826 gen_helper_ftrv(cpu_env, n);
17075f10
AJ
1827 tcg_temp_free(n);
1828 return;
1829 }
1830 break;
fdf9b3e8 1831 }
bacc637a 1832#if 0
fdf9b3e8 1833 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
6f1c2af6 1834 ctx->opcode, ctx->base.pc_next);
bacc637a
AJ
1835 fflush(stderr);
1836#endif
6b98213d 1837 do_illegal:
9a562ae7 1838 if (ctx->envflags & DELAY_SLOT_MASK) {
dec16c6e
RH
1839 do_illegal_slot:
1840 gen_save_cpu_state(ctx, true);
485d0035 1841 gen_helper_raise_slot_illegal_instruction(cpu_env);
86865c5f 1842 } else {
dec16c6e 1843 gen_save_cpu_state(ctx, true);
485d0035 1844 gen_helper_raise_illegal_instruction(cpu_env);
86865c5f 1845 }
6f1c2af6 1846 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042
RH
1847 return;
1848
1849 do_fpu_disabled:
1850 gen_save_cpu_state(ctx, true);
1851 if (ctx->envflags & DELAY_SLOT_MASK) {
1852 gen_helper_raise_slot_fpu_disable(cpu_env);
1853 } else {
1854 gen_helper_raise_fpu_disable(cpu_env);
1855 }
6f1c2af6 1856 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042 1857 return;
823029f9
TS
1858}
1859
b1d8e52e 1860static void decode_opc(DisasContext * ctx)
823029f9 1861{
a6215749 1862 uint32_t old_flags = ctx->envflags;
823029f9
TS
1863
1864 _decode_opc(ctx);
1865
9a562ae7 1866 if (old_flags & DELAY_SLOT_MASK) {
39682608 1867 /* go out of the delay slot */
9a562ae7 1868 ctx->envflags &= ~DELAY_SLOT_MASK;
4bfa602b
RH
1869
1870 /* When in an exclusive region, we must continue to the end
1871 for conditional branches. */
1872 if (ctx->tbflags & GUSA_EXCLUSIVE
1873 && old_flags & DELAY_SLOT_CONDITIONAL) {
1874 gen_delayed_conditional_jump(ctx);
1875 return;
1876 }
1877 /* Otherwise this is probably an invalid gUSA region.
1878 Drop the GUSA bits so the next TB doesn't see them. */
1879 ctx->envflags &= ~GUSA_MASK;
1880
ac9707ea 1881 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
823029f9
TS
1882 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1883 gen_delayed_conditional_jump(ctx);
be53081a 1884 } else {
823029f9
TS
1885 gen_jump(ctx);
1886 }
4bfa602b
RH
1887 }
1888}
1889
#ifdef CONFIG_USER_ONLY
/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
   Upon an interrupt, a real kernel would simply notice magic values in
   the registers and reset the PC to the start of the sequence.

   For QEMU, we cannot do this in quite the same way.  Instead, we notice
   the normal start of such a sequence (mov #-x,r15).  While we can handle
   any sequence via cpu_exec_step_atomic, we can recognize the "normal"
   sequences and transform them into atomic operations as seen by the host.

   The recognized shape is:  load ; [optional reg move] ; [optional op,
   possibly with a compare+branch] ; store.  On any mismatch we jump to
   "fail" and fall back to executing the region under the exclusive lock.
*/
static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
{
    uint16_t insns[5];
    int ld_adr, ld_dst, ld_mop;          /* load: address reg, dest reg, memop */
    int op_dst, op_src, op_opc;          /* ALU op: dest, second input, TCG opcode */
    int mv_src, mt_dst, st_src, st_mop;  /* mov source, movt dest, store source/memop */
    TCGv op_arg;
    uint32_t pc = ctx->base.pc_next;
    uint32_t pc_end = ctx->base.tb->cs_base;  /* gUSA region end, stashed in cs_base */
    int max_insns = (pc_end - pc) / 2;
    int i;

    /* The state machine below will consume only a few insns.
       If there are more than that in a region, fail now. */
    if (max_insns > ARRAY_SIZE(insns)) {
        goto fail;
    }

    /* Read all of the insns for the region. */
    for (i = 0; i < max_insns; ++i) {
        insns[i] = cpu_lduw_code(env, pc + i * 2);
    }

    /* -1 means "not seen yet" for all of the pattern slots below. */
    ld_adr = ld_dst = ld_mop = -1;
    mv_src = -1;
    op_dst = op_src = op_opc = -1;
    mt_dst = -1;
    st_src = st_mop = -1;
    op_arg = NULL;
    i = 0;

#define NEXT_INSN \
    do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)

    /*
     * Expect a load to begin the region.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6000: /* mov.b @Rm,Rn */
        ld_mop = MO_SB;
        break;
    case 0x6001: /* mov.w @Rm,Rn */
        ld_mop = MO_TESW;
        break;
    case 0x6002: /* mov.l @Rm,Rn */
        ld_mop = MO_TESL;
        break;
    default:
        goto fail;
    }
    ld_adr = B7_4;
    ld_dst = B11_8;
    if (ld_adr == ld_dst) {
        goto fail;
    }
    /* Unless we see a mov, any two-operand operation must use ld_dst. */
    op_dst = ld_dst;

    /*
     * Expect an optional register move.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        /* Here we want to recognize ld_dst being saved for later consumption,
           or for another input register being copied so that ld_dst need not
           be clobbered during the operation. */
        op_dst = B11_8;
        mv_src = B7_4;
        if (op_dst == ld_dst) {
            /* Overwriting the load output. */
            goto fail;
        }
        if (mv_src != ld_dst) {
            /* Copying a new input; constrain op_src to match the load. */
            op_src = ld_dst;
        }
        break;

    default:
        /* Put back and re-examine as operation. */
        --i;
    }

    /*
     * Expect the operation.
     */
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x300c: /* add Rm,Rn */
        op_opc = INDEX_op_add_i32;
        goto do_reg_op;
    case 0x2009: /* and Rm,Rn */
        op_opc = INDEX_op_and_i32;
        goto do_reg_op;
    case 0x200a: /* xor Rm,Rn */
        op_opc = INDEX_op_xor_i32;
        goto do_reg_op;
    case 0x200b: /* or Rm,Rn */
        op_opc = INDEX_op_or_i32;
    do_reg_op:
        /* The operation register should be as expected, and the
           other input cannot depend on the load. */
        if (op_dst != B11_8) {
            goto fail;
        }
        if (op_src < 0) {
            /* Unconstrained input. */
            op_src = B7_4;
        } else if (op_src == B7_4) {
            /* Constrained input matched load.  All operations are
               commutative; "swap" them by "moving" the load output
               to the (implicit) first argument and the move source
               to the (explicit) second argument. */
            op_src = mv_src;
        } else {
            goto fail;
        }
        op_arg = REG(op_src);
        break;

    case 0x6007: /* not Rm,Rn */
        if (ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_dst = B11_8;
        /* not == xor with all-ones. */
        op_opc = INDEX_op_xor_i32;
        op_arg = tcg_const_i32(-1);
        break;

    case 0x7000 ... 0x700f: /* add #imm,Rn */
        if (op_dst != B11_8 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_add_i32;
        op_arg = tcg_const_i32(B7_0s);
        break;

    case 0x3000: /* cmp/eq Rm,Rn */
        /* Looking for the middle of a compare-and-swap sequence,
           beginning with the compare.  Operands can be either order,
           but with only one overlapping the load. */
        if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32; /* placeholder */
        op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
        op_arg = REG(op_src);

        NEXT_INSN;
        switch (ctx->opcode & 0xff00) {
        case 0x8b00: /* bf label */
        case 0x8f00: /* bf/s label */
            /* The branch must exit exactly at the region end. */
            if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
                goto fail;
            }
            if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
                break;
            }
            /* We're looking to unconditionally modify Rn with the
               result of the comparison, within the delay slot of
               the branch.  This is used by older gcc. */
            NEXT_INSN;
            if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
                mt_dst = B11_8;
            } else {
                goto fail;
            }
            break;

        default:
            goto fail;
        }
        break;

    case 0x2008: /* tst Rm,Rn */
        /* Looking for a compare-and-swap against zero. */
        if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
            goto fail;
        }
        op_opc = INDEX_op_setcond_i32;
        op_arg = tcg_const_i32(0);

        NEXT_INSN;
        if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
            || pc + (i + 1 + B7_0s) * 2 != pc_end) {
            goto fail;
        }
        break;

    default:
        /* Put back and re-examine as store. */
        --i;
    }

    /*
     * Expect the store.
     */
    /* The store must be the last insn. */
    if (i != max_insns - 1) {
        goto fail;
    }
    NEXT_INSN;
    switch (ctx->opcode & 0xf00f) {
    case 0x2000: /* mov.b Rm,@Rn */
        st_mop = MO_UB;
        break;
    case 0x2001: /* mov.w Rm,@Rn */
        st_mop = MO_UW;
        break;
    case 0x2002: /* mov.l Rm,@Rn */
        st_mop = MO_UL;
        break;
    default:
        goto fail;
    }
    /* The store must match the load. */
    if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
        goto fail;
    }
    st_src = B7_4;

#undef NEXT_INSN

    /*
     * Emit the operation.
     */
    switch (op_opc) {
    case -1:
        /* No operation found.  Look for exchange pattern. */
        if (st_src == ld_dst || mv_src >= 0) {
            goto fail;
        }
        tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
                                ctx->memidx, ld_mop);
        break;

    case INDEX_op_add_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst && st_mop == MO_UL) {
            tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            if (op_dst != ld_dst) {
                /* Note that mop sizes < 4 cannot use add_fetch
                   because it won't carry into the higher bits. */
                tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
            }
        }
        break;

    case INDEX_op_and_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_or_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
                                        op_arg, ctx->memidx, ld_mop);
            tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_xor_i32:
        if (op_dst != st_src) {
            goto fail;
        }
        if (op_dst == ld_dst) {
            tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
        } else {
            tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
                                         op_arg, ctx->memidx, ld_mop);
            tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
        }
        break;

    case INDEX_op_setcond_i32:
        /* Compare-and-swap: cmpxchg against op_arg, store st_src. */
        if (st_src == ld_dst) {
            goto fail;
        }
        tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
                                   REG(st_src), ctx->memidx, ld_mop);
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
        if (mt_dst >= 0) {
            tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
        }
        break;

    default:
        g_assert_not_reached();
    }

    /* If op_src is not a valid register, then op_arg was a constant. */
    if (op_src < 0 && op_arg) {
        tcg_temp_free_i32(op_arg);
    }

    /* The entire region has been translated. */
    ctx->envflags &= ~GUSA_MASK;
    ctx->base.pc_next = pc_end;
    ctx->base.num_insns += max_insns - 1;
    return;

 fail:
    qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
                  pc, pc_end);

    /* Restart with the EXCLUSIVE bit set, within a TB run via
       cpu_exec_step_atomic holding the exclusive lock. */
    ctx->envflags |= GUSA_EXCLUSIVE;
    gen_save_cpu_state(ctx, false);
    gen_helper_exclusive(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;

    /* We're not executing an instruction, but we must report one for the
       purposes of accounting within the TB.  We might as well report the
       entire region consumed via ctx->base.pc_next so that it's immediately
       available in the disassembly dump. */
    ctx->base.pc_next = pc_end;
    ctx->base.num_insns += max_insns - 1;
}
#endif
fdf9b3e8 2244
fd1b3d38 2245static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
fdf9b3e8 2246{
fd1b3d38 2247 DisasContext *ctx = container_of(dcbase, DisasContext, base);
9c489ea6 2248 CPUSH4State *env = cs->env_ptr;
be0e3d7a 2249 uint32_t tbflags;
fd1b3d38
EC
2250 int bound;
2251
be0e3d7a
RH
2252 ctx->tbflags = tbflags = ctx->base.tb->flags;
2253 ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
2254 ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
9854bc46
PB
2255 /* We don't know if the delayed pc came from a dynamic or static branch,
2256 so assume it is a dynamic branch. */
fd1b3d38
EC
2257 ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2258 ctx->features = env->features;
be0e3d7a
RH
2259 ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2260 ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2261 (tbflags & (1 << SR_RB))) * 0x10;
2262 ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2263
2264 if (tbflags & GUSA_MASK) {
2265 uint32_t pc = ctx->base.pc_next;
2266 uint32_t pc_end = ctx->base.tb->cs_base;
2267 int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
2268 int max_insns = (pc_end - pc) / 2;
2269
2270 if (pc != pc_end + backup || max_insns < 2) {
2271 /* This is a malformed gUSA region. Don't do anything special,
2272 since the interpreter is likely to get confused. */
2273 ctx->envflags &= ~GUSA_MASK;
2274 } else if (tbflags & GUSA_EXCLUSIVE) {
2275 /* Regardless of single-stepping or the end of the page,
2276 we must complete execution of the gUSA region while
2277 holding the exclusive lock. */
2278 ctx->base.max_insns = max_insns;
2279 return;
2280 }
2281 }
4448a836
RH
2282
2283 /* Since the ISA is fixed-width, we can bound by the number
2284 of instructions remaining on the page. */
fd1b3d38
EC
2285 bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2286 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2287}
4448a836 2288
fd1b3d38
EC
/* TranslatorOps hook: no extra per-TB setup beyond init_disas_context. */
static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
4bfa602b 2292
fd1b3d38
EC
2293static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2294{
2295 DisasContext *ctx = container_of(dcbase, DisasContext, base);
667b8e29 2296
fd1b3d38
EC
2297 tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2298}
b933066a 2299
fd1b3d38
EC
2300static bool sh4_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
2301 const CPUBreakpoint *bp)
2302{
2303 DisasContext *ctx = container_of(dcbase, DisasContext, base);
667b8e29 2304
fd1b3d38
EC
2305 /* We have hit a breakpoint - make sure PC is up-to-date */
2306 gen_save_cpu_state(ctx, true);
2307 gen_helper_debug(cpu_env);
2308 ctx->base.is_jmp = DISAS_NORETURN;
2309 /* The address covered by the breakpoint must be included in
2310 [tb->pc, tb->pc + tb->size) in order to for it to be
2311 properly cleared -- thus we increment the PC here so that
2312 the logic setting tb->size below does the right thing. */
2313 ctx->base.pc_next += 2;
2314 return true;
2315}
2316
2317static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2318{
2319 CPUSH4State *env = cs->env_ptr;
2320 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4bfa602b 2321
be0e3d7a
RH
2322#ifdef CONFIG_USER_ONLY
2323 if (unlikely(ctx->envflags & GUSA_MASK)
2324 && !(ctx->envflags & GUSA_EXCLUSIVE)) {
2325 /* We're in an gUSA region, and we have not already fallen
2326 back on using an exclusive region. Attempt to parse the
2327 region into a single supported atomic operation. Failure
2328 is handled within the parser by raising an exception to
2329 retry using an exclusive region. */
2330 decode_gusa(ctx, env);
2331 return;
2332 }
2333#endif
2334
fd1b3d38
EC
2335 ctx->opcode = cpu_lduw_code(env, ctx->base.pc_next);
2336 decode_opc(ctx);
2337 ctx->base.pc_next += 2;
2338}
2339
2340static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2341{
2342 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2343
2344 if (ctx->tbflags & GUSA_EXCLUSIVE) {
4bfa602b 2345 /* Ending the region of exclusivity. Clear the bits. */
fd1b3d38 2346 ctx->envflags &= ~GUSA_MASK;
4bfa602b
RH
2347 }
2348
fd1b3d38 2349 switch (ctx->base.is_jmp) {
34cf5678 2350 case DISAS_STOP:
fd1b3d38
EC
2351 gen_save_cpu_state(ctx, true);
2352 if (ctx->base.singlestep_enabled) {
34cf5678
RH
2353 gen_helper_debug(cpu_env);
2354 } else {
07ea28b4 2355 tcg_gen_exit_tb(NULL, 0);
34cf5678
RH
2356 }
2357 break;
2358 case DISAS_NEXT:
fd1b3d38
EC
2359 case DISAS_TOO_MANY:
2360 gen_save_cpu_state(ctx, false);
2361 gen_goto_tb(ctx, 0, ctx->base.pc_next);
34cf5678
RH
2362 break;
2363 case DISAS_NORETURN:
2364 break;
2365 default:
2366 g_assert_not_reached();
fdf9b3e8 2367 }
fd1b3d38 2368}
823029f9 2369
fd1b3d38
EC
/* TranslatorOps hook: log the guest insns of this TB (-d in_asm). */
static void sh4_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}
0a7df5da 2375
fd1b3d38
EC
/* Hooks wiring the SH4 translator into the generic translator_loop. */
static const TranslatorOps sh4_tr_ops = {
    .init_disas_context = sh4_tr_init_disas_context,
    .tb_start = sh4_tr_tb_start,
    .insn_start = sh4_tr_insn_start,
    .breakpoint_check = sh4_tr_breakpoint_check,
    .translate_insn = sh4_tr_translate_insn,
    .tb_stop = sh4_tr_tb_stop,
    .disas_log = sh4_tr_disas_log,
};
2385
8b86d6d2 2386void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
fd1b3d38
EC
2387{
2388 DisasContext ctx;
fdf9b3e8 2389
8b86d6d2 2390 translator_loop(&sh4_tr_ops, &ctx.base, cs, tb, max_insns);
fdf9b3e8
FB
2391}
2392
bad729e2
RH
2393void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
2394 target_ulong *data)
d2856f1a 2395{
bad729e2
RH
2396 env->pc = data[0];
2397 env->flags = data[1];
ac9707ea
AJ
2398 /* Theoretically delayed_pc should also be restored. In practice the
2399 branch instruction is re-executed after exception, so the delayed
2400 branch target will be recomputed. */
d2856f1a 2401}