git.proxmox.com Git - mirror_qemu.git / blame - target/sh4/translate.c
tcg: Rename cpu_env to tcg_env
fdf9b3e8
FB
1/*
2 * SH4 translation
5fafdf24 3 *
fdf9b3e8
FB
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
6faf2b6c 9 * version 2.1 of the License, or (at your option) any later version.
fdf9b3e8
FB
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
fdf9b3e8 18 */
fdf9b3e8 19
9d4c9946 20#include "qemu/osdep.h"
fdf9b3e8 21#include "cpu.h"
76cad711 22#include "disas/disas.h"
63c91552 23#include "exec/exec-all.h"
dcb32f1d 24#include "tcg/tcg-op.h"
2ef6175a
RH
25#include "exec/helper-proto.h"
26#include "exec/helper-gen.h"
4834871b 27#include "exec/translator.h"
508127e2 28#include "exec/log.h"
90c84c56 29#include "qemu/qemu-print.h"
a7e30d84 30
d53106c9
RH
31#define HELPER_H "helper.h"
32#include "exec/helper-info.c.inc"
33#undef HELPER_H
34
a7e30d84 35
fdf9b3e8 36typedef struct DisasContext {
6f1c2af6
RH
37 DisasContextBase base;
38
39 uint32_t tbflags; /* should stay unmodified during the TB translation */
40 uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
fdf9b3e8 41 int memidx;
3a3bb8d2 42 int gbank;
5c13bad9 43 int fbank;
fdf9b3e8 44 uint32_t delayed_pc;
71968fa6 45 uint32_t features;
6f1c2af6
RH
46
47 uint16_t opcode;
48
49 bool has_movcal;
fdf9b3e8
FB
50} DisasContext;
51
fe25591e
AJ
52#if defined(CONFIG_USER_ONLY)
53#define IS_USER(ctx) 1
4da06fb3 54#define UNALIGN(C) (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN)
fe25591e 55#else
a6215749 56#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
4da06fb3 57#define UNALIGN(C) 0
fe25591e
AJ
58#endif
59
6f1c2af6 60/* Target-specific values for ctx->base.is_jmp. */
4834871b
RH
61/* We want to exit back to the cpu loop for some reason.
62 Usually this is to recognize interrupts immediately. */
63#define DISAS_STOP DISAS_TARGET_0
823029f9 64
1e8864f7 65/* global register indexes */
3a3bb8d2 66static TCGv cpu_gregs[32];
1d565b21
AJ
67static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
68static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
3a8a44c4 69static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
f85da308
RH
70static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
71static TCGv cpu_lock_addr, cpu_lock_value;
66ba317c 72static TCGv cpu_fregs[32];
1000822b
AJ
73
74/* internal register indexes */
47b9f4d5 75static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
1e8864f7 76
aa7408ec 77void sh4_translate_init(void)
2e70f6ef 78{
1e8864f7 79 int i;
559dd74d 80 static const char * const gregnames[24] = {
1e8864f7
AJ
81 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
82 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
83 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
84 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
85 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
86 };
66ba317c
AJ
87 static const char * const fregnames[32] = {
88 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
89 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
90 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
91 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
92 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
93 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
94 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
95 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
96 };
1e8864f7 97
3a3bb8d2 98 for (i = 0; i < 24; i++) {
ad75a51e 99 cpu_gregs[i] = tcg_global_mem_new_i32(tcg_env,
73e5716c 100 offsetof(CPUSH4State, gregs[i]),
66ba317c 101 gregnames[i]);
3a3bb8d2
RH
102 }
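    /*
     * R8..R15 are not banked; alias their TCGv handles into slots 24..31
     * so that the bank-selecting XOR used by REG()/ALTREG() works
     * uniformly for every register number.
     */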
103 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
988d7eaa 104
ad75a51e 105 cpu_pc = tcg_global_mem_new_i32(tcg_env,
73e5716c 106 offsetof(CPUSH4State, pc), "PC");
ad75a51e 107 cpu_sr = tcg_global_mem_new_i32(tcg_env,
73e5716c 108 offsetof(CPUSH4State, sr), "SR");
ad75a51e 109 cpu_sr_m = tcg_global_mem_new_i32(tcg_env,
e1ccc054 110 offsetof(CPUSH4State, sr_m), "SR_M");
ad75a51e 111 cpu_sr_q = tcg_global_mem_new_i32(tcg_env,
e1ccc054 112 offsetof(CPUSH4State, sr_q), "SR_Q");
ad75a51e 113 cpu_sr_t = tcg_global_mem_new_i32(tcg_env,
e1ccc054 114 offsetof(CPUSH4State, sr_t), "SR_T");
ad75a51e 115 cpu_ssr = tcg_global_mem_new_i32(tcg_env,
73e5716c 116 offsetof(CPUSH4State, ssr), "SSR");
ad75a51e 117 cpu_spc = tcg_global_mem_new_i32(tcg_env,
73e5716c 118 offsetof(CPUSH4State, spc), "SPC");
ad75a51e 119 cpu_gbr = tcg_global_mem_new_i32(tcg_env,
73e5716c 120 offsetof(CPUSH4State, gbr), "GBR");
ad75a51e 121 cpu_vbr = tcg_global_mem_new_i32(tcg_env,
73e5716c 122 offsetof(CPUSH4State, vbr), "VBR");
ad75a51e 123 cpu_sgr = tcg_global_mem_new_i32(tcg_env,
73e5716c 124 offsetof(CPUSH4State, sgr), "SGR");
ad75a51e 125 cpu_dbr = tcg_global_mem_new_i32(tcg_env,
73e5716c 126 offsetof(CPUSH4State, dbr), "DBR");
ad75a51e 127 cpu_mach = tcg_global_mem_new_i32(tcg_env,
73e5716c 128 offsetof(CPUSH4State, mach), "MACH");
ad75a51e 129 cpu_macl = tcg_global_mem_new_i32(tcg_env,
73e5716c 130 offsetof(CPUSH4State, macl), "MACL");
ad75a51e 131 cpu_pr = tcg_global_mem_new_i32(tcg_env,
73e5716c 132 offsetof(CPUSH4State, pr), "PR");
ad75a51e 133 cpu_fpscr = tcg_global_mem_new_i32(tcg_env,
73e5716c 134 offsetof(CPUSH4State, fpscr), "FPSCR");
ad75a51e 135 cpu_fpul = tcg_global_mem_new_i32(tcg_env,
73e5716c 136 offsetof(CPUSH4State, fpul), "FPUL");
a7812ae4 137
ad75a51e 138 cpu_flags = tcg_global_mem_new_i32(tcg_env,
73e5716c 139 offsetof(CPUSH4State, flags), "_flags_");
ad75a51e 140 cpu_delayed_pc = tcg_global_mem_new_i32(tcg_env,
73e5716c 141 offsetof(CPUSH4State, delayed_pc),
a7812ae4 142 "_delayed_pc_");
ad75a51e 143 cpu_delayed_cond = tcg_global_mem_new_i32(tcg_env,
47b9f4d5
AJ
144 offsetof(CPUSH4State,
145 delayed_cond),
146 "_delayed_cond_");
ad75a51e 147 cpu_lock_addr = tcg_global_mem_new_i32(tcg_env,
f85da308
RH
148 offsetof(CPUSH4State, lock_addr),
149 "_lock_addr_");
ad75a51e 150 cpu_lock_value = tcg_global_mem_new_i32(tcg_env,
f85da308
RH
151 offsetof(CPUSH4State, lock_value),
152 "_lock_value_");
1000822b 153
66ba317c 154 for (i = 0; i < 32; i++)
ad75a51e 155 cpu_fregs[i] = tcg_global_mem_new_i32(tcg_env,
73e5716c 156 offsetof(CPUSH4State, fregs[i]),
66ba317c 157 fregnames[i]);
2e70f6ef
PB
158}
159
90c84c56 160void superh_cpu_dump_state(CPUState *cs, FILE *f, int flags)
fdf9b3e8 161{
878096ee
AF
162 SuperHCPU *cpu = SUPERH_CPU(cs);
163 CPUSH4State *env = &cpu->env;
fdf9b3e8 164 int i;
90c84c56
MA
165
166 qemu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
167 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
168 qemu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
169 env->spc, env->ssr, env->gbr, env->vbr);
170 qemu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
171 env->sgr, env->dbr, env->delayed_pc, env->fpul);
fdf9b3e8 172 for (i = 0; i < 24; i += 4) {
ad4052f1
IL
173 qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
174 i, env->gregs[i], i + 1, env->gregs[i + 1],
175 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
fdf9b3e8 176 }
ab419fd8 177 if (env->flags & TB_FLAG_DELAY_SLOT) {
ad4052f1
IL
178 qemu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
179 env->delayed_pc);
ab419fd8 180 } else if (env->flags & TB_FLAG_DELAY_SLOT_COND) {
ad4052f1
IL
181 qemu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
182 env->delayed_pc);
ab419fd8 183 } else if (env->flags & TB_FLAG_DELAY_SLOT_RTE) {
90c84c56
MA
184 qemu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
185 env->delayed_pc);
fdf9b3e8
FB
186 }
187}
188
34086945
AJ
189static void gen_read_sr(TCGv dst)
190{
1d565b21
AJ
 191 TCGv t0 = tcg_temp_new();
 192 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
 193 tcg_gen_or_i32(dst, cpu_sr, t0);
 194 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
 195 tcg_gen_or_i32(dst, dst, t0);
 196 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
 197 tcg_gen_or_i32(dst, dst, t0);
34086945
AJ
198}
199
200static void gen_write_sr(TCGv src)
201{
1d565b21
AJ
202 tcg_gen_andi_i32(cpu_sr, src,
203 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
a380f9db
AJ
204 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
205 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
206 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
34086945
AJ
207}
208
ac9707ea
AJ
209static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
210{
211 if (save_pc) {
6f1c2af6 212 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
ac9707ea
AJ
213 }
214 if (ctx->delayed_pc != (uint32_t) -1) {
215 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
216 }
e1933d14 217 if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
ac9707ea
AJ
218 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
219 }
220}
221
ec2eb22e
RH
222static inline bool use_exit_tb(DisasContext *ctx)
223{
ab419fd8 224 return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0;
ec2eb22e
RH
225}
226
3f1e2098 227static bool use_goto_tb(DisasContext *ctx, target_ulong dest)
fdf9b3e8 228{
3f1e2098 229 if (use_exit_tb(ctx)) {
4bfa602b
RH
230 return false;
231 }
3f1e2098 232 return translator_use_goto_tb(&ctx->base, dest);
90aa39a1 233}
fdf9b3e8 234
90aa39a1
SF
235static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
236{
237 if (use_goto_tb(ctx, dest)) {
57fec1fe 238 tcg_gen_goto_tb(n);
3a8a44c4 239 tcg_gen_movi_i32(cpu_pc, dest);
07ea28b4 240 tcg_gen_exit_tb(ctx->base.tb, n);
fdf9b3e8 241 } else {
3a8a44c4 242 tcg_gen_movi_i32(cpu_pc, dest);
52df5adc 243 if (use_exit_tb(ctx)) {
07ea28b4 244 tcg_gen_exit_tb(NULL, 0);
ec2eb22e 245 } else {
7f11636d 246 tcg_gen_lookup_and_goto_ptr();
ec2eb22e 247 }
fdf9b3e8 248 }
6f1c2af6 249 ctx->base.is_jmp = DISAS_NORETURN;
fdf9b3e8
FB
250}
251
fdf9b3e8
FB
252static void gen_jump(DisasContext * ctx)
253{
ec2eb22e 254 if (ctx->delayed_pc == -1) {
fdf9b3e8
FB
 255 /* The target is not statically known; it necessarily comes from a
 256 delayed jump, since immediate jumps are conditional jumps */
1000822b 257 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
ac9707ea 258 tcg_gen_discard_i32(cpu_delayed_pc);
52df5adc 259 if (use_exit_tb(ctx)) {
07ea28b4 260 tcg_gen_exit_tb(NULL, 0);
ec2eb22e 261 } else {
7f11636d 262 tcg_gen_lookup_and_goto_ptr();
ec2eb22e 263 }
6f1c2af6 264 ctx->base.is_jmp = DISAS_NORETURN;
fdf9b3e8
FB
265 } else {
266 gen_goto_tb(ctx, 0, ctx->delayed_pc);
267 }
268}
269
270/* Immediate conditional jump (bt or bf) */
4bfa602b
RH
271static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
272 bool jump_if_true)
fdf9b3e8 273{
34086945 274 TCGLabel *l1 = gen_new_label();
4bfa602b
RH
275 TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
276
ab419fd8 277 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
4bfa602b
RH
278 /* When in an exclusive region, we must continue to the end.
279 Therefore, exit the region on a taken branch, but otherwise
280 fall through to the next instruction. */
281 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
ab419fd8 282 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
4bfa602b
RH
283 /* Note that this won't actually use a goto_tb opcode because we
284 disallow it in use_goto_tb, but it handles exit + singlestep. */
285 gen_goto_tb(ctx, 0, dest);
286 gen_set_label(l1);
5b38d026 287 ctx->base.is_jmp = DISAS_NEXT;
4bfa602b
RH
288 return;
289 }
290
ac9707ea 291 gen_save_cpu_state(ctx, false);
4bfa602b
RH
292 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
293 gen_goto_tb(ctx, 0, dest);
fdf9b3e8 294 gen_set_label(l1);
6f1c2af6
RH
295 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
296 ctx->base.is_jmp = DISAS_NORETURN;
fdf9b3e8
FB
297}
298
299/* Delayed conditional jump (bt or bf) */
300static void gen_delayed_conditional_jump(DisasContext * ctx)
301{
4bfa602b
RH
302 TCGLabel *l1 = gen_new_label();
303 TCGv ds = tcg_temp_new();
fdf9b3e8 304
47b9f4d5
AJ
305 tcg_gen_mov_i32(ds, cpu_delayed_cond);
306 tcg_gen_discard_i32(cpu_delayed_cond);
4bfa602b 307
ab419fd8 308 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
4bfa602b
RH
309 /* When in an exclusive region, we must continue to the end.
310 Therefore, exit the region on a taken branch, but otherwise
311 fall through to the next instruction. */
312 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
313
314 /* Leave the gUSA region. */
ab419fd8 315 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~TB_FLAG_GUSA_MASK);
4bfa602b
RH
316 gen_jump(ctx);
317
318 gen_set_label(l1);
6f1c2af6 319 ctx->base.is_jmp = DISAS_NEXT;
4bfa602b
RH
320 return;
321 }
322
6f396c8f 323 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
6f1c2af6 324 gen_goto_tb(ctx, 1, ctx->base.pc_next + 2);
fdf9b3e8 325 gen_set_label(l1);
9c2a9ea1 326 gen_jump(ctx);
fdf9b3e8
FB
327}
328
e5d8053e 329static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 330{
1e0b21d8
RH
331 /* We have already signaled illegal instruction for odd Dr. */
332 tcg_debug_assert((reg & 1) == 0);
333 reg ^= ctx->fbank;
66ba317c 334 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
cc4ba6a9
AJ
335}
336
e5d8053e 337static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 338{
1e0b21d8
RH
339 /* We have already signaled illegal instruction for odd Dr. */
340 tcg_debug_assert((reg & 1) == 0);
341 reg ^= ctx->fbank;
58d2a9ae 342 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
cc4ba6a9
AJ
343}
344
fdf9b3e8
FB
345#define B3_0 (ctx->opcode & 0xf)
346#define B6_4 ((ctx->opcode >> 4) & 0x7)
347#define B7_4 ((ctx->opcode >> 4) & 0xf)
348#define B7_0 (ctx->opcode & 0xff)
349#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
350#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
351 (ctx->opcode & 0xfff))
352#define B11_8 ((ctx->opcode >> 8) & 0xf)
353#define B15_12 ((ctx->opcode >> 12) & 0xf)
354
3a3bb8d2
RH
355#define REG(x) cpu_gregs[(x) ^ ctx->gbank]
356#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
5c13bad9 357#define FREG(x) cpu_fregs[(x) ^ ctx->fbank]
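/*
 * REG() indexes the general register file for the bank currently selected
 * by ctx->gbank (0 or 0x10, derived from SR.MD/SR.RB); ALTREG() picks the
 * opposite bank.  FREG() likewise selects the FPR bank given by FPSCR.FR.
 */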
fdf9b3e8 358
f09111e0 359#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
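/*
 * For 64-bit fmov when FPSCR.SZ is set, bit 0 of the register field selects
 * XDn (the other FPR bank) instead of DRn.  XHACK() moves that bit up to
 * bit 4 so that FREG()'s bank XOR lands on the right bank.
 */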
eda9b09b 360
fdf9b3e8 361#define CHECK_NOT_DELAY_SLOT \
ab419fd8
RH
362 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) { \
363 goto do_illegal_slot; \
a6215749
AJ
364 }
365
6b98213d
RH
366#define CHECK_PRIVILEGED \
367 if (IS_USER(ctx)) { \
368 goto do_illegal; \
a6215749
AJ
369 }
370
dec4f042
RH
371#define CHECK_FPU_ENABLED \
372 if (ctx->tbflags & (1u << SR_FD)) { \
373 goto do_fpu_disabled; \
a6215749 374 }
d8299bcc 375
7e9f7ca8
RH
376#define CHECK_FPSCR_PR_0 \
377 if (ctx->tbflags & FPSCR_PR) { \
378 goto do_illegal; \
379 }
380
381#define CHECK_FPSCR_PR_1 \
382 if (!(ctx->tbflags & FPSCR_PR)) { \
383 goto do_illegal; \
384 }
385
ccae24d4
RH
386#define CHECK_SH4A \
387 if (!(ctx->features & SH_FEATURE_SH4A)) { \
388 goto do_illegal; \
389 }
390
b1d8e52e 391static void _decode_opc(DisasContext * ctx)
fdf9b3e8 392{
852d481f
EI
 393 /* This code tries to make movca.l emulation sufficiently
 394 accurate for Linux purposes. This instruction writes
 395 memory, and prior to that, always allocates a cache line.
 396 It is used in two contexts:
 397 - in memcpy, where data is copied in blocks, the first write
 398 to a block uses movca.l for performance.
 399 - in arch/sh/mm/cache-sh4.c, a movca.l + ocbi combination is used
 400 to flush the cache. Here, the data written by movca.l is never
 401 meant to reach memory, and the value written is just bogus.
 402
 403 To simulate this, we emulate movca.l as an ordinary store, but we also
 404 remember the previous content of the location. If we then see ocbi,
 405 we check whether a movca.l was done previously for that address. If so,
 406 the write should not have hit memory, so we restore the previous content.
 407 When we see an instruction that is neither movca.l
 408 nor ocbi, the previous content is discarded.
 409
 410 To optimize, we only try to flush stores when we're at the start of
 411 a TB, or if we already saw movca.l in this TB and did not flush stores
 412 yet. */
413 if (ctx->has_movcal)
414 {
415 int opcode = ctx->opcode & 0xf0ff;
416 if (opcode != 0x0093 /* ocbi */
417 && opcode != 0x00c3 /* movca.l */)
418 {
ad75a51e 419 gen_helper_discard_movcal_backup(tcg_env);
852d481f
EI
420 ctx->has_movcal = 0;
421 }
422 }
423
fdf9b3e8
FB
424#if 0
425 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
426#endif
f6198371 427
fdf9b3e8
FB
428 switch (ctx->opcode) {
429 case 0x0019: /* div0u */
1d565b21
AJ
430 tcg_gen_movi_i32(cpu_sr_m, 0);
431 tcg_gen_movi_i32(cpu_sr_q, 0);
34086945 432 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
433 return;
434 case 0x000b: /* rts */
1000822b
AJ
435 CHECK_NOT_DELAY_SLOT
436 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
ab419fd8 437 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
438 ctx->delayed_pc = (uint32_t) - 1;
439 return;
440 case 0x0028: /* clrmac */
3a8a44c4
AJ
441 tcg_gen_movi_i32(cpu_mach, 0);
442 tcg_gen_movi_i32(cpu_macl, 0);
fdf9b3e8
FB
443 return;
444 case 0x0048: /* clrs */
5ed9a259 445 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
fdf9b3e8
FB
446 return;
447 case 0x0008: /* clrt */
34086945 448 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
449 return;
450 case 0x0038: /* ldtlb */
fe25591e 451 CHECK_PRIVILEGED
ad75a51e 452 gen_helper_ldtlb(tcg_env);
fdf9b3e8 453 return;
c5e814b2 454 case 0x002b: /* rte */
fe25591e 455 CHECK_PRIVILEGED
1000822b 456 CHECK_NOT_DELAY_SLOT
34086945 457 gen_write_sr(cpu_ssr);
1000822b 458 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
ab419fd8 459 ctx->envflags |= TB_FLAG_DELAY_SLOT_RTE;
fdf9b3e8 460 ctx->delayed_pc = (uint32_t) - 1;
6f1c2af6 461 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8
FB
462 return;
463 case 0x0058: /* sets */
5ed9a259 464 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
fdf9b3e8
FB
465 return;
466 case 0x0018: /* sett */
34086945 467 tcg_gen_movi_i32(cpu_sr_t, 1);
fdf9b3e8 468 return;
24988dc2 469 case 0xfbfd: /* frchg */
61dedf2a 470 CHECK_FPSCR_PR_0
6f06939b 471 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
6f1c2af6 472 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8 473 return;
24988dc2 474 case 0xf3fd: /* fschg */
61dedf2a 475 CHECK_FPSCR_PR_0
7a64244f 476 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
6f1c2af6 477 ctx->base.is_jmp = DISAS_STOP;
fdf9b3e8 478 return;
907759f9
RH
479 case 0xf7fd: /* fpchg */
480 CHECK_SH4A
481 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
6f1c2af6 482 ctx->base.is_jmp = DISAS_STOP;
907759f9 483 return;
fdf9b3e8
FB
484 case 0x0009: /* nop */
485 return;
486 case 0x001b: /* sleep */
fe25591e 487 CHECK_PRIVILEGED
6f1c2af6 488 tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next + 2);
ad75a51e 489 gen_helper_sleep(tcg_env);
fdf9b3e8
FB
490 return;
491 }
492
493 switch (ctx->opcode & 0xf000) {
494 case 0x1000: /* mov.l Rm,@(disp,Rn) */
c55497ec 495 {
a7812ae4 496 TCGv addr = tcg_temp_new();
c55497ec 497 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
4da06fb3
RH
498 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
499 MO_TEUL | UNALIGN(ctx));
c55497ec 500 }
fdf9b3e8
FB
501 return;
502 case 0x5000: /* mov.l @(disp,Rm),Rn */
c55497ec 503 {
a7812ae4 504 TCGv addr = tcg_temp_new();
c55497ec 505 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
4da06fb3
RH
506 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
507 MO_TESL | UNALIGN(ctx));
c55497ec 508 }
fdf9b3e8 509 return;
24988dc2 510 case 0xe000: /* mov #imm,Rn */
4bfa602b 511#ifdef CONFIG_USER_ONLY
ab419fd8
RH
512 /*
513 * Detect the start of a gUSA region (mov #-n, r15).
514 * If so, update envflags and end the TB. This will allow us
515 * to see the end of the region (stored in R0) in the next TB.
516 */
6f1c2af6
RH
517 if (B11_8 == 15 && B7_0s < 0 &&
518 (tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
ab419fd8
RH
519 ctx->envflags =
520 deposit32(ctx->envflags, TB_FLAG_GUSA_SHIFT, 8, B7_0s);
6f1c2af6 521 ctx->base.is_jmp = DISAS_STOP;
4bfa602b
RH
522 }
523#endif
7efbe241 524 tcg_gen_movi_i32(REG(B11_8), B7_0s);
fdf9b3e8
FB
525 return;
526 case 0x9000: /* mov.w @(disp,PC),Rn */
c55497ec 527 {
950b91be 528 TCGv addr = tcg_constant_i32(ctx->base.pc_next + 4 + B7_0 * 2);
03a0d87e
RH
529 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
530 MO_TESW | MO_ALIGN);
c55497ec 531 }
fdf9b3e8
FB
532 return;
533 case 0xd000: /* mov.l @(disp,PC),Rn */
c55497ec 534 {
950b91be 535 TCGv addr = tcg_constant_i32((ctx->base.pc_next + 4 + B7_0 * 4) & ~3);
03a0d87e
RH
536 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
537 MO_TESL | MO_ALIGN);
c55497ec 538 }
fdf9b3e8 539 return;
24988dc2 540 case 0x7000: /* add #imm,Rn */
7efbe241 541 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
fdf9b3e8
FB
542 return;
543 case 0xa000: /* bra disp */
544 CHECK_NOT_DELAY_SLOT
6f1c2af6 545 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ab419fd8 546 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
547 return;
548 case 0xb000: /* bsr disp */
549 CHECK_NOT_DELAY_SLOT
6f1c2af6
RH
550 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
551 ctx->delayed_pc = ctx->base.pc_next + 4 + B11_0s * 2;
ab419fd8 552 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
553 return;
554 }
555
556 switch (ctx->opcode & 0xf00f) {
557 case 0x6003: /* mov Rm,Rn */
7efbe241 558 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
559 return;
560 case 0x2000: /* mov.b Rm,@Rn */
3376f415 561 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
fdf9b3e8
FB
562 return;
563 case 0x2001: /* mov.w Rm,@Rn */
4da06fb3
RH
564 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
565 MO_TEUW | UNALIGN(ctx));
fdf9b3e8
FB
566 return;
567 case 0x2002: /* mov.l Rm,@Rn */
4da06fb3
RH
568 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx,
569 MO_TEUL | UNALIGN(ctx));
fdf9b3e8
FB
570 return;
571 case 0x6000: /* mov.b @Rm,Rn */
3376f415 572 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
fdf9b3e8
FB
573 return;
574 case 0x6001: /* mov.w @Rm,Rn */
4da06fb3
RH
575 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
576 MO_TESW | UNALIGN(ctx));
fdf9b3e8
FB
577 return;
578 case 0x6002: /* mov.l @Rm,Rn */
4da06fb3
RH
579 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
580 MO_TESL | UNALIGN(ctx));
fdf9b3e8
FB
581 return;
582 case 0x2004: /* mov.b Rm,@-Rn */
c55497ec 583 {
a7812ae4 584 TCGv addr = tcg_temp_new();
c55497ec 585 tcg_gen_subi_i32(addr, REG(B11_8), 1);
3376f415
AJ
586 /* might cause re-execution */
587 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
3101e99c 588 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
c55497ec 589 }
fdf9b3e8
FB
590 return;
591 case 0x2005: /* mov.w Rm,@-Rn */
c55497ec 592 {
a7812ae4 593 TCGv addr = tcg_temp_new();
c55497ec 594 tcg_gen_subi_i32(addr, REG(B11_8), 2);
4da06fb3
RH
595 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
596 MO_TEUW | UNALIGN(ctx));
3101e99c 597 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 598 }
fdf9b3e8
FB
599 return;
600 case 0x2006: /* mov.l Rm,@-Rn */
c55497ec 601 {
a7812ae4 602 TCGv addr = tcg_temp_new();
c55497ec 603 tcg_gen_subi_i32(addr, REG(B11_8), 4);
4da06fb3
RH
604 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
605 MO_TEUL | UNALIGN(ctx));
3101e99c 606 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 607 }
fdf9b3e8 608 return;
eda9b09b 609 case 0x6004: /* mov.b @Rm+,Rn */
3376f415 610 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
24988dc2 611 if ( B11_8 != B7_4 )
7efbe241 612 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
fdf9b3e8
FB
613 return;
614 case 0x6005: /* mov.w @Rm+,Rn */
4da06fb3
RH
615 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
616 MO_TESW | UNALIGN(ctx));
24988dc2 617 if ( B11_8 != B7_4 )
7efbe241 618 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
fdf9b3e8
FB
619 return;
620 case 0x6006: /* mov.l @Rm+,Rn */
4da06fb3
RH
621 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx,
622 MO_TESL | UNALIGN(ctx));
24988dc2 623 if ( B11_8 != B7_4 )
7efbe241 624 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
fdf9b3e8
FB
625 return;
626 case 0x0004: /* mov.b Rm,@(R0,Rn) */
c55497ec 627 {
a7812ae4 628 TCGv addr = tcg_temp_new();
c55497ec 629 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 630 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
c55497ec 631 }
fdf9b3e8
FB
632 return;
633 case 0x0005: /* mov.w Rm,@(R0,Rn) */
c55497ec 634 {
a7812ae4 635 TCGv addr = tcg_temp_new();
c55497ec 636 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
4da06fb3
RH
637 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
638 MO_TEUW | UNALIGN(ctx));
c55497ec 639 }
fdf9b3e8
FB
640 return;
641 case 0x0006: /* mov.l Rm,@(R0,Rn) */
c55497ec 642 {
a7812ae4 643 TCGv addr = tcg_temp_new();
c55497ec 644 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
4da06fb3
RH
645 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx,
646 MO_TEUL | UNALIGN(ctx));
c55497ec 647 }
fdf9b3e8
FB
648 return;
649 case 0x000c: /* mov.b @(R0,Rm),Rn */
c55497ec 650 {
a7812ae4 651 TCGv addr = tcg_temp_new();
c55497ec 652 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 653 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
c55497ec 654 }
fdf9b3e8
FB
655 return;
656 case 0x000d: /* mov.w @(R0,Rm),Rn */
c55497ec 657 {
a7812ae4 658 TCGv addr = tcg_temp_new();
c55497ec 659 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
4da06fb3
RH
660 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
661 MO_TESW | UNALIGN(ctx));
c55497ec 662 }
fdf9b3e8
FB
663 return;
664 case 0x000e: /* mov.l @(R0,Rm),Rn */
c55497ec 665 {
a7812ae4 666 TCGv addr = tcg_temp_new();
c55497ec 667 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
4da06fb3
RH
668 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx,
669 MO_TESL | UNALIGN(ctx));
c55497ec 670 }
fdf9b3e8
FB
671 return;
672 case 0x6008: /* swap.b Rm,Rn */
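        /* Swap the two low bytes of Rm; the upper 16 bits pass through. */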
c55497ec 673 {
3c254ab8 674 TCGv low = tcg_temp_new();
b983a0e1 675 tcg_gen_bswap16_i32(low, REG(B7_4), 0);
218fd730 676 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
c55497ec 677 }
fdf9b3e8
FB
678 return;
679 case 0x6009: /* swap.w Rm,Rn */
c53b36d2 680 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
fdf9b3e8
FB
681 return;
682 case 0x200d: /* xtrct Rm,Rn */
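        /* Rn = middle 32 bits of {Rm:Rn}, i.e. (Rm << 16) | (Rn >> 16). */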
c55497ec
AJ
683 {
684 TCGv high, low;
a7812ae4 685 high = tcg_temp_new();
3101e99c 686 tcg_gen_shli_i32(high, REG(B7_4), 16);
a7812ae4 687 low = tcg_temp_new();
c55497ec 688 tcg_gen_shri_i32(low, REG(B11_8), 16);
c55497ec 689 tcg_gen_or_i32(REG(B11_8), high, low);
c55497ec 690 }
fdf9b3e8
FB
691 return;
692 case 0x300c: /* add Rm,Rn */
7efbe241 693 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
694 return;
695 case 0x300e: /* addc Rm,Rn */
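        /*
         * addc: Rn = Rn + Rm + T, T = carry out.  Done with two add2 ops:
         * t1 = Rm + T (carry into T), then Rn = Rn + t1, accumulating the
         * second carry into T as well.
         */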
22b88fd7 696 {
34086945 697 TCGv t0, t1;
950b91be 698 t0 = tcg_constant_tl(0);
22b88fd7 699 t1 = tcg_temp_new();
a2368e01
AJ
700 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
701 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
702 REG(B11_8), t0, t1, cpu_sr_t);
22b88fd7 703 }
fdf9b3e8
FB
704 return;
705 case 0x300f: /* addv Rm,Rn */
ad8d25a1
AJ
706 {
707 TCGv t0, t1, t2;
708 t0 = tcg_temp_new();
709 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
710 t1 = tcg_temp_new();
711 tcg_gen_xor_i32(t1, t0, REG(B11_8));
712 t2 = tcg_temp_new();
713 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
34086945 714 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
34086945 715 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
ad8d25a1 716 tcg_gen_mov_i32(REG(B11_8), t0);
ad8d25a1 717 }
fdf9b3e8
FB
718 return;
719 case 0x2009: /* and Rm,Rn */
7efbe241 720 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
721 return;
722 case 0x3000: /* cmp/eq Rm,Rn */
34086945 723 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
724 return;
725 case 0x3003: /* cmp/ge Rm,Rn */
34086945 726 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
727 return;
728 case 0x3007: /* cmp/gt Rm,Rn */
34086945 729 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
730 return;
731 case 0x3006: /* cmp/hi Rm,Rn */
34086945 732 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
733 return;
734 case 0x3002: /* cmp/hs Rm,Rn */
34086945 735 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
736 return;
737 case 0x200c: /* cmp/str Rm,Rn */
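        /*
         * cmp/str: T = 1 if any byte of Rn equals the corresponding byte of
         * Rm.  cmp2 = Rm ^ Rn has a zero byte exactly where they match, and
         * (cmp2 - 0x01010101) & ~cmp2 & 0x80808080 is the usual
         * "contains a zero byte" bit trick.
         */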
69d6275b 738 {
c5c19137
AJ
739 TCGv cmp1 = tcg_temp_new();
740 TCGv cmp2 = tcg_temp_new();
eb6ca2b4
AJ
741 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
742 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
743 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
744 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
745 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
69d6275b 746 }
fdf9b3e8
FB
747 return;
748 case 0x2007: /* div0s Rm,Rn */
1d565b21
AJ
749 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
750 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
751 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
fdf9b3e8
FB
752 return;
753 case 0x3004: /* div1 Rm,Rn */
1d565b21
AJ
754 {
755 TCGv t0 = tcg_temp_new();
756 TCGv t1 = tcg_temp_new();
757 TCGv t2 = tcg_temp_new();
950b91be 758 TCGv zero = tcg_constant_i32(0);
1d565b21
AJ
759
760 /* shift left arg1, saving the bit being pushed out and inserting
761 T on the right */
762 tcg_gen_shri_i32(t0, REG(B11_8), 31);
763 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
764 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
765
 766 /* Add or subtract arg0 from arg1 depending on whether Q == M. To avoid
767 using 64-bit temps, we compute arg0's high part from q ^ m, so
768 that it is 0x00000000 when adding the value or 0xffffffff when
769 subtracting it. */
770 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
771 tcg_gen_subi_i32(t1, t1, 1);
772 tcg_gen_neg_i32(t2, REG(B7_4));
773 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
774 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
775
776 /* compute T and Q depending on carry */
777 tcg_gen_andi_i32(t1, t1, 1);
778 tcg_gen_xor_i32(t1, t1, t0);
779 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
780 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
1d565b21 781 }
fdf9b3e8
FB
782 return;
783 case 0x300d: /* dmuls.l Rm,Rn */
1d3b7084 784 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
785 return;
786 case 0x3005: /* dmulu.l Rm,Rn */
1d3b7084 787 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
788 return;
789 case 0x600e: /* exts.b Rm,Rn */
7efbe241 790 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
791 return;
792 case 0x600f: /* exts.w Rm,Rn */
7efbe241 793 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
794 return;
795 case 0x600c: /* extu.b Rm,Rn */
7efbe241 796 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
797 return;
798 case 0x600d: /* extu.w Rm,Rn */
7efbe241 799 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8 800 return;
24988dc2 801 case 0x000f: /* mac.l @Rm+,@Rn+ */
c55497ec
AJ
802 {
803 TCGv arg0, arg1;
a7812ae4 804 arg0 = tcg_temp_new();
03a0d87e
RH
805 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
806 MO_TESL | MO_ALIGN);
a7812ae4 807 arg1 = tcg_temp_new();
03a0d87e
RH
808 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
809 MO_TESL | MO_ALIGN);
ad75a51e 810 gen_helper_macl(tcg_env, arg0, arg1);
c55497ec
AJ
811 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
812 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
813 }
fdf9b3e8
FB
814 return;
815 case 0x400f: /* mac.w @Rm+,@Rn+ */
c55497ec
AJ
816 {
817 TCGv arg0, arg1;
a7812ae4 818 arg0 = tcg_temp_new();
03a0d87e
RH
819 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx,
820 MO_TESL | MO_ALIGN);
a7812ae4 821 arg1 = tcg_temp_new();
03a0d87e
RH
822 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx,
823 MO_TESL | MO_ALIGN);
ad75a51e 824 gen_helper_macw(tcg_env, arg0, arg1);
c55497ec
AJ
825 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
826 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
827 }
fdf9b3e8
FB
828 return;
829 case 0x0007: /* mul.l Rm,Rn */
7efbe241 830 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
831 return;
832 case 0x200f: /* muls.w Rm,Rn */
c55497ec
AJ
833 {
834 TCGv arg0, arg1;
a7812ae4 835 arg0 = tcg_temp_new();
c55497ec 836 tcg_gen_ext16s_i32(arg0, REG(B7_4));
a7812ae4 837 arg1 = tcg_temp_new();
c55497ec
AJ
838 tcg_gen_ext16s_i32(arg1, REG(B11_8));
839 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
c55497ec 840 }
fdf9b3e8
FB
841 return;
842 case 0x200e: /* mulu.w Rm,Rn */
c55497ec
AJ
843 {
844 TCGv arg0, arg1;
a7812ae4 845 arg0 = tcg_temp_new();
c55497ec 846 tcg_gen_ext16u_i32(arg0, REG(B7_4));
a7812ae4 847 arg1 = tcg_temp_new();
c55497ec
AJ
848 tcg_gen_ext16u_i32(arg1, REG(B11_8));
849 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
c55497ec 850 }
fdf9b3e8
FB
851 return;
852 case 0x600b: /* neg Rm,Rn */
7efbe241 853 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
854 return;
855 case 0x600a: /* negc Rm,Rn */
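        /*
         * negc: Rn = 0 - Rm - T, T = borrow.  Compute Rm + T with add2 so
         * the carry lands in T, then subtract that pair from zero with
         * sub2, leaving the borrow in T (masked to a single bit).
         */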
b2d9eda5 856 {
950b91be 857 TCGv t0 = tcg_constant_i32(0);
60eb27fe
AJ
858 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
859 REG(B7_4), t0, cpu_sr_t, t0);
860 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
861 t0, t0, REG(B11_8), cpu_sr_t);
862 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
b2d9eda5 863 }
fdf9b3e8
FB
864 return;
865 case 0x6007: /* not Rm,Rn */
7efbe241 866 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
867 return;
868 case 0x200b: /* or Rm,Rn */
7efbe241 869 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
870 return;
871 case 0x400c: /* shad Rm,Rn */
69d6275b 872 {
be654c83
AJ
873 TCGv t0 = tcg_temp_new();
874 TCGv t1 = tcg_temp_new();
875 TCGv t2 = tcg_temp_new();
876
877 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
878
879 /* positive case: shift to the left */
880 tcg_gen_shl_i32(t1, REG(B11_8), t0);
881
882 /* negative case: shift to the right in two steps to
883 correctly handle the -32 case */
884 tcg_gen_xori_i32(t0, t0, 0x1f);
885 tcg_gen_sar_i32(t2, REG(B11_8), t0);
886 tcg_gen_sari_i32(t2, t2, 1);
887
888 /* select between the two cases */
889 tcg_gen_movi_i32(t0, 0);
890 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
69d6275b 891 }
fdf9b3e8
FB
892 return;
893 case 0x400d: /* shld Rm,Rn */
69d6275b 894 {
57760161
AJ
895 TCGv t0 = tcg_temp_new();
896 TCGv t1 = tcg_temp_new();
897 TCGv t2 = tcg_temp_new();
898
899 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
900
901 /* positive case: shift to the left */
902 tcg_gen_shl_i32(t1, REG(B11_8), t0);
903
904 /* negative case: shift to the right in two steps to
905 correctly handle the -32 case */
906 tcg_gen_xori_i32(t0, t0, 0x1f);
907 tcg_gen_shr_i32(t2, REG(B11_8), t0);
908 tcg_gen_shri_i32(t2, t2, 1);
909
910 /* select between the two cases */
911 tcg_gen_movi_i32(t0, 0);
912 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
69d6275b 913 }
fdf9b3e8
FB
914 return;
915 case 0x3008: /* sub Rm,Rn */
7efbe241 916 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
917 return;
918 case 0x300a: /* subc Rm,Rn */
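        /*
         * subc: Rn = Rn - Rm - T, T = borrow.  Mirrors the addc sequence
         * above: t1 = Rm + T via add2, then Rn - t1 via sub2 with the
         * borrow kept in T.
         */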
22b88fd7 919 {
d0f44a55 920 TCGv t0, t1;
950b91be 921 t0 = tcg_constant_tl(0);
22b88fd7 922 t1 = tcg_temp_new();
d0f44a55
AJ
923 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
924 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
925 REG(B11_8), t0, t1, cpu_sr_t);
926 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
22b88fd7 927 }
fdf9b3e8
FB
928 return;
929 case 0x300b: /* subv Rm,Rn */
ad8d25a1
AJ
930 {
931 TCGv t0, t1, t2;
932 t0 = tcg_temp_new();
933 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
934 t1 = tcg_temp_new();
 935 tcg_gen_xor_i32(t1, t0, REG(B11_8));
936 t2 = tcg_temp_new();
937 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
938 tcg_gen_and_i32(t1, t1, t2);
34086945 939 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
ad8d25a1 940 tcg_gen_mov_i32(REG(B11_8), t0);
ad8d25a1 941 }
fdf9b3e8
FB
942 return;
943 case 0x2008: /* tst Rm,Rn */
c55497ec 944 {
a7812ae4 945 TCGv val = tcg_temp_new();
c55497ec 946 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
34086945 947 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec 948 }
fdf9b3e8
FB
949 return;
950 case 0x200a: /* xor Rm,Rn */
7efbe241 951 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8 952 return;
e67888a7 953 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 954 CHECK_FPU_ENABLED
a6215749 955 if (ctx->tbflags & FPSCR_SZ) {
bdcb3739
RH
956 int xsrc = XHACK(B7_4);
957 int xdst = XHACK(B11_8);
958 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
959 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
eda9b09b 960 } else {
7c9f7038 961 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
eda9b09b
FB
962 }
963 return;
e67888a7 964 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
f6198371 965 CHECK_FPU_ENABLED
a6215749 966 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
967 TCGv_i64 fp = tcg_temp_new_i64();
968 gen_load_fpr64(ctx, fp, XHACK(B7_4));
03a0d87e
RH
969 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx,
970 MO_TEUQ | MO_ALIGN);
eda9b09b 971 } else {
03a0d87e
RH
972 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx,
973 MO_TEUL | MO_ALIGN);
eda9b09b
FB
974 }
975 return;
e67888a7 976 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 977 CHECK_FPU_ENABLED
a6215749 978 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50 979 TCGv_i64 fp = tcg_temp_new_i64();
03a0d87e
RH
980 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
981 MO_TEUQ | MO_ALIGN);
4d57fa50 982 gen_store_fpr64(ctx, fp, XHACK(B11_8));
eda9b09b 983 } else {
03a0d87e
RH
984 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
985 MO_TEUL | MO_ALIGN);
eda9b09b
FB
986 }
987 return;
e67888a7 988 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
f6198371 989 CHECK_FPU_ENABLED
a6215749 990 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50 991 TCGv_i64 fp = tcg_temp_new_i64();
03a0d87e
RH
992 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx,
993 MO_TEUQ | MO_ALIGN);
4d57fa50 994 gen_store_fpr64(ctx, fp, XHACK(B11_8));
4d57fa50 995 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
eda9b09b 996 } else {
03a0d87e
RH
997 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx,
998 MO_TEUL | MO_ALIGN);
cc4ba6a9 999 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
eda9b09b
FB
1000 }
1001 return;
e67888a7 1002 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
f6198371 1003 CHECK_FPU_ENABLED
4d57fa50
RH
1004 {
1005 TCGv addr = tcg_temp_new_i32();
1006 if (ctx->tbflags & FPSCR_SZ) {
1007 TCGv_i64 fp = tcg_temp_new_i64();
1008 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1009 tcg_gen_subi_i32(addr, REG(B11_8), 8);
03a0d87e
RH
1010 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1011 MO_TEUQ | MO_ALIGN);
4d57fa50
RH
1012 } else {
1013 tcg_gen_subi_i32(addr, REG(B11_8), 4);
03a0d87e
RH
1014 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1015 MO_TEUL | MO_ALIGN);
4d57fa50
RH
1016 }
1017 tcg_gen_mov_i32(REG(B11_8), addr);
4d57fa50 1018 }
eda9b09b 1019 return;
e67888a7 1020 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
f6198371 1021 CHECK_FPU_ENABLED
cc4ba6a9 1022 {
a7812ae4 1023 TCGv addr = tcg_temp_new_i32();
cc4ba6a9 1024 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
a6215749 1025 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50 1026 TCGv_i64 fp = tcg_temp_new_i64();
03a0d87e
RH
1027 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx,
1028 MO_TEUQ | MO_ALIGN);
4d57fa50 1029 gen_store_fpr64(ctx, fp, XHACK(B11_8));
cc4ba6a9 1030 } else {
03a0d87e
RH
1031 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx,
1032 MO_TEUL | MO_ALIGN);
cc4ba6a9 1033 }
eda9b09b
FB
1034 }
1035 return;
e67888a7 1036 case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
f6198371 1037 CHECK_FPU_ENABLED
cc4ba6a9 1038 {
a7812ae4 1039 TCGv addr = tcg_temp_new();
cc4ba6a9 1040 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
a6215749 1041 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1042 TCGv_i64 fp = tcg_temp_new_i64();
1043 gen_load_fpr64(ctx, fp, XHACK(B7_4));
03a0d87e
RH
1044 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx,
1045 MO_TEUQ | MO_ALIGN);
cc4ba6a9 1046 } else {
03a0d87e
RH
1047 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx,
1048 MO_TEUL | MO_ALIGN);
cc4ba6a9 1049 }
eda9b09b
FB
1050 }
1051 return;
e67888a7
TS
1052 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1053 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1054 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1055 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1056 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1057 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
cc4ba6a9 1058 {
f6198371 1059 CHECK_FPU_ENABLED
a6215749 1060 if (ctx->tbflags & FPSCR_PR) {
a7812ae4
PB
1061 TCGv_i64 fp0, fp1;
1062
93dc9c89
RH
1063 if (ctx->opcode & 0x0110) {
1064 goto do_illegal;
1065 }
a7812ae4
PB
1066 fp0 = tcg_temp_new_i64();
1067 fp1 = tcg_temp_new_i64();
1e0b21d8
RH
1068 gen_load_fpr64(ctx, fp0, B11_8);
1069 gen_load_fpr64(ctx, fp1, B7_4);
a7812ae4
PB
1070 switch (ctx->opcode & 0xf00f) {
1071 case 0xf000: /* fadd Rm,Rn */
ad75a51e 1072 gen_helper_fadd_DT(fp0, tcg_env, fp0, fp1);
a7812ae4
PB
1073 break;
1074 case 0xf001: /* fsub Rm,Rn */
ad75a51e 1075 gen_helper_fsub_DT(fp0, tcg_env, fp0, fp1);
a7812ae4
PB
1076 break;
1077 case 0xf002: /* fmul Rm,Rn */
ad75a51e 1078 gen_helper_fmul_DT(fp0, tcg_env, fp0, fp1);
a7812ae4
PB
1079 break;
1080 case 0xf003: /* fdiv Rm,Rn */
ad75a51e 1081 gen_helper_fdiv_DT(fp0, tcg_env, fp0, fp1);
a7812ae4
PB
1082 break;
1083 case 0xf004: /* fcmp/eq Rm,Rn */
ad75a51e 1084 gen_helper_fcmp_eq_DT(cpu_sr_t, tcg_env, fp0, fp1);
a7812ae4
PB
1085 return;
1086 case 0xf005: /* fcmp/gt Rm,Rn */
ad75a51e 1087 gen_helper_fcmp_gt_DT(cpu_sr_t, tcg_env, fp0, fp1);
a7812ae4
PB
1088 return;
1089 }
1e0b21d8 1090 gen_store_fpr64(ctx, fp0, B11_8);
a7812ae4 1091 } else {
a7812ae4
PB
1092 switch (ctx->opcode & 0xf00f) {
1093 case 0xf000: /* fadd Rm,Rn */
ad75a51e 1094 gen_helper_fadd_FT(FREG(B11_8), tcg_env,
7c9f7038 1095 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1096 break;
1097 case 0xf001: /* fsub Rm,Rn */
ad75a51e 1098 gen_helper_fsub_FT(FREG(B11_8), tcg_env,
7c9f7038 1099 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1100 break;
1101 case 0xf002: /* fmul Rm,Rn */
ad75a51e 1102 gen_helper_fmul_FT(FREG(B11_8), tcg_env,
7c9f7038 1103 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1104 break;
1105 case 0xf003: /* fdiv Rm,Rn */
ad75a51e 1106 gen_helper_fdiv_FT(FREG(B11_8), tcg_env,
7c9f7038 1107 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1108 break;
1109 case 0xf004: /* fcmp/eq Rm,Rn */
ad75a51e 1110 gen_helper_fcmp_eq_FT(cpu_sr_t, tcg_env,
7c9f7038 1111 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1112 return;
1113 case 0xf005: /* fcmp/gt Rm,Rn */
ad75a51e 1114 gen_helper_fcmp_gt_FT(cpu_sr_t, tcg_env,
7c9f7038 1115 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1116 return;
1117 }
cc4ba6a9 1118 }
ea6cf6be
TS
1119 }
1120 return;
5b7141a1 1121 case 0xf00e: /* fmac FR0,FRm,FRn */
7e9f7ca8
RH
1122 CHECK_FPU_ENABLED
1123 CHECK_FPSCR_PR_0
ad75a51e 1124 gen_helper_fmac_FT(FREG(B11_8), tcg_env,
7e9f7ca8
RH
1125 FREG(0), FREG(B7_4), FREG(B11_8));
1126 return;
fdf9b3e8
FB
1127 }
1128
1129 switch (ctx->opcode & 0xff00) {
1130 case 0xc900: /* and #imm,R0 */
7efbe241 1131 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1132 return;
24988dc2 1133 case 0xcd00: /* and.b #imm,@(R0,GBR) */
c55497ec
AJ
1134 {
1135 TCGv addr, val;
a7812ae4 1136 addr = tcg_temp_new();
c55497ec 1137 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1138 val = tcg_temp_new();
3376f415 1139 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1140 tcg_gen_andi_i32(val, val, B7_0);
3376f415 1141 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1142 }
fdf9b3e8
FB
1143 return;
1144 case 0x8b00: /* bf label */
1145 CHECK_NOT_DELAY_SLOT
6f1c2af6 1146 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, false);
fdf9b3e8
FB
1147 return;
1148 case 0x8f00: /* bf/s label */
1149 CHECK_NOT_DELAY_SLOT
ac9707ea 1150 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
6f1c2af6 1151 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ab419fd8 1152 ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
fdf9b3e8
FB
1153 return;
1154 case 0x8900: /* bt label */
1155 CHECK_NOT_DELAY_SLOT
6f1c2af6 1156 gen_conditional_jump(ctx, ctx->base.pc_next + 4 + B7_0s * 2, true);
fdf9b3e8
FB
1157 return;
1158 case 0x8d00: /* bt/s label */
1159 CHECK_NOT_DELAY_SLOT
ac9707ea 1160 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
6f1c2af6 1161 ctx->delayed_pc = ctx->base.pc_next + 4 + B7_0s * 2;
ab419fd8 1162 ctx->envflags |= TB_FLAG_DELAY_SLOT_COND;
fdf9b3e8
FB
1163 return;
1164 case 0x8800: /* cmp/eq #imm,R0 */
34086945 1165 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
fdf9b3e8
FB
1166 return;
1167 case 0xc400: /* mov.b @(disp,GBR),R0 */
c55497ec 1168 {
a7812ae4 1169 TCGv addr = tcg_temp_new();
c55497ec 1170 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1171 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec 1172 }
fdf9b3e8
FB
1173 return;
1174 case 0xc500: /* mov.w @(disp,GBR),R0 */
c55497ec 1175 {
a7812ae4 1176 TCGv addr = tcg_temp_new();
c55497ec 1177 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
03a0d87e 1178 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW | MO_ALIGN);
c55497ec 1179 }
fdf9b3e8
FB
1180 return;
1181 case 0xc600: /* mov.l @(disp,GBR),R0 */
c55497ec 1182 {
a7812ae4 1183 TCGv addr = tcg_temp_new();
c55497ec 1184 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
03a0d87e 1185 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL | MO_ALIGN);
c55497ec 1186 }
fdf9b3e8
FB
1187 return;
1188 case 0xc000: /* mov.b R0,@(disp,GBR) */
c55497ec 1189 {
a7812ae4 1190 TCGv addr = tcg_temp_new();
c55497ec 1191 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1192 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec 1193 }
fdf9b3e8
FB
1194 return;
1195 case 0xc100: /* mov.w R0,@(disp,GBR) */
c55497ec 1196 {
a7812ae4 1197 TCGv addr = tcg_temp_new();
c55497ec 1198 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
03a0d87e 1199 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW | MO_ALIGN);
c55497ec 1200 }
fdf9b3e8
FB
1201 return;
1202 case 0xc200: /* mov.l R0,@(disp,GBR) */
c55497ec 1203 {
a7812ae4 1204 TCGv addr = tcg_temp_new();
c55497ec 1205 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
03a0d87e 1206 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL | MO_ALIGN);
c55497ec 1207 }
fdf9b3e8
FB
1208 return;
1209 case 0x8000: /* mov.b R0,@(disp,Rn) */
c55497ec 1210 {
a7812ae4 1211 TCGv addr = tcg_temp_new();
c55497ec 1212 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1213 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec 1214 }
fdf9b3e8
FB
1215 return;
1216 case 0x8100: /* mov.w R0,@(disp,Rn) */
c55497ec 1217 {
a7812ae4 1218 TCGv addr = tcg_temp_new();
c55497ec 1219 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
4da06fb3
RH
1220 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx,
1221 MO_TEUW | UNALIGN(ctx));
c55497ec 1222 }
fdf9b3e8
FB
1223 return;
1224 case 0x8400: /* mov.b @(disp,Rn),R0 */
c55497ec 1225 {
a7812ae4 1226 TCGv addr = tcg_temp_new();
c55497ec 1227 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1228 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec 1229 }
fdf9b3e8
FB
1230 return;
1231 case 0x8500: /* mov.w @(disp,Rn),R0 */
c55497ec 1232 {
a7812ae4 1233 TCGv addr = tcg_temp_new();
c55497ec 1234 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
4da06fb3
RH
1235 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx,
1236 MO_TESW | UNALIGN(ctx));
c55497ec 1237 }
fdf9b3e8
FB
1238 return;
1239 case 0xc700: /* mova @(disp,PC),R0 */
6f1c2af6
RH
1240 tcg_gen_movi_i32(REG(0), ((ctx->base.pc_next & 0xfffffffc) +
1241 4 + B7_0 * 4) & ~3);
fdf9b3e8
FB
1242 return;
1243 case 0xcb00: /* or #imm,R0 */
7efbe241 1244 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1245 return;
24988dc2 1246 case 0xcf00: /* or.b #imm,@(R0,GBR) */
c55497ec
AJ
1247 {
1248 TCGv addr, val;
a7812ae4 1249 addr = tcg_temp_new();
c55497ec 1250 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1251 val = tcg_temp_new();
3376f415 1252 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1253 tcg_gen_ori_i32(val, val, B7_0);
3376f415 1254 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1255 }
fdf9b3e8
FB
1256 return;
1257 case 0xc300: /* trapa #imm */
c55497ec
AJ
1258 {
1259 TCGv imm;
1260 CHECK_NOT_DELAY_SLOT
ac9707ea 1261 gen_save_cpu_state(ctx, true);
950b91be 1262 imm = tcg_constant_i32(B7_0);
ad75a51e 1263 gen_helper_trapa(tcg_env, imm);
6f1c2af6 1264 ctx->base.is_jmp = DISAS_NORETURN;
c55497ec 1265 }
fdf9b3e8
FB
1266 return;
1267 case 0xc800: /* tst #imm,R0 */
c55497ec 1268 {
a7812ae4 1269 TCGv val = tcg_temp_new();
c55497ec 1270 tcg_gen_andi_i32(val, REG(0), B7_0);
34086945 1271 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec 1272 }
fdf9b3e8 1273 return;
24988dc2 1274 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
c55497ec 1275 {
a7812ae4 1276 TCGv val = tcg_temp_new();
c55497ec 1277 tcg_gen_add_i32(val, REG(0), cpu_gbr);
3376f415 1278 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
c55497ec 1279 tcg_gen_andi_i32(val, val, B7_0);
34086945 1280 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec 1281 }
fdf9b3e8
FB
1282 return;
1283 case 0xca00: /* xor #imm,R0 */
7efbe241 1284 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1285 return;
24988dc2 1286 case 0xce00: /* xor.b #imm,@(R0,GBR) */
c55497ec
AJ
1287 {
1288 TCGv addr, val;
a7812ae4 1289 addr = tcg_temp_new();
c55497ec 1290 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1291 val = tcg_temp_new();
3376f415 1292 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1293 tcg_gen_xori_i32(val, val, B7_0);
3376f415 1294 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1295 }
fdf9b3e8
FB
1296 return;
1297 }
1298
1299 switch (ctx->opcode & 0xf08f) {
1300 case 0x408e: /* ldc Rm,Rn_BANK */
fe25591e 1301 CHECK_PRIVILEGED
7efbe241 1302 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
fdf9b3e8
FB
1303 return;
1304 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
fe25591e 1305 CHECK_PRIVILEGED
03a0d87e
RH
1306 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx,
1307 MO_TESL | MO_ALIGN);
7efbe241 1308 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
fdf9b3e8
FB
1309 return;
1310 case 0x0082: /* stc Rm_BANK,Rn */
fe25591e 1311 CHECK_PRIVILEGED
7efbe241 1312 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
fdf9b3e8
FB
1313 return;
1314 case 0x4083: /* stc.l Rm_BANK,@-Rn */
fe25591e 1315 CHECK_PRIVILEGED
c55497ec 1316 {
a7812ae4 1317 TCGv addr = tcg_temp_new();
c55497ec 1318 tcg_gen_subi_i32(addr, REG(B11_8), 4);
03a0d87e
RH
1319 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx,
1320 MO_TEUL | MO_ALIGN);
3101e99c 1321 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 1322 }
fdf9b3e8
FB
1323 return;
1324 }
1325
1326 switch (ctx->opcode & 0xf0ff) {
1327 case 0x0023: /* braf Rn */
7efbe241 1328 CHECK_NOT_DELAY_SLOT
6f1c2af6 1329 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->base.pc_next + 4);
ab419fd8 1330 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
1331 ctx->delayed_pc = (uint32_t) - 1;
1332 return;
1333 case 0x0003: /* bsrf Rn */
7efbe241 1334 CHECK_NOT_DELAY_SLOT
6f1c2af6 1335 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
7efbe241 1336 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
ab419fd8 1337 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
1338 ctx->delayed_pc = (uint32_t) - 1;
1339 return;
1340 case 0x4015: /* cmp/pl Rn */
34086945 1341 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1342 return;
1343 case 0x4011: /* cmp/pz Rn */
34086945 1344 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1345 return;
1346 case 0x4010: /* dt Rn */
7efbe241 1347 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
34086945 1348 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1349 return;
1350 case 0x402b: /* jmp @Rn */
7efbe241
AJ
1351 CHECK_NOT_DELAY_SLOT
1352 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
ab419fd8 1353 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
1354 ctx->delayed_pc = (uint32_t) - 1;
1355 return;
1356 case 0x400b: /* jsr @Rn */
7efbe241 1357 CHECK_NOT_DELAY_SLOT
6f1c2af6 1358 tcg_gen_movi_i32(cpu_pr, ctx->base.pc_next + 4);
7efbe241 1359 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
ab419fd8 1360 ctx->envflags |= TB_FLAG_DELAY_SLOT;
fdf9b3e8
FB
1361 ctx->delayed_pc = (uint32_t) - 1;
1362 return;
fe25591e
AJ
1363 case 0x400e: /* ldc Rm,SR */
1364 CHECK_PRIVILEGED
34086945
AJ
1365 {
1366 TCGv val = tcg_temp_new();
1367 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1368 gen_write_sr(val);
6f1c2af6 1369 ctx->base.is_jmp = DISAS_STOP;
34086945 1370 }
390af821 1371 return;
fe25591e
AJ
1372 case 0x4007: /* ldc.l @Rm+,SR */
1373 CHECK_PRIVILEGED
c55497ec 1374 {
a7812ae4 1375 TCGv val = tcg_temp_new();
03a0d87e
RH
1376 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1377 MO_TESL | MO_ALIGN);
34086945
AJ
1378 tcg_gen_andi_i32(val, val, 0x700083f3);
1379 gen_write_sr(val);
c55497ec 1380 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
6f1c2af6 1381 ctx->base.is_jmp = DISAS_STOP;
c55497ec 1382 }
390af821 1383 return;
fe25591e
AJ
1384 case 0x0002: /* stc SR,Rn */
1385 CHECK_PRIVILEGED
34086945 1386 gen_read_sr(REG(B11_8));
390af821 1387 return;
fe25591e
AJ
1388 case 0x4003: /* stc SR,@-Rn */
1389 CHECK_PRIVILEGED
c55497ec 1390 {
a7812ae4 1391 TCGv addr = tcg_temp_new();
34086945 1392 TCGv val = tcg_temp_new();
c55497ec 1393 tcg_gen_subi_i32(addr, REG(B11_8), 4);
34086945 1394 gen_read_sr(val);
03a0d87e 1395 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
3101e99c 1396 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 1397 }
390af821 1398 return;
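/*
 * The LD/ST/LDST macros below expand the ldc/lds "Rm -> reg" and
 * "@Rm+ -> reg" cases together with the matching stc/sts "reg -> Rn" and
 * "reg -> @-Rn" cases for a control or system register, prepending an
 * optional privilege or FPU-enable check.
 */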
8e9b0678 1399#define LD(reg,ldnum,ldpnum,prechk) \
fdf9b3e8 1400 case ldnum: \
fe25591e 1401 prechk \
7efbe241 1402 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
fdf9b3e8
FB
1403 return; \
1404 case ldpnum: \
fe25591e 1405 prechk \
03a0d87e
RH
1406 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, \
1407 MO_TESL | MO_ALIGN); \
7efbe241 1408 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
8e9b0678
AC
1409 return;
1410#define ST(reg,stnum,stpnum,prechk) \
fdf9b3e8 1411 case stnum: \
fe25591e 1412 prechk \
7efbe241 1413 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
fdf9b3e8
FB
1414 return; \
1415 case stpnum: \
fe25591e 1416 prechk \
c55497ec 1417 { \
3101e99c 1418 TCGv addr = tcg_temp_new(); \
c55497ec 1419 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
03a0d87e
RH
1420 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, \
1421 MO_TEUL | MO_ALIGN); \
3101e99c 1422 tcg_gen_mov_i32(REG(B11_8), addr); \
86e0abc7 1423 } \
fdf9b3e8 1424 return;
8e9b0678
AC
1425#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1426 LD(reg,ldnum,ldpnum,prechk) \
1427 ST(reg,stnum,stpnum,prechk)
fe25591e
AJ
1428 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1429 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1430 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1431 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
935fc175 1432 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
ccae24d4 1433 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
fe25591e
AJ
1434 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1435 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1436 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1437 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
d8299bcc 1438 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
390af821 1439 case 0x406a: /* lds Rm,FPSCR */
d8299bcc 1440 CHECK_FPU_ENABLED
ad75a51e 1441 gen_helper_ld_fpscr(tcg_env, REG(B11_8));
6f1c2af6 1442 ctx->base.is_jmp = DISAS_STOP;
390af821
AJ
1443 return;
1444 case 0x4066: /* lds.l @Rm+,FPSCR */
d8299bcc 1445 CHECK_FPU_ENABLED
c55497ec 1446 {
a7812ae4 1447 TCGv addr = tcg_temp_new();
03a0d87e
RH
1448 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx,
1449 MO_TESL | MO_ALIGN);
c55497ec 1450 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
ad75a51e 1451 gen_helper_ld_fpscr(tcg_env, addr);
6f1c2af6 1452 ctx->base.is_jmp = DISAS_STOP;
c55497ec 1453 }
390af821
AJ
1454 return;
1455 case 0x006a: /* sts FPSCR,Rn */
d8299bcc 1456 CHECK_FPU_ENABLED
c55497ec 1457 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
390af821
AJ
1458 return;
1459 case 0x4062: /* sts FPSCR,@-Rn */
d8299bcc 1460 CHECK_FPU_ENABLED
c55497ec
AJ
1461 {
1462 TCGv addr, val;
a7812ae4 1463 val = tcg_temp_new();
c55497ec 1464 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
a7812ae4 1465 addr = tcg_temp_new();
c55497ec 1466 tcg_gen_subi_i32(addr, REG(B11_8), 4);
03a0d87e 1467 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL | MO_ALIGN);
3101e99c 1468 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 1469 }
390af821 1470 return;
fdf9b3e8 1471 case 0x00c3: /* movca.l R0,@Rm */
852d481f
EI
1472 {
1473 TCGv val = tcg_temp_new();
03a0d87e
RH
1474 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx,
1475 MO_TEUL | MO_ALIGN);
ad75a51e 1476 gen_helper_movcal(tcg_env, REG(B11_8), val);
03a0d87e
RH
1477 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1478 MO_TEUL | MO_ALIGN);
852d481f
EI
1479 }
1480 ctx->has_movcal = 1;
fdf9b3e8 1481 return;
143021b2 1482 case 0x40a9: /* movua.l @Rm,R0 */
ccae24d4 1483 CHECK_SH4A
143021b2 1484 /* Load non-boundary-aligned data */
ccae24d4
RH
1485 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1486 MO_TEUL | MO_UNALN);
1487 return;
143021b2 1488 case 0x40e9: /* movua.l @Rm+,R0 */
ccae24d4 1489 CHECK_SH4A
143021b2 1490 /* Load non-boundary-aligned data */
ccae24d4
RH
1491 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1492 MO_TEUL | MO_UNALN);
1493 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1494 return;
fdf9b3e8 1495 case 0x0029: /* movt Rn */
34086945 1496 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
fdf9b3e8 1497 return;
66c7c806
AJ
1498 case 0x0073:
1499 /* MOVCO.L
f85da308
RH
1500 * LDST -> T
1501 * If (T == 1) R0 -> (Rn)
1502 * 0 -> LDST
1503 *
1504 * The above description doesn't work in a parallel context.
 1505 * Since we currently support no SMP boards, this implies user-mode.
1506 * But we can still support the official mechanism while user-mode
1507 * is single-threaded. */
ccae24d4
RH
1508 CHECK_SH4A
1509 {
f85da308
RH
1510 TCGLabel *fail = gen_new_label();
1511 TCGLabel *done = gen_new_label();
1512
6f1c2af6 1513 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1514 TCGv tmp;
1515
1516 tcg_gen_brcond_i32(TCG_COND_NE, REG(B11_8),
1517 cpu_lock_addr, fail);
1518 tmp = tcg_temp_new();
1519 tcg_gen_atomic_cmpxchg_i32(tmp, REG(B11_8), cpu_lock_value,
03a0d87e
RH
1520 REG(0), ctx->memidx,
1521 MO_TEUL | MO_ALIGN);
f85da308 1522 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, tmp, cpu_lock_value);
f85da308
RH
1523 } else {
1524 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_lock_addr, -1, fail);
03a0d87e
RH
1525 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx,
1526 MO_TEUL | MO_ALIGN);
f85da308
RH
1527 tcg_gen_movi_i32(cpu_sr_t, 1);
1528 }
1529 tcg_gen_br(done);
1530
1531 gen_set_label(fail);
1532 tcg_gen_movi_i32(cpu_sr_t, 0);
1533
1534 gen_set_label(done);
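        /*
         * Whether the store succeeded or failed, drop the reservation so
         * that a back-to-back movco.l without a fresh movli.l fails.
         */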
1535 tcg_gen_movi_i32(cpu_lock_addr, -1);
ccae24d4 1536 }
f85da308 1537 return;
66c7c806
AJ
1538 case 0x0063:
1539 /* MOVLI.L @Rm,R0
f85da308
RH
1540 * 1 -> LDST
1541 * (Rm) -> R0
1542 * When interrupt/exception
1543 * occurred 0 -> LDST
1544 *
1545 * In a parallel context, we must also save the loaded value
1546 * for use with the cmpxchg that we'll use with movco.l. */
ccae24d4 1547 CHECK_SH4A
6f1c2af6 1548 if ((tb_cflags(ctx->base.tb) & CF_PARALLEL)) {
f85da308
RH
1549 TCGv tmp = tcg_temp_new();
1550 tcg_gen_mov_i32(tmp, REG(B11_8));
03a0d87e
RH
1551 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1552 MO_TESL | MO_ALIGN);
f85da308
RH
1553 tcg_gen_mov_i32(cpu_lock_value, REG(0));
1554 tcg_gen_mov_i32(cpu_lock_addr, tmp);
f85da308 1555 } else {
03a0d87e
RH
1556 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1557 MO_TESL | MO_ALIGN);
f85da308
RH
1558 tcg_gen_movi_i32(cpu_lock_addr, 0);
1559 }
ccae24d4 1560 return;
fdf9b3e8 1561 case 0x0093: /* ocbi @Rn */
c55497ec 1562 {
ad75a51e 1563 gen_helper_ocbi(tcg_env, REG(B11_8));
c55497ec 1564 }
fdf9b3e8 1565 return;
24988dc2 1566 case 0x00a3: /* ocbp @Rn */
fdf9b3e8 1567 case 0x00b3: /* ocbwb @Rn */
0cdb9554
AJ
1568 /* These instructions are supposed to do nothing in case of
1569 a cache miss. Given that we only partially emulate caches
1570 it is safe to simply ignore them. */
fdf9b3e8
FB
1571 return;
1572 case 0x0083: /* pref @Rn */
1573 return;
71968fa6 1574 case 0x00d3: /* prefi @Rn */
ccae24d4
RH
1575 CHECK_SH4A
1576 return;
71968fa6 1577 case 0x00e3: /* icbi @Rn */
ccae24d4
RH
1578 CHECK_SH4A
1579 return;
71968fa6 1580 case 0x00ab: /* synco */
ccae24d4
RH
1581 CHECK_SH4A
1582 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1583 return;
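    /*
     * The shift and rotate cases below update the T bit with the bit that
     * is shifted or rotated out; rotcl/rotcr rotate through T itself, and
     * the multi-bit shifts (shll2/8/16, shlr2/8/16) leave T unchanged.
     */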
fdf9b3e8 1584 case 0x4024: /* rotcl Rn */
c55497ec 1585 {
a7812ae4 1586 TCGv tmp = tcg_temp_new();
34086945
AJ
1587 tcg_gen_mov_i32(tmp, cpu_sr_t);
1588 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
c55497ec 1589 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1590 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec 1591 }
fdf9b3e8
FB
1592 return;
1593 case 0x4025: /* rotcr Rn */
c55497ec 1594 {
a7812ae4 1595 TCGv tmp = tcg_temp_new();
34086945
AJ
1596 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1597 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
c55497ec 1598 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
34086945 1599 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec 1600 }
fdf9b3e8
FB
1601 return;
1602 case 0x4004: /* rotl Rn */
2411fde9 1603 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1604 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
fdf9b3e8
FB
1605 return;
1606 case 0x4005: /* rotr Rn */
34086945 1607 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
2411fde9 1608 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1609 return;
1610 case 0x4000: /* shll Rn */
1611 case 0x4020: /* shal Rn */
34086945 1612 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
7efbe241 1613 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1614 return;
1615 case 0x4021: /* shar Rn */
34086945 1616 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1617 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1618 return;
1619 case 0x4001: /* shlr Rn */
34086945 1620 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1621 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1622 return;
1623 case 0x4008: /* shll2 Rn */
7efbe241 1624 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1625 return;
1626 case 0x4018: /* shll8 Rn */
7efbe241 1627 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1628 return;
1629 case 0x4028: /* shll16 Rn */
7efbe241 1630 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1631 return;
1632 case 0x4009: /* shlr2 Rn */
7efbe241 1633 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1634 return;
1635 case 0x4019: /* shlr8 Rn */
7efbe241 1636 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1637 return;
1638 case 0x4029: /* shlr16 Rn */
7efbe241 1639 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1640 return;
1641 case 0x401b: /* tas.b @Rn */
d3c2b2b3
RH
1642 tcg_gen_atomic_fetch_or_i32(cpu_sr_t, REG(B11_8),
1643 tcg_constant_i32(0x80), ctx->memidx, MO_UB);
1644 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, cpu_sr_t, 0);
cb32f179 1645 return;
e67888a7 1646 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
f6198371 1647 CHECK_FPU_ENABLED
7c9f7038 1648 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
eda9b09b 1649 return;
e67888a7 1650 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
f6198371 1651 CHECK_FPU_ENABLED
7c9f7038 1652 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
eda9b09b 1653 return;
e67888a7 1654 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
f6198371 1655 CHECK_FPU_ENABLED
a6215749 1656 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1657 TCGv_i64 fp;
93dc9c89
RH
1658 if (ctx->opcode & 0x0100) {
1659 goto do_illegal;
1660 }
a7812ae4 1661 fp = tcg_temp_new_i64();
ad75a51e 1662 gen_helper_float_DT(fp, tcg_env, cpu_fpul);
1e0b21d8 1663 gen_store_fpr64(ctx, fp, B11_8);
ea6cf6be
TS
1664 }
1665 else {
ad75a51e 1666 gen_helper_float_FT(FREG(B11_8), tcg_env, cpu_fpul);
ea6cf6be
TS
1667 }
1668 return;
e67888a7 1669 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
f6198371 1670 CHECK_FPU_ENABLED
a6215749 1671 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1672 TCGv_i64 fp;
93dc9c89
RH
1673 if (ctx->opcode & 0x0100) {
1674 goto do_illegal;
1675 }
a7812ae4 1676 fp = tcg_temp_new_i64();
1e0b21d8 1677 gen_load_fpr64(ctx, fp, B11_8);
ad75a51e 1678 gen_helper_ftrc_DT(cpu_fpul, tcg_env, fp);
ea6cf6be
TS
1679 }
1680 else {
ad75a51e 1681 gen_helper_ftrc_FT(cpu_fpul, tcg_env, FREG(B11_8));
ea6cf6be
TS
1682 }
1683 return;
24988dc2 1684 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
f6198371 1685 CHECK_FPU_ENABLED
7c9f7038 1686 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
24988dc2 1687 return;
57f5c1b0 1688 case 0xf05d: /* fabs FRn/DRn - FPSCR: Nothing */
f6198371 1689 CHECK_FPU_ENABLED
7c9f7038 1690 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
24988dc2
AJ
1691 return;
1692 case 0xf06d: /* fsqrt FRn */
f6198371 1693 CHECK_FPU_ENABLED
a6215749 1694 if (ctx->tbflags & FPSCR_PR) {
93dc9c89
RH
1695 if (ctx->opcode & 0x0100) {
1696 goto do_illegal;
1697 }
a7812ae4 1698 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1699 gen_load_fpr64(ctx, fp, B11_8);
ad75a51e 1700 gen_helper_fsqrt_DT(fp, tcg_env, fp);
1e0b21d8 1701 gen_store_fpr64(ctx, fp, B11_8);
24988dc2 1702 } else {
ad75a51e 1703 gen_helper_fsqrt_FT(FREG(B11_8), tcg_env, FREG(B11_8));
24988dc2
AJ
1704 }
1705 return;
1706 case 0xf07d: /* fsrra FRn */
f6198371 1707 CHECK_FPU_ENABLED
11b7aa23 1708 CHECK_FPSCR_PR_0
ad75a51e 1709 gen_helper_fsrra_FT(FREG(B11_8), tcg_env, FREG(B11_8));
24988dc2 1710 return;
e67888a7 1711 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
f6198371 1712 CHECK_FPU_ENABLED
7e9f7ca8
RH
1713 CHECK_FPSCR_PR_0
1714 tcg_gen_movi_i32(FREG(B11_8), 0);
1715 return;
e67888a7 1716 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
f6198371 1717 CHECK_FPU_ENABLED
7e9f7ca8
RH
1718 CHECK_FPSCR_PR_0
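        /* Load the IEEE-754 single-precision constant 1.0 (0x3f800000). */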
1719 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1720 return;
24988dc2 1721 case 0xf0ad: /* fcnvsd FPUL,DRn */
f6198371 1722 CHECK_FPU_ENABLED
cc4ba6a9 1723 {
a7812ae4 1724 TCGv_i64 fp = tcg_temp_new_i64();
ad75a51e 1725 gen_helper_fcnvsd_FT_DT(fp, tcg_env, cpu_fpul);
1e0b21d8 1726 gen_store_fpr64(ctx, fp, B11_8);
cc4ba6a9 1727 }
24988dc2
AJ
1728 return;
1729 case 0xf0bd: /* fcnvds DRn,FPUL */
f6198371 1730 CHECK_FPU_ENABLED
cc4ba6a9 1731 {
a7812ae4 1732 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1733 gen_load_fpr64(ctx, fp, B11_8);
ad75a51e 1734 gen_helper_fcnvds_DT_FT(cpu_fpul, tcg_env, fp);
cc4ba6a9 1735 }
24988dc2 1736 return;
af8c2bde
AJ
1737 case 0xf0ed: /* fipr FVm,FVn */
1738 CHECK_FPU_ENABLED
7e9f7ca8
RH
1739 CHECK_FPSCR_PR_1
1740 {
950b91be
RH
1741 TCGv m = tcg_constant_i32((ctx->opcode >> 8) & 3);
1742 TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
ad75a51e 1743 gen_helper_fipr(tcg_env, m, n);
af8c2bde
AJ
1744 return;
1745 }
1746 break;
17075f10
AJ
1747 case 0xf0fd: /* ftrv XMTRX,FVn */
1748 CHECK_FPU_ENABLED
7e9f7ca8
RH
1749 CHECK_FPSCR_PR_1
1750 {
1751 if ((ctx->opcode & 0x0300) != 0x0100) {
1752 goto do_illegal;
1753 }
950b91be 1754 TCGv n = tcg_constant_i32((ctx->opcode >> 10) & 3);
ad75a51e 1755 gen_helper_ftrv(tcg_env, n);
17075f10
AJ
1756 return;
1757 }
1758 break;
fdf9b3e8 1759 }
bacc637a 1760#if 0
fdf9b3e8 1761 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
6f1c2af6 1762 ctx->opcode, ctx->base.pc_next);
bacc637a
AJ
1763 fflush(stderr);
1764#endif
6b98213d 1765 do_illegal:
ab419fd8 1766 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
dec16c6e
RH
1767 do_illegal_slot:
1768 gen_save_cpu_state(ctx, true);
ad75a51e 1769 gen_helper_raise_slot_illegal_instruction(tcg_env);
86865c5f 1770 } else {
dec16c6e 1771 gen_save_cpu_state(ctx, true);
ad75a51e 1772 gen_helper_raise_illegal_instruction(tcg_env);
86865c5f 1773 }
6f1c2af6 1774 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042
RH
1775 return;
1776
1777 do_fpu_disabled:
1778 gen_save_cpu_state(ctx, true);
ab419fd8 1779 if (ctx->envflags & TB_FLAG_DELAY_SLOT_MASK) {
ad75a51e 1780 gen_helper_raise_slot_fpu_disable(tcg_env);
dec4f042 1781 } else {
ad75a51e 1782 gen_helper_raise_fpu_disable(tcg_env);
dec4f042 1783 }
6f1c2af6 1784 ctx->base.is_jmp = DISAS_NORETURN;
dec4f042 1785 return;
823029f9
TS
1786}
1787
b1d8e52e 1788static void decode_opc(DisasContext * ctx)
823029f9 1789{
a6215749 1790 uint32_t old_flags = ctx->envflags;
823029f9
TS
1791
1792 _decode_opc(ctx);
1793
ab419fd8 1794 if (old_flags & TB_FLAG_DELAY_SLOT_MASK) {
39682608 1795 /* go out of the delay slot */
ab419fd8 1796 ctx->envflags &= ~TB_FLAG_DELAY_SLOT_MASK;
4bfa602b
RH
1797
1798 /* When in an exclusive region, we must continue to the end
1799 for conditional branches. */
ab419fd8
RH
1800 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE
1801 && old_flags & TB_FLAG_DELAY_SLOT_COND) {
4bfa602b
RH
1802 gen_delayed_conditional_jump(ctx);
1803 return;
1804 }
1805 /* Otherwise this is probably an invalid gUSA region.
1806 Drop the GUSA bits so the next TB doesn't see them. */
ab419fd8 1807 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
4bfa602b 1808
ac9707ea 1809 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
ab419fd8 1810 if (old_flags & TB_FLAG_DELAY_SLOT_COND) {
823029f9 1811 gen_delayed_conditional_jump(ctx);
be53081a 1812 } else {
823029f9
TS
1813 gen_jump(ctx);
1814 }
4bfa602b
RH
1815 }
1816}
1817
1818#ifdef CONFIG_USER_ONLY
1819/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1820 Upon an interrupt, a real kernel would simply notice magic values in
1821 the registers and reset the PC to the start of the sequence.
1822
1823 For QEMU, we cannot do this in quite the same way. Instead, we notice
1824 the normal start of such a sequence (mov #-x,r15). While we can handle
1825 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1826 sequences and transform them into atomic operations as seen by the host.
1827*/
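/*
 * The matcher below accepts regions of at most five insns of the shape:
 *     mov.{b,w,l} @Rm,Rn          load that starts the region
 *     [mov Rx,Ry]                 optional register copy
 *     [op]                        optional ALU op, cmp/eq or tst (the
 *                                 compare forms also consume bf/bt/movt)
 *     mov.{b,w,l} Rm,@Rn          store, the last insn of the region
 * Each accepted shape is mapped onto a single host atomic operation;
 * anything else falls back to the exclusive-region path below.
 */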
be0e3d7a 1828static void decode_gusa(DisasContext *ctx, CPUSH4State *env)
4bfa602b 1829{
d6a6cffd
RH
1830 uint16_t insns[5];
1831 int ld_adr, ld_dst, ld_mop;
1832 int op_dst, op_src, op_opc;
1833 int mv_src, mt_dst, st_src, st_mop;
1834 TCGv op_arg;
6f1c2af6
RH
1835 uint32_t pc = ctx->base.pc_next;
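    /* For a gUSA region, the TB's cs_base holds the address just past the
       last insn of the sequence. */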
1836 uint32_t pc_end = ctx->base.tb->cs_base;
4bfa602b 1837 int max_insns = (pc_end - pc) / 2;
d6a6cffd 1838 int i;
4bfa602b 1839
d6a6cffd
RH
1840 /* The state machine below will consume only a few insns.
1841 If there are more than that in a region, fail now. */
1842 if (max_insns > ARRAY_SIZE(insns)) {
1843 goto fail;
1844 }
1845
1846 /* Read all of the insns for the region. */
1847 for (i = 0; i < max_insns; ++i) {
4e116893 1848 insns[i] = translator_lduw(env, &ctx->base, pc + i * 2);
d6a6cffd
RH
1849 }
1850
1851 ld_adr = ld_dst = ld_mop = -1;
1852 mv_src = -1;
1853 op_dst = op_src = op_opc = -1;
1854 mt_dst = -1;
1855 st_src = st_mop = -1;
f764718d 1856 op_arg = NULL;
d6a6cffd
RH
1857 i = 0;
1858
1859#define NEXT_INSN \
1860 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1861
1862 /*
1863 * Expect a load to begin the region.
1864 */
1865 NEXT_INSN;
1866 switch (ctx->opcode & 0xf00f) {
1867 case 0x6000: /* mov.b @Rm,Rn */
1868 ld_mop = MO_SB;
1869 break;
1870 case 0x6001: /* mov.w @Rm,Rn */
1871 ld_mop = MO_TESW;
1872 break;
1873 case 0x6002: /* mov.l @Rm,Rn */
1874 ld_mop = MO_TESL;
1875 break;
1876 default:
1877 goto fail;
1878 }
1879 ld_adr = B7_4;
1880 ld_dst = B11_8;
1881 if (ld_adr == ld_dst) {
1882 goto fail;
1883 }
1884 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1885 op_dst = ld_dst;
1886
1887 /*
1888 * Expect an optional register move.
1889 */
1890 NEXT_INSN;
1891 switch (ctx->opcode & 0xf00f) {
1892 case 0x6003: /* mov Rm,Rn */
02b8e735 1893 /*
23b5d9fa 1894 * Here we want to recognize ld_dst being saved for later consumption,
02b8e735
PMD
1895 * or for another input register being copied so that ld_dst need not
1896 * be clobbered during the operation.
1897 */
d6a6cffd
RH
1898 op_dst = B11_8;
1899 mv_src = B7_4;
1900 if (op_dst == ld_dst) {
1901 /* Overwriting the load output. */
1902 goto fail;
1903 }
1904 if (mv_src != ld_dst) {
1905 /* Copying a new input; constrain op_src to match the load. */
1906 op_src = ld_dst;
1907 }
1908 break;
1909
1910 default:
1911 /* Put back and re-examine as operation. */
1912 --i;
1913 }
1914
1915 /*
1916 * Expect the operation.
1917 */
1918 NEXT_INSN;
1919 switch (ctx->opcode & 0xf00f) {
1920 case 0x300c: /* add Rm,Rn */
1921 op_opc = INDEX_op_add_i32;
1922 goto do_reg_op;
1923 case 0x2009: /* and Rm,Rn */
1924 op_opc = INDEX_op_and_i32;
1925 goto do_reg_op;
1926 case 0x200a: /* xor Rm,Rn */
1927 op_opc = INDEX_op_xor_i32;
1928 goto do_reg_op;
1929 case 0x200b: /* or Rm,Rn */
1930 op_opc = INDEX_op_or_i32;
1931 do_reg_op:
1932 /* The operation register should be as expected, and the
1933 other input cannot depend on the load. */
1934 if (op_dst != B11_8) {
1935 goto fail;
1936 }
1937 if (op_src < 0) {
 1938 /* Unconstrained input. */
1939 op_src = B7_4;
1940 } else if (op_src == B7_4) {
1941 /* Constrained input matched load. All operations are
1942 commutative; "swap" them by "moving" the load output
1943 to the (implicit) first argument and the move source
1944 to the (explicit) second argument. */
1945 op_src = mv_src;
1946 } else {
1947 goto fail;
1948 }
1949 op_arg = REG(op_src);
1950 break;
1951
1952 case 0x6007: /* not Rm,Rn */
1953 if (ld_dst != B7_4 || mv_src >= 0) {
1954 goto fail;
1955 }
1956 op_dst = B11_8;
1957 op_opc = INDEX_op_xor_i32;
950b91be 1958 op_arg = tcg_constant_i32(-1);
d6a6cffd
RH
1959 break;
1960
1961 case 0x7000 ... 0x700f: /* add #imm,Rn */
1962 if (op_dst != B11_8 || mv_src >= 0) {
1963 goto fail;
1964 }
1965 op_opc = INDEX_op_add_i32;
950b91be 1966 op_arg = tcg_constant_i32(B7_0s);
d6a6cffd
RH
1967 break;
1968
1969 case 0x3000: /* cmp/eq Rm,Rn */
1970 /* Looking for the middle of a compare-and-swap sequence,
 1971 beginning with the compare. Operands can be in either order,
1972 but with only one overlapping the load. */
1973 if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
1974 goto fail;
1975 }
1976 op_opc = INDEX_op_setcond_i32; /* placeholder */
1977 op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
1978 op_arg = REG(op_src);
1979
1980 NEXT_INSN;
1981 switch (ctx->opcode & 0xff00) {
1982 case 0x8b00: /* bf label */
1983 case 0x8f00: /* bf/s label */
1984 if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
1985 goto fail;
1986 }
1987 if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
1988 break;
1989 }
1990 /* We're looking to unconditionally modify Rn with the
1991 result of the comparison, within the delay slot of
1992 the branch. This is used by older gcc. */
1993 NEXT_INSN;
1994 if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
1995 mt_dst = B11_8;
1996 } else {
1997 goto fail;
1998 }
1999 break;
2000
2001 default:
2002 goto fail;
2003 }
2004 break;
2005
2006 case 0x2008: /* tst Rm,Rn */
2007 /* Looking for a compare-and-swap against zero. */
2008 if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2009 goto fail;
2010 }
2011 op_opc = INDEX_op_setcond_i32;
950b91be 2012 op_arg = tcg_constant_i32(0);
d6a6cffd
RH
2013
2014 NEXT_INSN;
2015 if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2016 || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2017 goto fail;
2018 }
2019 break;
2020
2021 default:
2022 /* Put back and re-examine as store. */
2023 --i;
2024 }
2025
2026 /*
2027 * Expect the store.
2028 */
2029 /* The store must be the last insn. */
2030 if (i != max_insns - 1) {
2031 goto fail;
2032 }
2033 NEXT_INSN;
2034 switch (ctx->opcode & 0xf00f) {
2035 case 0x2000: /* mov.b Rm,@Rn */
2036 st_mop = MO_UB;
2037 break;
2038 case 0x2001: /* mov.w Rm,@Rn */
2039 st_mop = MO_UW;
2040 break;
2041 case 0x2002: /* mov.l Rm,@Rn */
2042 st_mop = MO_UL;
2043 break;
2044 default:
2045 goto fail;
2046 }
2047 /* The store must match the load. */
2048 if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2049 goto fail;
2050 }
2051 st_src = B7_4;
2052
2053#undef NEXT_INSN
2054
2055 /*
2056 * Emit the operation.
2057 */
d6a6cffd
RH
2058 switch (op_opc) {
2059 case -1:
2060 /* No operation found. Look for exchange pattern. */
2061 if (st_src == ld_dst || mv_src >= 0) {
2062 goto fail;
2063 }
2064 tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2065 ctx->memidx, ld_mop);
2066 break;
2067
2068 case INDEX_op_add_i32:
2069 if (op_dst != st_src) {
2070 goto fail;
2071 }
2072 if (op_dst == ld_dst && st_mop == MO_UL) {
2073 tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2074 op_arg, ctx->memidx, ld_mop);
2075 } else {
2076 tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2077 op_arg, ctx->memidx, ld_mop);
2078 if (op_dst != ld_dst) {
2079 /* Note that mop sizes < 4 cannot use add_fetch
2080 because it won't carry into the higher bits. */
2081 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2082 }
2083 }
2084 break;
2085
2086 case INDEX_op_and_i32:
2087 if (op_dst != st_src) {
2088 goto fail;
2089 }
2090 if (op_dst == ld_dst) {
2091 tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2092 op_arg, ctx->memidx, ld_mop);
2093 } else {
2094 tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2095 op_arg, ctx->memidx, ld_mop);
2096 tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2097 }
2098 break;
2099
2100 case INDEX_op_or_i32:
2101 if (op_dst != st_src) {
2102 goto fail;
2103 }
2104 if (op_dst == ld_dst) {
2105 tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2106 op_arg, ctx->memidx, ld_mop);
2107 } else {
2108 tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2109 op_arg, ctx->memidx, ld_mop);
2110 tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2111 }
2112 break;
2113
2114 case INDEX_op_xor_i32:
2115 if (op_dst != st_src) {
2116 goto fail;
2117 }
2118 if (op_dst == ld_dst) {
2119 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2120 op_arg, ctx->memidx, ld_mop);
2121 } else {
2122 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2123 op_arg, ctx->memidx, ld_mop);
2124 tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2125 }
2126 break;
2127
2128 case INDEX_op_setcond_i32:
2129 if (st_src == ld_dst) {
2130 goto fail;
2131 }
2132 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2133 REG(st_src), ctx->memidx, ld_mop);
2134 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2135 if (mt_dst >= 0) {
2136 tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2137 }
2138 break;
2139
2140 default:
2141 g_assert_not_reached();
2142 }
2143
d6a6cffd 2144 /* The entire region has been translated. */
ab419fd8 2145 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
e03291cd 2146 goto done;
d6a6cffd
RH
2147
2148 fail:
4bfa602b
RH
2149 qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2150 pc, pc_end);
2151
2152 /* Restart with the EXCLUSIVE bit set, within a TB run via
2153 cpu_exec_step_atomic holding the exclusive lock. */
ab419fd8 2154 ctx->envflags |= TB_FLAG_GUSA_EXCLUSIVE;
4bfa602b 2155 gen_save_cpu_state(ctx, false);
ad75a51e 2156 gen_helper_exclusive(tcg_env);
6f1c2af6 2157 ctx->base.is_jmp = DISAS_NORETURN;
4bfa602b
RH
2158
2159 /* We're not executing an instruction, but we must report one for the
2160 purposes of accounting within the TB. We might as well report the
6f1c2af6
RH
2161 entire region consumed via ctx->base.pc_next so that it's immediately
2162 available in the disassembly dump. */
e03291cd
RH
2163
2164 done:
6f1c2af6 2165 ctx->base.pc_next = pc_end;
be0e3d7a 2166 ctx->base.num_insns += max_insns - 1;
e03291cd
RH
2167
2168 /*
2169 * Emit insn_start to cover each of the insns in the region.
2170 * This matches an assert in tcg.c making sure that we have
2171 * tb->icount * insn_start.
2172 */
2173 for (i = 1; i < max_insns; ++i) {
2174 tcg_gen_insn_start(pc + i * 2, ctx->envflags);
2175 }
fdf9b3e8 2176}
4bfa602b 2177#endif
fdf9b3e8 2178
fd1b3d38 2179static void sh4_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
fdf9b3e8 2180{
fd1b3d38 2181 DisasContext *ctx = container_of(dcbase, DisasContext, base);
9c489ea6 2182 CPUSH4State *env = cs->env_ptr;
be0e3d7a 2183 uint32_t tbflags;
fd1b3d38
EC
2184 int bound;
2185
be0e3d7a
RH
2186 ctx->tbflags = tbflags = ctx->base.tb->flags;
2187 ctx->envflags = tbflags & TB_FLAG_ENVFLAGS_MASK;
2188 ctx->memidx = (tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
9854bc46
PB
2189 /* We don't know if the delayed pc came from a dynamic or static branch,
2190 so assume it is a dynamic branch. */
fd1b3d38
EC
2191 ctx->delayed_pc = -1; /* use delayed pc from env pointer */
2192 ctx->features = env->features;
be0e3d7a
RH
2193 ctx->has_movcal = (tbflags & TB_FLAG_PENDING_MOVCA);
2194 ctx->gbank = ((tbflags & (1 << SR_MD)) &&
2195 (tbflags & (1 << SR_RB))) * 0x10;
2196 ctx->fbank = tbflags & FPSCR_FR ? 0x10 : 0;
2197
ab419fd8
RH
2198#ifdef CONFIG_USER_ONLY
2199 if (tbflags & TB_FLAG_GUSA_MASK) {
2200 /* In gUSA exclusive region. */
be0e3d7a
RH
2201 uint32_t pc = ctx->base.pc_next;
2202 uint32_t pc_end = ctx->base.tb->cs_base;
ab419fd8 2203 int backup = sextract32(ctx->tbflags, TB_FLAG_GUSA_SHIFT, 8);
be0e3d7a
RH
2204 int max_insns = (pc_end - pc) / 2;
2205
2206 if (pc != pc_end + backup || max_insns < 2) {
2207 /* This is a malformed gUSA region. Don't do anything special,
2208 since the interpreter is likely to get confused. */
ab419fd8
RH
2209 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
2210 } else if (tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
be0e3d7a
RH
2211 /* Regardless of single-stepping or the end of the page,
2212 we must complete execution of the gUSA region while
2213 holding the exclusive lock. */
2214 ctx->base.max_insns = max_insns;
2215 return;
2216 }
2217 }
ab419fd8 2218#endif
4448a836
RH
2219
2220 /* Since the ISA is fixed-width, we can bound by the number
2221 of instructions remaining on the page. */
fd1b3d38
EC
2222 bound = -(ctx->base.pc_next | TARGET_PAGE_MASK) / 2;
2223 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
2224}
4448a836 2225
fd1b3d38
EC
2226static void sh4_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
2227{
fd1b3d38 2228}
4bfa602b 2229
fd1b3d38
EC
2230static void sh4_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
2231{
2232 DisasContext *ctx = container_of(dcbase, DisasContext, base);
667b8e29 2233
fd1b3d38
EC
2234 tcg_gen_insn_start(ctx->base.pc_next, ctx->envflags);
2235}
b933066a 2236
fd1b3d38
EC
2237static void sh4_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
2238{
2239 CPUSH4State *env = cs->env_ptr;
2240 DisasContext *ctx = container_of(dcbase, DisasContext, base);
4bfa602b 2241
be0e3d7a 2242#ifdef CONFIG_USER_ONLY
ab419fd8
RH
2243 if (unlikely(ctx->envflags & TB_FLAG_GUSA_MASK)
2244 && !(ctx->envflags & TB_FLAG_GUSA_EXCLUSIVE)) {
be0e3d7a
RH
 2245 /* We're in a gUSA region, and we have not already fallen
2246 back on using an exclusive region. Attempt to parse the
2247 region into a single supported atomic operation. Failure
2248 is handled within the parser by raising an exception to
2249 retry using an exclusive region. */
2250 decode_gusa(ctx, env);
2251 return;
2252 }
2253#endif
2254
4e116893 2255 ctx->opcode = translator_lduw(env, &ctx->base, ctx->base.pc_next);
fd1b3d38
EC
2256 decode_opc(ctx);
2257 ctx->base.pc_next += 2;
2258}
2259
2260static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
2261{
2262 DisasContext *ctx = container_of(dcbase, DisasContext, base);
2263
ab419fd8 2264 if (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) {
4bfa602b 2265 /* Ending the region of exclusivity. Clear the bits. */
ab419fd8 2266 ctx->envflags &= ~TB_FLAG_GUSA_MASK;
4bfa602b
RH
2267 }
2268
fd1b3d38 2269 switch (ctx->base.is_jmp) {
34cf5678 2270 case DISAS_STOP:
fd1b3d38 2271 gen_save_cpu_state(ctx, true);
52df5adc 2272 tcg_gen_exit_tb(NULL, 0);
34cf5678
RH
2273 break;
2274 case DISAS_NEXT:
fd1b3d38
EC
2275 case DISAS_TOO_MANY:
2276 gen_save_cpu_state(ctx, false);
2277 gen_goto_tb(ctx, 0, ctx->base.pc_next);
34cf5678
RH
2278 break;
2279 case DISAS_NORETURN:
2280 break;
2281 default:
2282 g_assert_not_reached();
fdf9b3e8 2283 }
fd1b3d38 2284}
823029f9 2285
8eb806a7
RH
2286static void sh4_tr_disas_log(const DisasContextBase *dcbase,
2287 CPUState *cs, FILE *logfile)
fd1b3d38 2288{
8eb806a7
RH
2289 fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
2290 target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
fd1b3d38 2291}
0a7df5da 2292
fd1b3d38
EC
2293static const TranslatorOps sh4_tr_ops = {
2294 .init_disas_context = sh4_tr_init_disas_context,
2295 .tb_start = sh4_tr_tb_start,
2296 .insn_start = sh4_tr_insn_start,
fd1b3d38
EC
2297 .translate_insn = sh4_tr_translate_insn,
2298 .tb_stop = sh4_tr_tb_stop,
2299 .disas_log = sh4_tr_disas_log,
2300};
2301
597f9b2d 2302void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
306c8721 2303 target_ulong pc, void *host_pc)
fd1b3d38
EC
2304{
2305 DisasContext ctx;
fdf9b3e8 2306
306c8721 2307 translator_loop(cs, tb, max_insns, pc, host_pc, &sh4_tr_ops, &ctx.base);
fdf9b3e8 2308}