target/sh4/translate.c
fdf9b3e8
FB
1/*
2 * SH4 translation
5fafdf24 3 *
fdf9b3e8
FB
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
fdf9b3e8 18 */
fdf9b3e8
FB
19
20#define DEBUG_DISAS
fdf9b3e8 21
9d4c9946 22#include "qemu/osdep.h"
fdf9b3e8 23#include "cpu.h"
76cad711 24#include "disas/disas.h"
63c91552 25#include "exec/exec-all.h"
57fec1fe 26#include "tcg-op.h"
f08b6170 27#include "exec/cpu_ldst.h"
fdf9b3e8 28
2ef6175a
RH
29#include "exec/helper-proto.h"
30#include "exec/helper-gen.h"
a7812ae4 31
a7e30d84 32#include "trace-tcg.h"
508127e2 33#include "exec/log.h"
a7e30d84
LV
34
35
fdf9b3e8
FB
36typedef struct DisasContext {
37 struct TranslationBlock *tb;
38 target_ulong pc;
fdf9b3e8 39 uint16_t opcode;
a6215749
AJ
40 uint32_t tbflags; /* should stay unmodified during the TB translation */
41 uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
823029f9 42 int bstate;
fdf9b3e8 43 int memidx;
3a3bb8d2 44 int gbank;
5c13bad9 45 int fbank;
fdf9b3e8
FB
46 uint32_t delayed_pc;
47 int singlestep_enabled;
71968fa6 48 uint32_t features;
852d481f 49 int has_movcal;
fdf9b3e8
FB
50} DisasContext;
51
fe25591e
AJ
52#if defined(CONFIG_USER_ONLY)
53#define IS_USER(ctx) 1
54#else
a6215749 55#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
fe25591e
AJ
56#endif
57
823029f9
TS
58enum {
59 BS_NONE = 0, /* We go out of the TB without reaching a branch or an
60 * exception condition
61 */
62 BS_STOP = 1, /* We want to stop translation for any reason */
63 BS_BRANCH = 2, /* We reached a branch condition */
64 BS_EXCP = 3, /* We reached an exception condition */
65};
66
1e8864f7 67/* global register indexes */
1bcea73e 68static TCGv_env cpu_env;
3a3bb8d2 69static TCGv cpu_gregs[32];
1d565b21
AJ
70static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
71static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
3a8a44c4 72static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
66c7c806 73static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
66ba317c 74static TCGv cpu_fregs[32];
1000822b
AJ
75
76/* internal register indexes */
47b9f4d5 77static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;
1e8864f7 78
022c62cb 79#include "exec/gen-icount.h"
2e70f6ef 80
aa7408ec 81void sh4_translate_init(void)
2e70f6ef 82{
1e8864f7 83 int i;
2e70f6ef 84 static int done_init = 0;
559dd74d 85 static const char * const gregnames[24] = {
1e8864f7
AJ
86 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
87 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
88 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
89 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
90 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
91 };
66ba317c
AJ
92 static const char * const fregnames[32] = {
93 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
94 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
95 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
96 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
97 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
98 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
99 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
100 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
101 };
1e8864f7 102
3a3bb8d2 103 if (done_init) {
2e70f6ef 104 return;
3a3bb8d2 105 }
1e8864f7 106
a7812ae4 107 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7c255043 108 tcg_ctx.tcg_env = cpu_env;
1e8864f7 109
3a3bb8d2 110 for (i = 0; i < 24; i++) {
e1ccc054 111 cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
73e5716c 112 offsetof(CPUSH4State, gregs[i]),
66ba317c 113 gregnames[i]);
3a3bb8d2
RH
114 }
115 memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));
988d7eaa 116
e1ccc054 117 cpu_pc = tcg_global_mem_new_i32(cpu_env,
73e5716c 118 offsetof(CPUSH4State, pc), "PC");
e1ccc054 119 cpu_sr = tcg_global_mem_new_i32(cpu_env,
73e5716c 120 offsetof(CPUSH4State, sr), "SR");
e1ccc054
RH
121 cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
122 offsetof(CPUSH4State, sr_m), "SR_M");
123 cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
124 offsetof(CPUSH4State, sr_q), "SR_Q");
125 cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
126 offsetof(CPUSH4State, sr_t), "SR_T");
127 cpu_ssr = tcg_global_mem_new_i32(cpu_env,
73e5716c 128 offsetof(CPUSH4State, ssr), "SSR");
e1ccc054 129 cpu_spc = tcg_global_mem_new_i32(cpu_env,
73e5716c 130 offsetof(CPUSH4State, spc), "SPC");
e1ccc054 131 cpu_gbr = tcg_global_mem_new_i32(cpu_env,
73e5716c 132 offsetof(CPUSH4State, gbr), "GBR");
e1ccc054 133 cpu_vbr = tcg_global_mem_new_i32(cpu_env,
73e5716c 134 offsetof(CPUSH4State, vbr), "VBR");
e1ccc054 135 cpu_sgr = tcg_global_mem_new_i32(cpu_env,
73e5716c 136 offsetof(CPUSH4State, sgr), "SGR");
e1ccc054 137 cpu_dbr = tcg_global_mem_new_i32(cpu_env,
73e5716c 138 offsetof(CPUSH4State, dbr), "DBR");
e1ccc054 139 cpu_mach = tcg_global_mem_new_i32(cpu_env,
73e5716c 140 offsetof(CPUSH4State, mach), "MACH");
e1ccc054 141 cpu_macl = tcg_global_mem_new_i32(cpu_env,
73e5716c 142 offsetof(CPUSH4State, macl), "MACL");
e1ccc054 143 cpu_pr = tcg_global_mem_new_i32(cpu_env,
73e5716c 144 offsetof(CPUSH4State, pr), "PR");
e1ccc054 145 cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
73e5716c 146 offsetof(CPUSH4State, fpscr), "FPSCR");
e1ccc054 147 cpu_fpul = tcg_global_mem_new_i32(cpu_env,
73e5716c 148 offsetof(CPUSH4State, fpul), "FPUL");
a7812ae4 149
e1ccc054 150 cpu_flags = tcg_global_mem_new_i32(cpu_env,
73e5716c 151 offsetof(CPUSH4State, flags), "_flags_");
e1ccc054 152 cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
73e5716c 153 offsetof(CPUSH4State, delayed_pc),
a7812ae4 154 "_delayed_pc_");
47b9f4d5
AJ
155 cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
156 offsetof(CPUSH4State,
157 delayed_cond),
158 "_delayed_cond_");
e1ccc054 159 cpu_ldst = tcg_global_mem_new_i32(cpu_env,
73e5716c 160 offsetof(CPUSH4State, ldst), "_ldst_");
1000822b 161
66ba317c 162 for (i = 0; i < 32; i++)
e1ccc054 163 cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
73e5716c 164 offsetof(CPUSH4State, fregs[i]),
66ba317c
AJ
165 fregnames[i]);
166
2e70f6ef
PB
167 done_init = 1;
168}
169
878096ee
AF
170void superh_cpu_dump_state(CPUState *cs, FILE *f,
171 fprintf_function cpu_fprintf, int flags)
fdf9b3e8 172{
878096ee
AF
173 SuperHCPU *cpu = SUPERH_CPU(cs);
174 CPUSH4State *env = &cpu->env;
fdf9b3e8 175 int i;
eda9b09b 176 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
34086945 177 env->pc, cpu_read_sr(env), env->pr, env->fpscr);
274a9e70
AJ
178 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
179 env->spc, env->ssr, env->gbr, env->vbr);
180 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
181 env->sgr, env->dbr, env->delayed_pc, env->fpul);
fdf9b3e8
FB
182 for (i = 0; i < 24; i += 4) {
183 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
184 i, env->gregs[i], i + 1, env->gregs[i + 1],
185 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
186 }
187 if (env->flags & DELAY_SLOT) {
188 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
189 env->delayed_pc);
190 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
191 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
192 env->delayed_pc);
be53081a
AJ
193 } else if (env->flags & DELAY_SLOT_RTE) {
194 cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
195 env->delayed_pc);
fdf9b3e8
FB
196 }
197}
198
34086945
AJ
199static void gen_read_sr(TCGv dst)
200{
1d565b21
AJ
201 TCGv t0 = tcg_temp_new();
202 tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
203 tcg_gen_or_i32(dst, cpu_sr, t0);
204 tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
205 tcg_gen_or_i32(dst, dst, t0);
206 tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
207 tcg_gen_or_i32(dst, dst, t0);
208 tcg_temp_free_i32(t0);
34086945
AJ
209}
210
211static void gen_write_sr(TCGv src)
212{
1d565b21
AJ
213 tcg_gen_andi_i32(cpu_sr, src,
214 ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
a380f9db
AJ
215 tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
216 tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
217 tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
34086945
AJ
218}
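/* SR is kept split across TCG globals: cpu_sr holds every bit except Q, M
   and T, which live in cpu_sr_q, cpu_sr_m and cpu_sr_t so the translator
   can update them directly (e.g. with setcond) without read-modify-write
   sequences on SR. gen_read_sr/gen_write_sr convert between this split
   form and the architectural 32-bit SR value. */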
219
ac9707ea
AJ
220static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
221{
222 if (save_pc) {
223 tcg_gen_movi_i32(cpu_pc, ctx->pc);
224 }
225 if (ctx->delayed_pc != (uint32_t) -1) {
226 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
227 }
e1933d14 228 if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
ac9707ea
AJ
229 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
230 }
231}
232
ec2eb22e
RH
233static inline bool use_exit_tb(DisasContext *ctx)
234{
235 return (ctx->tbflags & GUSA_EXCLUSIVE) != 0;
236}
237
90aa39a1 238static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
fdf9b3e8 239{
ec2eb22e
RH
240 /* Use a direct jump if in same page and singlestep not enabled */
241 if (unlikely(ctx->singlestep_enabled || use_exit_tb(ctx))) {
4bfa602b
RH
242 return false;
243 }
90aa39a1
SF
244#ifndef CONFIG_USER_ONLY
245 return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
246#else
247 return true;
248#endif
249}
fdf9b3e8 250
90aa39a1
SF
251static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
252{
253 if (use_goto_tb(ctx, dest)) {
57fec1fe 254 tcg_gen_goto_tb(n);
3a8a44c4 255 tcg_gen_movi_i32(cpu_pc, dest);
90aa39a1 256 tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
fdf9b3e8 257 } else {
3a8a44c4 258 tcg_gen_movi_i32(cpu_pc, dest);
ec2eb22e 259 if (ctx->singlestep_enabled) {
485d0035 260 gen_helper_debug(cpu_env);
ec2eb22e
RH
261 } else if (use_exit_tb(ctx)) {
262 tcg_gen_exit_tb(0);
263 } else {
264 tcg_gen_lookup_and_goto_ptr(cpu_pc);
265 }
fdf9b3e8 266 }
fdf9b3e8
FB
267}
268
fdf9b3e8
FB
269static void gen_jump(DisasContext * ctx)
270{
ec2eb22e 271 if (ctx->delayed_pc == -1) {
fdf9b3e8
FB
272 /* Target is not statically known, it comes necessarily from a
273 delayed jump as immediate jump are conditinal jumps */
1000822b 274 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
ac9707ea 275 tcg_gen_discard_i32(cpu_delayed_pc);
ec2eb22e 276 if (ctx->singlestep_enabled) {
485d0035 277 gen_helper_debug(cpu_env);
ec2eb22e
RH
278 } else if (use_exit_tb(ctx)) {
279 tcg_gen_exit_tb(0);
280 } else {
281 tcg_gen_lookup_and_goto_ptr(cpu_pc);
282 }
fdf9b3e8
FB
283 } else {
284 gen_goto_tb(ctx, 0, ctx->delayed_pc);
285 }
286}
287
288/* Immediate conditional jump (bt or bf) */
4bfa602b
RH
289static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
290 bool jump_if_true)
fdf9b3e8 291{
34086945 292 TCGLabel *l1 = gen_new_label();
4bfa602b
RH
293 TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;
294
295 if (ctx->tbflags & GUSA_EXCLUSIVE) {
296 /* When in an exclusive region, we must continue to the end.
297 Therefore, exit the region on a taken branch, but otherwise
298 fall through to the next instruction. */
299 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
300 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
301 /* Note that this won't actually use a goto_tb opcode because we
302 disallow it in use_goto_tb, but it handles exit + singlestep. */
303 gen_goto_tb(ctx, 0, dest);
304 gen_set_label(l1);
305 return;
306 }
307
ac9707ea 308 gen_save_cpu_state(ctx, false);
4bfa602b
RH
309 tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
310 gen_goto_tb(ctx, 0, dest);
fdf9b3e8 311 gen_set_label(l1);
4bfa602b 312 gen_goto_tb(ctx, 1, ctx->pc + 2);
b3995c23 313 ctx->bstate = BS_BRANCH;
fdf9b3e8
FB
314}
315
316/* Delayed conditional jump (bt or bf) */
317static void gen_delayed_conditional_jump(DisasContext * ctx)
318{
4bfa602b
RH
319 TCGLabel *l1 = gen_new_label();
320 TCGv ds = tcg_temp_new();
fdf9b3e8 321
47b9f4d5
AJ
322 tcg_gen_mov_i32(ds, cpu_delayed_cond);
323 tcg_gen_discard_i32(cpu_delayed_cond);
4bfa602b
RH
324
325 if (ctx->tbflags & GUSA_EXCLUSIVE) {
326 /* When in an exclusive region, we must continue to the end.
327 Therefore, exit the region on a taken branch, but otherwise
328 fall through to the next instruction. */
329 tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);
330
331 /* Leave the gUSA region. */
332 tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
333 gen_jump(ctx);
334
335 gen_set_label(l1);
336 return;
337 }
338
6f396c8f 339 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
823029f9 340 gen_goto_tb(ctx, 1, ctx->pc + 2);
fdf9b3e8 341 gen_set_label(l1);
9c2a9ea1 342 gen_jump(ctx);
fdf9b3e8
FB
343}
344
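/* Double-precision DRn is the even/odd FPR pair, with the even register
   holding the most significant 32 bits, so the helpers below rebuild the
   64-bit value as concat(FPR[n + 1], FPR[n]) and split it the same way. */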
e5d8053e 345static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 346{
1e0b21d8
RH
347 /* We have already signaled illegal instruction for odd Dr. */
348 tcg_debug_assert((reg & 1) == 0);
349 reg ^= ctx->fbank;
66ba317c 350 tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
cc4ba6a9
AJ
351}
352
e5d8053e 353static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
cc4ba6a9 354{
1e0b21d8
RH
355 /* We have already signaled illegal instruction for odd Dr. */
356 tcg_debug_assert((reg & 1) == 0);
357 reg ^= ctx->fbank;
58d2a9ae 358 tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
cc4ba6a9
AJ
359}
360
fdf9b3e8
FB
361#define B3_0 (ctx->opcode & 0xf)
362#define B6_4 ((ctx->opcode >> 4) & 0x7)
363#define B7_4 ((ctx->opcode >> 4) & 0xf)
364#define B7_0 (ctx->opcode & 0xff)
365#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
366#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
367 (ctx->opcode & 0xfff))
368#define B11_8 ((ctx->opcode >> 8) & 0xf)
369#define B15_12 ((ctx->opcode >> 12) & 0xf)
370
3a3bb8d2
RH
371#define REG(x) cpu_gregs[(x) ^ ctx->gbank]
372#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
5c13bad9 373#define FREG(x) cpu_fregs[(x) ^ ctx->fbank]
fdf9b3e8 374
f09111e0 375#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
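/* For 64-bit fmov with FPSCR.SZ set, bit 0 of the register field selects
   the XD (bank 1) registers. XHACK moves that bit up to bit 4 so it indexes
   the second half of cpu_fregs[], e.g. XHACK(0) = 0 (DR0),
   XHACK(1) = 0x10 (XD0), XHACK(3) = 0x12 (XD2). */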
eda9b09b 376
fdf9b3e8 377#define CHECK_NOT_DELAY_SLOT \
dec16c6e
RH
378 if (ctx->envflags & DELAY_SLOT_MASK) { \
379 goto do_illegal_slot; \
a6215749
AJ
380 }
381
6b98213d
RH
382#define CHECK_PRIVILEGED \
383 if (IS_USER(ctx)) { \
384 goto do_illegal; \
a6215749
AJ
385 }
386
dec4f042
RH
387#define CHECK_FPU_ENABLED \
388 if (ctx->tbflags & (1u << SR_FD)) { \
389 goto do_fpu_disabled; \
a6215749 390 }
d8299bcc 391
7e9f7ca8
RH
392#define CHECK_FPSCR_PR_0 \
393 if (ctx->tbflags & FPSCR_PR) { \
394 goto do_illegal; \
395 }
396
397#define CHECK_FPSCR_PR_1 \
398 if (!(ctx->tbflags & FPSCR_PR)) { \
399 goto do_illegal; \
400 }
401
ccae24d4
RH
402#define CHECK_SH4A \
403 if (!(ctx->features & SH_FEATURE_SH4A)) { \
404 goto do_illegal; \
405 }
406
b1d8e52e 407static void _decode_opc(DisasContext * ctx)
fdf9b3e8 408{
852d481f
EI
409 /* This code tries to make movcal emulation sufficiently
410 accurate for Linux purposes. This instruction writes
411 memory, and prior to that, always allocates a cache line.
412 It is used in two contexts:
413 - in memcpy, where data is copied in blocks, the first write
414 to a block uses movca.l for performance.
415 - in arch/sh/mm/cache-sh4.c, a movca.l + ocbi combination is used
416 to flush the cache. Here, the data written by movca.l is never
417 written to memory, and the data written is just bogus.
418
419 To simulate this, when we emulate movca.l we store the value to memory,
420 but we also remember the previous content. If we see an ocbi, we check
421 whether a movca.l was done previously for that address. If so, the write
422 should not have hit memory, so we restore the previous content.
423 When we see an instruction that is neither movca.l
424 nor ocbi, the previous content is discarded.
425
426 To optimize, we only try to flush stores when we're at the start of
427 TB, or if we already saw movca.l in this TB and did not flush stores
428 yet. */
429 if (ctx->has_movcal)
430 {
431 int opcode = ctx->opcode & 0xf0ff;
432 if (opcode != 0x0093 /* ocbi */
433 && opcode != 0x00c3 /* movca.l */)
434 {
485d0035 435 gen_helper_discard_movcal_backup(cpu_env);
852d481f
EI
436 ctx->has_movcal = 0;
437 }
438 }
439
fdf9b3e8
FB
440#if 0
441 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
442#endif
f6198371 443
fdf9b3e8
FB
444 switch (ctx->opcode) {
445 case 0x0019: /* div0u */
1d565b21
AJ
446 tcg_gen_movi_i32(cpu_sr_m, 0);
447 tcg_gen_movi_i32(cpu_sr_q, 0);
34086945 448 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
449 return;
450 case 0x000b: /* rts */
1000822b
AJ
451 CHECK_NOT_DELAY_SLOT
452 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
a6215749 453 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
454 ctx->delayed_pc = (uint32_t) - 1;
455 return;
456 case 0x0028: /* clrmac */
3a8a44c4
AJ
457 tcg_gen_movi_i32(cpu_mach, 0);
458 tcg_gen_movi_i32(cpu_macl, 0);
fdf9b3e8
FB
459 return;
460 case 0x0048: /* clrs */
5ed9a259 461 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
fdf9b3e8
FB
462 return;
463 case 0x0008: /* clrt */
34086945 464 tcg_gen_movi_i32(cpu_sr_t, 0);
fdf9b3e8
FB
465 return;
466 case 0x0038: /* ldtlb */
fe25591e 467 CHECK_PRIVILEGED
485d0035 468 gen_helper_ldtlb(cpu_env);
fdf9b3e8 469 return;
c5e814b2 470 case 0x002b: /* rte */
fe25591e 471 CHECK_PRIVILEGED
1000822b 472 CHECK_NOT_DELAY_SLOT
34086945 473 gen_write_sr(cpu_ssr);
1000822b 474 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
be53081a 475 ctx->envflags |= DELAY_SLOT_RTE;
fdf9b3e8 476 ctx->delayed_pc = (uint32_t) - 1;
be53081a 477 ctx->bstate = BS_STOP;
fdf9b3e8
FB
478 return;
479 case 0x0058: /* sets */
5ed9a259 480 tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
fdf9b3e8
FB
481 return;
482 case 0x0018: /* sett */
34086945 483 tcg_gen_movi_i32(cpu_sr_t, 1);
fdf9b3e8 484 return;
24988dc2 485 case 0xfbfd: /* frchg */
61dedf2a 486 CHECK_FPSCR_PR_0
6f06939b 487 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
823029f9 488 ctx->bstate = BS_STOP;
fdf9b3e8 489 return;
24988dc2 490 case 0xf3fd: /* fschg */
61dedf2a 491 CHECK_FPSCR_PR_0
7a64244f 492 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
823029f9 493 ctx->bstate = BS_STOP;
fdf9b3e8 494 return;
907759f9
RH
495 case 0xf7fd: /* fpchg */
496 CHECK_SH4A
497 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
498 ctx->bstate = BS_STOP;
499 return;
fdf9b3e8
FB
500 case 0x0009: /* nop */
501 return;
502 case 0x001b: /* sleep */
fe25591e 503 CHECK_PRIVILEGED
10127400
AJ
504 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
505 gen_helper_sleep(cpu_env);
fdf9b3e8
FB
506 return;
507 }
508
509 switch (ctx->opcode & 0xf000) {
510 case 0x1000: /* mov.l Rm,@(disp,Rn) */
c55497ec 511 {
a7812ae4 512 TCGv addr = tcg_temp_new();
c55497ec 513 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
3376f415 514 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
c55497ec
AJ
515 tcg_temp_free(addr);
516 }
fdf9b3e8
FB
517 return;
518 case 0x5000: /* mov.l @(disp,Rm),Rn */
c55497ec 519 {
a7812ae4 520 TCGv addr = tcg_temp_new();
c55497ec 521 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
3376f415 522 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
523 tcg_temp_free(addr);
524 }
fdf9b3e8 525 return;
24988dc2 526 case 0xe000: /* mov #imm,Rn */
4bfa602b
RH
527#ifdef CONFIG_USER_ONLY
528 /* Detect the start of a gUSA region. If so, update envflags
529 and end the TB. This will allow us to see the end of the
530 region (stored in R0) in the next TB. */
531 if (B11_8 == 15 && B7_0s < 0 && parallel_cpus) {
532 ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
533 ctx->bstate = BS_STOP;
534 }
535#endif
7efbe241 536 tcg_gen_movi_i32(REG(B11_8), B7_0s);
fdf9b3e8
FB
537 return;
538 case 0x9000: /* mov.w @(disp,PC),Rn */
c55497ec
AJ
539 {
540 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
3376f415 541 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
542 tcg_temp_free(addr);
543 }
fdf9b3e8
FB
544 return;
545 case 0xd000: /* mov.l @(disp,PC),Rn */
c55497ec
AJ
546 {
547 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
3376f415 548 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
549 tcg_temp_free(addr);
550 }
fdf9b3e8 551 return;
24988dc2 552 case 0x7000: /* add #imm,Rn */
7efbe241 553 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
fdf9b3e8
FB
554 return;
555 case 0xa000: /* bra disp */
556 CHECK_NOT_DELAY_SLOT
1000822b 557 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
a6215749 558 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
559 return;
560 case 0xb000: /* bsr disp */
561 CHECK_NOT_DELAY_SLOT
1000822b
AJ
562 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
563 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
a6215749 564 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
565 return;
566 }
567
568 switch (ctx->opcode & 0xf00f) {
569 case 0x6003: /* mov Rm,Rn */
7efbe241 570 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
571 return;
572 case 0x2000: /* mov.b Rm,@Rn */
3376f415 573 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
fdf9b3e8
FB
574 return;
575 case 0x2001: /* mov.w Rm,@Rn */
3376f415 576 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
fdf9b3e8
FB
577 return;
578 case 0x2002: /* mov.l Rm,@Rn */
3376f415 579 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
fdf9b3e8
FB
580 return;
581 case 0x6000: /* mov.b @Rm,Rn */
3376f415 582 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
fdf9b3e8
FB
583 return;
584 case 0x6001: /* mov.w @Rm,Rn */
3376f415 585 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
fdf9b3e8
FB
586 return;
587 case 0x6002: /* mov.l @Rm,Rn */
3376f415 588 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
fdf9b3e8
FB
589 return;
590 case 0x2004: /* mov.b Rm,@-Rn */
c55497ec 591 {
a7812ae4 592 TCGv addr = tcg_temp_new();
c55497ec 593 tcg_gen_subi_i32(addr, REG(B11_8), 1);
3376f415
AJ
594 /* might cause re-execution */
595 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
3101e99c 596 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
c55497ec
AJ
597 tcg_temp_free(addr);
598 }
fdf9b3e8
FB
599 return;
600 case 0x2005: /* mov.w Rm,@-Rn */
c55497ec 601 {
a7812ae4 602 TCGv addr = tcg_temp_new();
c55497ec 603 tcg_gen_subi_i32(addr, REG(B11_8), 2);
3376f415 604 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
3101e99c 605 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec
AJ
606 tcg_temp_free(addr);
607 }
fdf9b3e8
FB
608 return;
609 case 0x2006: /* mov.l Rm,@-Rn */
c55497ec 610 {
a7812ae4 611 TCGv addr = tcg_temp_new();
c55497ec 612 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 613 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
3101e99c 614 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 615 }
fdf9b3e8 616 return;
eda9b09b 617 case 0x6004: /* mov.b @Rm+,Rn */
3376f415 618 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
24988dc2 619 if ( B11_8 != B7_4 )
7efbe241 620 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
fdf9b3e8
FB
621 return;
622 case 0x6005: /* mov.w @Rm+,Rn */
3376f415 623 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
24988dc2 624 if ( B11_8 != B7_4 )
7efbe241 625 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
fdf9b3e8
FB
626 return;
627 case 0x6006: /* mov.l @Rm+,Rn */
3376f415 628 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
24988dc2 629 if ( B11_8 != B7_4 )
7efbe241 630 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
fdf9b3e8
FB
631 return;
632 case 0x0004: /* mov.b Rm,@(R0,Rn) */
c55497ec 633 {
a7812ae4 634 TCGv addr = tcg_temp_new();
c55497ec 635 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 636 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
c55497ec
AJ
637 tcg_temp_free(addr);
638 }
fdf9b3e8
FB
639 return;
640 case 0x0005: /* mov.w Rm,@(R0,Rn) */
c55497ec 641 {
a7812ae4 642 TCGv addr = tcg_temp_new();
c55497ec 643 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 644 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
c55497ec
AJ
645 tcg_temp_free(addr);
646 }
fdf9b3e8
FB
647 return;
648 case 0x0006: /* mov.l Rm,@(R0,Rn) */
c55497ec 649 {
a7812ae4 650 TCGv addr = tcg_temp_new();
c55497ec 651 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
3376f415 652 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
c55497ec
AJ
653 tcg_temp_free(addr);
654 }
fdf9b3e8
FB
655 return;
656 case 0x000c: /* mov.b @(R0,Rm),Rn */
c55497ec 657 {
a7812ae4 658 TCGv addr = tcg_temp_new();
c55497ec 659 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 660 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
c55497ec
AJ
661 tcg_temp_free(addr);
662 }
fdf9b3e8
FB
663 return;
664 case 0x000d: /* mov.w @(R0,Rm),Rn */
c55497ec 665 {
a7812ae4 666 TCGv addr = tcg_temp_new();
c55497ec 667 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 668 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
669 tcg_temp_free(addr);
670 }
fdf9b3e8
FB
671 return;
672 case 0x000e: /* mov.l @(R0,Rm),Rn */
c55497ec 673 {
a7812ae4 674 TCGv addr = tcg_temp_new();
c55497ec 675 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
3376f415 676 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
677 tcg_temp_free(addr);
678 }
fdf9b3e8
FB
679 return;
680 case 0x6008: /* swap.b Rm,Rn */
c55497ec 681 {
218fd730 682 TCGv low = tcg_temp_new();
3101e99c
AJ
683 tcg_gen_ext16u_i32(low, REG(B7_4));
684 tcg_gen_bswap16_i32(low, low);
218fd730 685 tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
c55497ec 686 tcg_temp_free(low);
c55497ec 687 }
fdf9b3e8
FB
688 return;
689 case 0x6009: /* swap.w Rm,Rn */
c53b36d2 690 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
fdf9b3e8
FB
691 return;
692 case 0x200d: /* xtrct Rm,Rn */
c55497ec
AJ
693 {
694 TCGv high, low;
a7812ae4 695 high = tcg_temp_new();
3101e99c 696 tcg_gen_shli_i32(high, REG(B7_4), 16);
a7812ae4 697 low = tcg_temp_new();
c55497ec 698 tcg_gen_shri_i32(low, REG(B11_8), 16);
c55497ec
AJ
699 tcg_gen_or_i32(REG(B11_8), high, low);
700 tcg_temp_free(low);
701 tcg_temp_free(high);
702 }
fdf9b3e8
FB
703 return;
704 case 0x300c: /* add Rm,Rn */
7efbe241 705 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
706 return;
707 case 0x300e: /* addc Rm,Rn */
22b88fd7 708 {
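/* addc: Rn = Rn + Rm + T, with the carry out left in T. Done as two
   32-bit add2 steps: first t1 = Rm + T (carry captured in T), then
   Rn = Rn + t1, accumulating the second carry into T. */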
34086945 709 TCGv t0, t1;
a2368e01 710 t0 = tcg_const_tl(0);
22b88fd7 711 t1 = tcg_temp_new();
a2368e01
AJ
712 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
713 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
714 REG(B11_8), t0, t1, cpu_sr_t);
22b88fd7 715 tcg_temp_free(t0);
34086945 716 tcg_temp_free(t1);
22b88fd7 717 }
fdf9b3e8
FB
718 return;
719 case 0x300f: /* addv Rm,Rn */
ad8d25a1
AJ
720 {
721 TCGv t0, t1, t2;
722 t0 = tcg_temp_new();
723 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
724 t1 = tcg_temp_new();
725 tcg_gen_xor_i32(t1, t0, REG(B11_8));
726 t2 = tcg_temp_new();
727 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
34086945 728 tcg_gen_andc_i32(cpu_sr_t, t1, t2);
ad8d25a1 729 tcg_temp_free(t2);
34086945 730 tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
ad8d25a1
AJ
731 tcg_temp_free(t1);
732 tcg_gen_mov_i32(REG(B7_4), t0);
733 tcg_temp_free(t0);
734 }
fdf9b3e8
FB
735 return;
736 case 0x2009: /* and Rm,Rn */
7efbe241 737 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
738 return;
739 case 0x3000: /* cmp/eq Rm,Rn */
34086945 740 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
741 return;
742 case 0x3003: /* cmp/ge Rm,Rn */
34086945 743 tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
744 return;
745 case 0x3007: /* cmp/gt Rm,Rn */
34086945 746 tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
747 return;
748 case 0x3006: /* cmp/hi Rm,Rn */
34086945 749 tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
750 return;
751 case 0x3002: /* cmp/hs Rm,Rn */
34086945 752 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
fdf9b3e8
FB
753 return;
754 case 0x200c: /* cmp/str Rm,Rn */
69d6275b 755 {
c5c19137
AJ
756 TCGv cmp1 = tcg_temp_new();
757 TCGv cmp2 = tcg_temp_new();
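/* cmp/str sets T if any byte of Rm equals the corresponding byte of Rn.
   cmp2 = Rm ^ Rn has a zero byte exactly where the operands match, and
   (cmp2 - 0x01010101) & ~cmp2 & 0x80808080 is nonzero iff cmp2 contains
   a zero byte. */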
eb6ca2b4
AJ
758 tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
759 tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
760 tcg_gen_andc_i32(cmp1, cmp1, cmp2);
761 tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
762 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
c55497ec
AJ
763 tcg_temp_free(cmp2);
764 tcg_temp_free(cmp1);
69d6275b 765 }
fdf9b3e8
FB
766 return;
767 case 0x2007: /* div0s Rm,Rn */
1d565b21
AJ
768 tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31); /* SR_Q */
769 tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31); /* SR_M */
770 tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
fdf9b3e8
FB
771 return;
772 case 0x3004: /* div1 Rm,Rn */
1d565b21
AJ
773 {
774 TCGv t0 = tcg_temp_new();
775 TCGv t1 = tcg_temp_new();
776 TCGv t2 = tcg_temp_new();
777 TCGv zero = tcg_const_i32(0);
778
779 /* shift left arg1, saving the bit being pushed out and inserting
780 T on the right */
781 tcg_gen_shri_i32(t0, REG(B11_8), 31);
782 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
783 tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);
784
785 /* Add or subtract arg0 from arg1 depending if Q == M. To avoid
786 using 64-bit temps, we compute arg0's high part from q ^ m, so
787 that it is 0x00000000 when adding the value or 0xffffffff when
788 subtracting it. */
789 tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
790 tcg_gen_subi_i32(t1, t1, 1);
791 tcg_gen_neg_i32(t2, REG(B7_4));
792 tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
793 tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);
794
795 /* compute T and Q depending on carry */
796 tcg_gen_andi_i32(t1, t1, 1);
797 tcg_gen_xor_i32(t1, t1, t0);
798 tcg_gen_xori_i32(cpu_sr_t, t1, 1);
799 tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);
800
801 tcg_temp_free(zero);
802 tcg_temp_free(t2);
803 tcg_temp_free(t1);
804 tcg_temp_free(t0);
805 }
fdf9b3e8
FB
806 return;
807 case 0x300d: /* dmuls.l Rm,Rn */
1d3b7084 808 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
809 return;
810 case 0x3005: /* dmulu.l Rm,Rn */
1d3b7084 811 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
812 return;
813 case 0x600e: /* exts.b Rm,Rn */
7efbe241 814 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
815 return;
816 case 0x600f: /* exts.w Rm,Rn */
7efbe241 817 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
818 return;
819 case 0x600c: /* extu.b Rm,Rn */
7efbe241 820 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
821 return;
822 case 0x600d: /* extu.w Rm,Rn */
7efbe241 823 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
fdf9b3e8 824 return;
24988dc2 825 case 0x000f: /* mac.l @Rm+,@Rn+ */
c55497ec
AJ
826 {
827 TCGv arg0, arg1;
a7812ae4 828 arg0 = tcg_temp_new();
3376f415 829 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
a7812ae4 830 arg1 = tcg_temp_new();
3376f415 831 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
485d0035 832 gen_helper_macl(cpu_env, arg0, arg1);
c55497ec
AJ
833 tcg_temp_free(arg1);
834 tcg_temp_free(arg0);
835 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
836 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
837 }
fdf9b3e8
FB
838 return;
839 case 0x400f: /* mac.w @Rm+,@Rn+ */
c55497ec
AJ
840 {
841 TCGv arg0, arg1;
a7812ae4 842 arg0 = tcg_temp_new();
3376f415 843 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
a7812ae4 844 arg1 = tcg_temp_new();
3376f415 845 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
485d0035 846 gen_helper_macw(cpu_env, arg0, arg1);
c55497ec
AJ
847 tcg_temp_free(arg1);
848 tcg_temp_free(arg0);
849 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
850 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
851 }
fdf9b3e8
FB
852 return;
853 case 0x0007: /* mul.l Rm,Rn */
7efbe241 854 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
fdf9b3e8
FB
855 return;
856 case 0x200f: /* muls.w Rm,Rn */
c55497ec
AJ
857 {
858 TCGv arg0, arg1;
a7812ae4 859 arg0 = tcg_temp_new();
c55497ec 860 tcg_gen_ext16s_i32(arg0, REG(B7_4));
a7812ae4 861 arg1 = tcg_temp_new();
c55497ec
AJ
862 tcg_gen_ext16s_i32(arg1, REG(B11_8));
863 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
864 tcg_temp_free(arg1);
865 tcg_temp_free(arg0);
866 }
fdf9b3e8
FB
867 return;
868 case 0x200e: /* mulu.w Rm,Rn */
c55497ec
AJ
869 {
870 TCGv arg0, arg1;
a7812ae4 871 arg0 = tcg_temp_new();
c55497ec 872 tcg_gen_ext16u_i32(arg0, REG(B7_4));
a7812ae4 873 arg1 = tcg_temp_new();
c55497ec
AJ
874 tcg_gen_ext16u_i32(arg1, REG(B11_8));
875 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
876 tcg_temp_free(arg1);
877 tcg_temp_free(arg0);
878 }
fdf9b3e8
FB
879 return;
880 case 0x600b: /* neg Rm,Rn */
7efbe241 881 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
882 return;
883 case 0x600a: /* negc Rm,Rn */
b2d9eda5 884 {
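/* negc: Rn = 0 - Rm - T, with the borrow out left in T. Computed as
   Rm + T (via add2), subtracted from zero (via sub2), then the borrow
   masked down to a single bit. */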
60eb27fe
AJ
885 TCGv t0 = tcg_const_i32(0);
886 tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
887 REG(B7_4), t0, cpu_sr_t, t0);
888 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
889 t0, t0, REG(B11_8), cpu_sr_t);
890 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
b2d9eda5 891 tcg_temp_free(t0);
b2d9eda5 892 }
fdf9b3e8
FB
893 return;
894 case 0x6007: /* not Rm,Rn */
7efbe241 895 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
fdf9b3e8
FB
896 return;
897 case 0x200b: /* or Rm,Rn */
7efbe241 898 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
899 return;
900 case 0x400c: /* shad Rm,Rn */
69d6275b 901 {
be654c83
AJ
902 TCGv t0 = tcg_temp_new();
903 TCGv t1 = tcg_temp_new();
904 TCGv t2 = tcg_temp_new();
905
906 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
907
908 /* positive case: shift to the left */
909 tcg_gen_shl_i32(t1, REG(B11_8), t0);
910
911 /* negative case: shift to the right in two steps to
912 correctly handle the -32 case */
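/* e.g. a negative Rm with Rm[4:0] == 0 requests a right shift by 32;
   shifting by t0 ^ 0x1f = 31 and then by one more gives the full 32-bit
   arithmetic shift, which a single TCG 32-bit shift cannot express. */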
913 tcg_gen_xori_i32(t0, t0, 0x1f);
914 tcg_gen_sar_i32(t2, REG(B11_8), t0);
915 tcg_gen_sari_i32(t2, t2, 1);
916
917 /* select between the two cases */
918 tcg_gen_movi_i32(t0, 0);
919 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
920
921 tcg_temp_free(t0);
922 tcg_temp_free(t1);
923 tcg_temp_free(t2);
69d6275b 924 }
fdf9b3e8
FB
925 return;
926 case 0x400d: /* shld Rm,Rn */
69d6275b 927 {
57760161
AJ
928 TCGv t0 = tcg_temp_new();
929 TCGv t1 = tcg_temp_new();
930 TCGv t2 = tcg_temp_new();
931
932 tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);
933
934 /* positive case: shift to the left */
935 tcg_gen_shl_i32(t1, REG(B11_8), t0);
936
937 /* negative case: shift to the right in two steps to
938 correctly handle the -32 case */
939 tcg_gen_xori_i32(t0, t0, 0x1f);
940 tcg_gen_shr_i32(t2, REG(B11_8), t0);
941 tcg_gen_shri_i32(t2, t2, 1);
942
943 /* select between the two cases */
944 tcg_gen_movi_i32(t0, 0);
945 tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);
946
947 tcg_temp_free(t0);
948 tcg_temp_free(t1);
949 tcg_temp_free(t2);
69d6275b 950 }
fdf9b3e8
FB
951 return;
952 case 0x3008: /* sub Rm,Rn */
7efbe241 953 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8
FB
954 return;
955 case 0x300a: /* subc Rm,Rn */
22b88fd7 956 {
d0f44a55
AJ
957 TCGv t0, t1;
958 t0 = tcg_const_tl(0);
22b88fd7 959 t1 = tcg_temp_new();
d0f44a55
AJ
960 tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
961 tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
962 REG(B11_8), t0, t1, cpu_sr_t);
963 tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
22b88fd7 964 tcg_temp_free(t0);
d0f44a55 965 tcg_temp_free(t1);
22b88fd7 966 }
fdf9b3e8
FB
967 return;
968 case 0x300b: /* subv Rm,Rn */
ad8d25a1
AJ
969 {
970 TCGv t0, t1, t2;
971 t0 = tcg_temp_new();
972 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
973 t1 = tcg_temp_new();
974 tcg_gen_xor_i32(t1, t0, REG(B7_4));
975 t2 = tcg_temp_new();
976 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
977 tcg_gen_and_i32(t1, t1, t2);
978 tcg_temp_free(t2);
34086945 979 tcg_gen_shri_i32(cpu_sr_t, t1, 31);
ad8d25a1
AJ
980 tcg_temp_free(t1);
981 tcg_gen_mov_i32(REG(B11_8), t0);
982 tcg_temp_free(t0);
983 }
fdf9b3e8
FB
984 return;
985 case 0x2008: /* tst Rm,Rn */
c55497ec 986 {
a7812ae4 987 TCGv val = tcg_temp_new();
c55497ec 988 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
34086945 989 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec
AJ
990 tcg_temp_free(val);
991 }
fdf9b3e8
FB
992 return;
993 case 0x200a: /* xor Rm,Rn */
7efbe241 994 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
fdf9b3e8 995 return;
e67888a7 996 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 997 CHECK_FPU_ENABLED
a6215749 998 if (ctx->tbflags & FPSCR_SZ) {
bdcb3739
RH
999 int xsrc = XHACK(B7_4);
1000 int xdst = XHACK(B11_8);
1001 tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
1002 tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
eda9b09b 1003 } else {
7c9f7038 1004 tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
eda9b09b
FB
1005 }
1006 return;
e67888a7 1007 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
f6198371 1008 CHECK_FPU_ENABLED
a6215749 1009 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1010 TCGv_i64 fp = tcg_temp_new_i64();
1011 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1012 tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
1013 tcg_temp_free_i64(fp);
eda9b09b 1014 } else {
7c9f7038 1015 tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
eda9b09b
FB
1016 }
1017 return;
e67888a7 1018 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
f6198371 1019 CHECK_FPU_ENABLED
a6215749 1020 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1021 TCGv_i64 fp = tcg_temp_new_i64();
1022 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1023 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1024 tcg_temp_free_i64(fp);
eda9b09b 1025 } else {
7c9f7038 1026 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
eda9b09b
FB
1027 }
1028 return;
e67888a7 1029 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
f6198371 1030 CHECK_FPU_ENABLED
a6215749 1031 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1032 TCGv_i64 fp = tcg_temp_new_i64();
1033 tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
1034 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1035 tcg_temp_free_i64(fp);
1036 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
eda9b09b 1037 } else {
7c9f7038 1038 tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
cc4ba6a9 1039 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
eda9b09b
FB
1040 }
1041 return;
e67888a7 1042 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
f6198371 1043 CHECK_FPU_ENABLED
4d57fa50
RH
1044 {
1045 TCGv addr = tcg_temp_new_i32();
1046 if (ctx->tbflags & FPSCR_SZ) {
1047 TCGv_i64 fp = tcg_temp_new_i64();
1048 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1049 tcg_gen_subi_i32(addr, REG(B11_8), 8);
1050 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1051 tcg_temp_free_i64(fp);
1052 } else {
1053 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1054 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
1055 }
1056 tcg_gen_mov_i32(REG(B11_8), addr);
1057 tcg_temp_free(addr);
1058 }
eda9b09b 1059 return;
e67888a7 1060 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
f6198371 1061 CHECK_FPU_ENABLED
cc4ba6a9 1062 {
a7812ae4 1063 TCGv addr = tcg_temp_new_i32();
cc4ba6a9 1064 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
a6215749 1065 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1066 TCGv_i64 fp = tcg_temp_new_i64();
1067 tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
1068 gen_store_fpr64(ctx, fp, XHACK(B11_8));
1069 tcg_temp_free_i64(fp);
cc4ba6a9 1070 } else {
7c9f7038 1071 tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
cc4ba6a9
AJ
1072 }
1073 tcg_temp_free(addr);
eda9b09b
FB
1074 }
1075 return;
e67888a7 1076 case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
f6198371 1077 CHECK_FPU_ENABLED
cc4ba6a9 1078 {
a7812ae4 1079 TCGv addr = tcg_temp_new();
cc4ba6a9 1080 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
a6215749 1081 if (ctx->tbflags & FPSCR_SZ) {
4d57fa50
RH
1082 TCGv_i64 fp = tcg_temp_new_i64();
1083 gen_load_fpr64(ctx, fp, XHACK(B7_4));
1084 tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
1085 tcg_temp_free_i64(fp);
cc4ba6a9 1086 } else {
7c9f7038 1087 tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
cc4ba6a9
AJ
1088 }
1089 tcg_temp_free(addr);
eda9b09b
FB
1090 }
1091 return;
e67888a7
TS
1092 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1093 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1094 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1095 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1096 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1097 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
cc4ba6a9 1098 {
f6198371 1099 CHECK_FPU_ENABLED
a6215749 1100 if (ctx->tbflags & FPSCR_PR) {
a7812ae4
PB
1101 TCGv_i64 fp0, fp1;
1102
93dc9c89
RH
1103 if (ctx->opcode & 0x0110) {
1104 goto do_illegal;
1105 }
a7812ae4
PB
1106 fp0 = tcg_temp_new_i64();
1107 fp1 = tcg_temp_new_i64();
1e0b21d8
RH
1108 gen_load_fpr64(ctx, fp0, B11_8);
1109 gen_load_fpr64(ctx, fp1, B7_4);
a7812ae4
PB
1110 switch (ctx->opcode & 0xf00f) {
1111 case 0xf000: /* fadd Rm,Rn */
485d0035 1112 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1113 break;
1114 case 0xf001: /* fsub Rm,Rn */
485d0035 1115 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1116 break;
1117 case 0xf002: /* fmul Rm,Rn */
485d0035 1118 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1119 break;
1120 case 0xf003: /* fdiv Rm,Rn */
485d0035 1121 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
a7812ae4
PB
1122 break;
1123 case 0xf004: /* fcmp/eq Rm,Rn */
92f1f83e 1124 gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
a7812ae4
PB
1125 return;
1126 case 0xf005: /* fcmp/gt Rm,Rn */
92f1f83e 1127 gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
a7812ae4
PB
1128 return;
1129 }
1e0b21d8 1130 gen_store_fpr64(ctx, fp0, B11_8);
a7812ae4
PB
1131 tcg_temp_free_i64(fp0);
1132 tcg_temp_free_i64(fp1);
1133 } else {
a7812ae4
PB
1134 switch (ctx->opcode & 0xf00f) {
1135 case 0xf000: /* fadd Rm,Rn */
7c9f7038
RH
1136 gen_helper_fadd_FT(FREG(B11_8), cpu_env,
1137 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1138 break;
1139 case 0xf001: /* fsub Rm,Rn */
7c9f7038
RH
1140 gen_helper_fsub_FT(FREG(B11_8), cpu_env,
1141 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1142 break;
1143 case 0xf002: /* fmul Rm,Rn */
7c9f7038
RH
1144 gen_helper_fmul_FT(FREG(B11_8), cpu_env,
1145 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1146 break;
1147 case 0xf003: /* fdiv Rm,Rn */
7c9f7038
RH
1148 gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
1149 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1150 break;
1151 case 0xf004: /* fcmp/eq Rm,Rn */
92f1f83e 1152 gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
7c9f7038 1153 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1154 return;
1155 case 0xf005: /* fcmp/gt Rm,Rn */
92f1f83e 1156 gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
7c9f7038 1157 FREG(B11_8), FREG(B7_4));
a7812ae4
PB
1158 return;
1159 }
cc4ba6a9 1160 }
ea6cf6be
TS
1161 }
1162 return;
5b7141a1 1163 case 0xf00e: /* fmac FR0,Rm,Rn */
7e9f7ca8
RH
1164 CHECK_FPU_ENABLED
1165 CHECK_FPSCR_PR_0
1166 gen_helper_fmac_FT(FREG(B11_8), cpu_env,
1167 FREG(0), FREG(B7_4), FREG(B11_8));
1168 return;
fdf9b3e8
FB
1169 }
1170
1171 switch (ctx->opcode & 0xff00) {
1172 case 0xc900: /* and #imm,R0 */
7efbe241 1173 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1174 return;
24988dc2 1175 case 0xcd00: /* and.b #imm,@(R0,GBR) */
c55497ec
AJ
1176 {
1177 TCGv addr, val;
a7812ae4 1178 addr = tcg_temp_new();
c55497ec 1179 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1180 val = tcg_temp_new();
3376f415 1181 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1182 tcg_gen_andi_i32(val, val, B7_0);
3376f415 1183 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec
AJ
1184 tcg_temp_free(val);
1185 tcg_temp_free(addr);
1186 }
fdf9b3e8
FB
1187 return;
1188 case 0x8b00: /* bf label */
1189 CHECK_NOT_DELAY_SLOT
4bfa602b 1190 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, false);
fdf9b3e8
FB
1191 return;
1192 case 0x8f00: /* bf/s label */
1193 CHECK_NOT_DELAY_SLOT
ac9707ea
AJ
1194 tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
1195 ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
a6215749 1196 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
fdf9b3e8
FB
1197 return;
1198 case 0x8900: /* bt label */
1199 CHECK_NOT_DELAY_SLOT
4bfa602b 1200 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, true);
fdf9b3e8
FB
1201 return;
1202 case 0x8d00: /* bt/s label */
1203 CHECK_NOT_DELAY_SLOT
ac9707ea
AJ
1204 tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
1205 ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
a6215749 1206 ctx->envflags |= DELAY_SLOT_CONDITIONAL;
fdf9b3e8
FB
1207 return;
1208 case 0x8800: /* cmp/eq #imm,R0 */
34086945 1209 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
fdf9b3e8
FB
1210 return;
1211 case 0xc400: /* mov.b @(disp,GBR),R0 */
c55497ec 1212 {
a7812ae4 1213 TCGv addr = tcg_temp_new();
c55497ec 1214 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1215 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec
AJ
1216 tcg_temp_free(addr);
1217 }
fdf9b3e8
FB
1218 return;
1219 case 0xc500: /* mov.w @(disp,GBR),R0 */
c55497ec 1220 {
a7812ae4 1221 TCGv addr = tcg_temp_new();
c55497ec 1222 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
3376f415 1223 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
1224 tcg_temp_free(addr);
1225 }
fdf9b3e8
FB
1226 return;
1227 case 0xc600: /* mov.l @(disp,GBR),R0 */
c55497ec 1228 {
a7812ae4 1229 TCGv addr = tcg_temp_new();
c55497ec 1230 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
3376f415 1231 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
c55497ec
AJ
1232 tcg_temp_free(addr);
1233 }
fdf9b3e8
FB
1234 return;
1235 case 0xc000: /* mov.b R0,@(disp,GBR) */
c55497ec 1236 {
a7812ae4 1237 TCGv addr = tcg_temp_new();
c55497ec 1238 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
3376f415 1239 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec
AJ
1240 tcg_temp_free(addr);
1241 }
fdf9b3e8
FB
1242 return;
1243 case 0xc100: /* mov.w R0,@(disp,GBR) */
c55497ec 1244 {
a7812ae4 1245 TCGv addr = tcg_temp_new();
c55497ec 1246 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
3376f415 1247 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
c55497ec
AJ
1248 tcg_temp_free(addr);
1249 }
fdf9b3e8
FB
1250 return;
1251 case 0xc200: /* mov.l R0,@(disp,GBR) */
c55497ec 1252 {
a7812ae4 1253 TCGv addr = tcg_temp_new();
c55497ec 1254 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
3376f415 1255 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
c55497ec
AJ
1256 tcg_temp_free(addr);
1257 }
fdf9b3e8
FB
1258 return;
1259 case 0x8000: /* mov.b R0,@(disp,Rn) */
c55497ec 1260 {
a7812ae4 1261 TCGv addr = tcg_temp_new();
c55497ec 1262 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1263 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
c55497ec
AJ
1264 tcg_temp_free(addr);
1265 }
fdf9b3e8
FB
1266 return;
1267 case 0x8100: /* mov.w R0,@(disp,Rn) */
c55497ec 1268 {
a7812ae4 1269 TCGv addr = tcg_temp_new();
c55497ec 1270 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
3376f415 1271 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
c55497ec
AJ
1272 tcg_temp_free(addr);
1273 }
fdf9b3e8
FB
1274 return;
1275 case 0x8400: /* mov.b @(disp,Rn),R0 */
c55497ec 1276 {
a7812ae4 1277 TCGv addr = tcg_temp_new();
c55497ec 1278 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
3376f415 1279 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
c55497ec
AJ
1280 tcg_temp_free(addr);
1281 }
fdf9b3e8
FB
1282 return;
1283 case 0x8500: /* mov.w @(disp,Rn),R0 */
c55497ec 1284 {
a7812ae4 1285 TCGv addr = tcg_temp_new();
c55497ec 1286 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
3376f415 1287 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
c55497ec
AJ
1288 tcg_temp_free(addr);
1289 }
fdf9b3e8
FB
1290 return;
1291 case 0xc700: /* mova @(disp,PC),R0 */
7efbe241 1292 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
fdf9b3e8
FB
1293 return;
1294 case 0xcb00: /* or #imm,R0 */
7efbe241 1295 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1296 return;
24988dc2 1297 case 0xcf00: /* or.b #imm,@(R0,GBR) */
c55497ec
AJ
1298 {
1299 TCGv addr, val;
a7812ae4 1300 addr = tcg_temp_new();
c55497ec 1301 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1302 val = tcg_temp_new();
3376f415 1303 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1304 tcg_gen_ori_i32(val, val, B7_0);
3376f415 1305 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec
AJ
1306 tcg_temp_free(val);
1307 tcg_temp_free(addr);
1308 }
fdf9b3e8
FB
1309 return;
1310 case 0xc300: /* trapa #imm */
c55497ec
AJ
1311 {
1312 TCGv imm;
1313 CHECK_NOT_DELAY_SLOT
ac9707ea 1314 gen_save_cpu_state(ctx, true);
c55497ec 1315 imm = tcg_const_i32(B7_0);
485d0035 1316 gen_helper_trapa(cpu_env, imm);
c55497ec 1317 tcg_temp_free(imm);
63205665 1318 ctx->bstate = BS_EXCP;
c55497ec 1319 }
fdf9b3e8
FB
1320 return;
1321 case 0xc800: /* tst #imm,R0 */
c55497ec 1322 {
a7812ae4 1323 TCGv val = tcg_temp_new();
c55497ec 1324 tcg_gen_andi_i32(val, REG(0), B7_0);
34086945 1325 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec
AJ
1326 tcg_temp_free(val);
1327 }
fdf9b3e8 1328 return;
24988dc2 1329 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
c55497ec 1330 {
a7812ae4 1331 TCGv val = tcg_temp_new();
c55497ec 1332 tcg_gen_add_i32(val, REG(0), cpu_gbr);
3376f415 1333 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
c55497ec 1334 tcg_gen_andi_i32(val, val, B7_0);
34086945 1335 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
c55497ec
AJ
1336 tcg_temp_free(val);
1337 }
fdf9b3e8
FB
1338 return;
1339 case 0xca00: /* xor #imm,R0 */
7efbe241 1340 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
fdf9b3e8 1341 return;
24988dc2 1342 case 0xce00: /* xor.b #imm,@(R0,GBR) */
c55497ec
AJ
1343 {
1344 TCGv addr, val;
a7812ae4 1345 addr = tcg_temp_new();
c55497ec 1346 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
a7812ae4 1347 val = tcg_temp_new();
3376f415 1348 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
c55497ec 1349 tcg_gen_xori_i32(val, val, B7_0);
3376f415 1350 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
c55497ec
AJ
1351 tcg_temp_free(val);
1352 tcg_temp_free(addr);
1353 }
fdf9b3e8
FB
1354 return;
1355 }
1356
1357 switch (ctx->opcode & 0xf08f) {
1358 case 0x408e: /* ldc Rm,Rn_BANK */
fe25591e 1359 CHECK_PRIVILEGED
7efbe241 1360 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
fdf9b3e8
FB
1361 return;
1362 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
fe25591e 1363 CHECK_PRIVILEGED
3376f415 1364 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
7efbe241 1365 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
fdf9b3e8
FB
1366 return;
1367 case 0x0082: /* stc Rm_BANK,Rn */
fe25591e 1368 CHECK_PRIVILEGED
7efbe241 1369 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
fdf9b3e8
FB
1370 return;
1371 case 0x4083: /* stc.l Rm_BANK,@-Rn */
fe25591e 1372 CHECK_PRIVILEGED
c55497ec 1373 {
a7812ae4 1374 TCGv addr = tcg_temp_new();
c55497ec 1375 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 1376 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
3101e99c 1377 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec 1378 tcg_temp_free(addr);
c55497ec 1379 }
fdf9b3e8
FB
1380 return;
1381 }
1382
1383 switch (ctx->opcode & 0xf0ff) {
1384 case 0x0023: /* braf Rn */
7efbe241
AJ
1385 CHECK_NOT_DELAY_SLOT
1386 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
a6215749 1387 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1388 ctx->delayed_pc = (uint32_t) - 1;
1389 return;
1390 case 0x0003: /* bsrf Rn */
7efbe241 1391 CHECK_NOT_DELAY_SLOT
1000822b 1392 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
7efbe241 1393 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
a6215749 1394 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1395 ctx->delayed_pc = (uint32_t) - 1;
1396 return;
1397 case 0x4015: /* cmp/pl Rn */
34086945 1398 tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1399 return;
1400 case 0x4011: /* cmp/pz Rn */
34086945 1401 tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1402 return;
1403 case 0x4010: /* dt Rn */
7efbe241 1404 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
34086945 1405 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
fdf9b3e8
FB
1406 return;
1407 case 0x402b: /* jmp @Rn */
7efbe241
AJ
1408 CHECK_NOT_DELAY_SLOT
1409 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
a6215749 1410 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1411 ctx->delayed_pc = (uint32_t) - 1;
1412 return;
1413 case 0x400b: /* jsr @Rn */
7efbe241 1414 CHECK_NOT_DELAY_SLOT
1000822b 1415 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
7efbe241 1416 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
a6215749 1417 ctx->envflags |= DELAY_SLOT;
fdf9b3e8
FB
1418 ctx->delayed_pc = (uint32_t) - 1;
1419 return;
fe25591e
AJ
1420 case 0x400e: /* ldc Rm,SR */
1421 CHECK_PRIVILEGED
34086945
AJ
1422 {
1423 TCGv val = tcg_temp_new();
1424 tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
1425 gen_write_sr(val);
1426 tcg_temp_free(val);
1427 ctx->bstate = BS_STOP;
1428 }
390af821 1429 return;
fe25591e
AJ
1430 case 0x4007: /* ldc.l @Rm+,SR */
1431 CHECK_PRIVILEGED
c55497ec 1432 {
a7812ae4 1433 TCGv val = tcg_temp_new();
3376f415 1434 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
34086945
AJ
1435 tcg_gen_andi_i32(val, val, 0x700083f3);
1436 gen_write_sr(val);
c55497ec
AJ
1437 tcg_temp_free(val);
1438 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1439 ctx->bstate = BS_STOP;
1440 }
390af821 1441 return;
fe25591e
AJ
1442 case 0x0002: /* stc SR,Rn */
1443 CHECK_PRIVILEGED
34086945 1444 gen_read_sr(REG(B11_8));
390af821 1445 return;
fe25591e
AJ
1446 case 0x4003: /* stc SR,@-Rn */
1447 CHECK_PRIVILEGED
c55497ec 1448 {
a7812ae4 1449 TCGv addr = tcg_temp_new();
34086945 1450 TCGv val = tcg_temp_new();
c55497ec 1451 tcg_gen_subi_i32(addr, REG(B11_8), 4);
34086945
AJ
1452 gen_read_sr(val);
1453 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
3101e99c 1454 tcg_gen_mov_i32(REG(B11_8), addr);
34086945 1455 tcg_temp_free(val);
c55497ec 1456 tcg_temp_free(addr);
c55497ec 1457 }
390af821 1458 return;
8e9b0678 1459#define LD(reg,ldnum,ldpnum,prechk) \
fdf9b3e8 1460 case ldnum: \
fe25591e 1461 prechk \
7efbe241 1462 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
fdf9b3e8
FB
1463 return; \
1464 case ldpnum: \
fe25591e 1465 prechk \
3376f415 1466 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
7efbe241 1467 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
8e9b0678
AC
1468 return;
1469#define ST(reg,stnum,stpnum,prechk) \
fdf9b3e8 1470 case stnum: \
fe25591e 1471 prechk \
7efbe241 1472 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
fdf9b3e8
FB
1473 return; \
1474 case stpnum: \
fe25591e 1475 prechk \
c55497ec 1476 { \
3101e99c 1477 TCGv addr = tcg_temp_new(); \
c55497ec 1478 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
3376f415 1479 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
3101e99c 1480 tcg_gen_mov_i32(REG(B11_8), addr); \
c55497ec 1481 tcg_temp_free(addr); \
86e0abc7 1482 } \
fdf9b3e8 1483 return;
8e9b0678
AC
1484#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1485 LD(reg,ldnum,ldpnum,prechk) \
1486 ST(reg,stnum,stpnum,prechk)
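/* For example, LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {}) expands to
   the cases for ldc Rm,GBR (0x401e), ldc.l @Rm+,GBR (0x4017),
   stc GBR,Rn (0x0012) and stc.l GBR,@-Rn (0x4013). */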
fe25591e
AJ
1487 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1488 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1489 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1490 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
935fc175 1491 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
ccae24d4 1492 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
fe25591e
AJ
1493 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1494 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1495 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1496 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
d8299bcc 1497 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
390af821 1498 case 0x406a: /* lds Rm,FPSCR */
d8299bcc 1499 CHECK_FPU_ENABLED
485d0035 1500 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
390af821
AJ
1501 ctx->bstate = BS_STOP;
1502 return;
1503 case 0x4066: /* lds.l @Rm+,FPSCR */
d8299bcc 1504 CHECK_FPU_ENABLED
c55497ec 1505 {
a7812ae4 1506 TCGv addr = tcg_temp_new();
3376f415 1507 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
c55497ec 1508 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
485d0035 1509 gen_helper_ld_fpscr(cpu_env, addr);
c55497ec
AJ
1510 tcg_temp_free(addr);
1511 ctx->bstate = BS_STOP;
1512 }
390af821
AJ
1513 return;
1514 case 0x006a: /* sts FPSCR,Rn */
d8299bcc 1515 CHECK_FPU_ENABLED
c55497ec 1516 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
390af821
AJ
1517 return;
1518 case 0x4062: /* sts FPSCR,@-Rn */
d8299bcc 1519 CHECK_FPU_ENABLED
c55497ec
AJ
1520 {
1521 TCGv addr, val;
a7812ae4 1522 val = tcg_temp_new();
c55497ec 1523 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
a7812ae4 1524 addr = tcg_temp_new();
c55497ec 1525 tcg_gen_subi_i32(addr, REG(B11_8), 4);
3376f415 1526 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
3101e99c 1527 tcg_gen_mov_i32(REG(B11_8), addr);
c55497ec
AJ
1528 tcg_temp_free(addr);
1529 tcg_temp_free(val);
c55497ec 1530 }
390af821 1531 return;
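/* movca.l stores R0 to the cache line for @Rm without first fetching it
   from memory.  The old value is loaded here and passed to the movcal
   helper so that a later ocbi on the same line can restore it, mimicking
   a line that is invalidated before ever being written back. */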
fdf9b3e8 1532 case 0x00c3: /* movca.l R0,@Rm */
852d481f
EI
1533 {
1534 TCGv val = tcg_temp_new();
3376f415 1535 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
485d0035 1536 gen_helper_movcal(cpu_env, REG(B11_8), val);
3376f415 1537 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
 tcg_temp_free(val);
852d481f
EI
1538 }
1539 ctx->has_movcal = 1;
fdf9b3e8 1540 return;
143021b2 1541 case 0x40a9: /* movua.l @Rm,R0 */
ccae24d4 1542 CHECK_SH4A
143021b2 1543 /* Load non-boundary-aligned data */
ccae24d4
RH
1544 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1545 MO_TEUL | MO_UNALN);
1546 return;
143021b2
AJ
1547 break;
1548 case 0x40e9: /* movua.l @Rm+,R0 */
ccae24d4 1549 CHECK_SH4A
143021b2 1550 /* Load non-boundary-aligned data */
ccae24d4
RH
1551 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
1552 MO_TEUL | MO_UNALN);
1553 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1554 return;
143021b2 1555 break;
fdf9b3e8 1556 case 0x0029: /* movt Rn */
34086945 1557 tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
fdf9b3e8 1558 return;
66c7c806
AJ
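/* MOVLI.L/MOVCO.L are SH4A's load-linked/store-conditional pair.  Only the
   LDST link flag is modelled: movli.l sets it, movco.l copies it to T and
   performs the store only while it is still set, and (as the case comments
   below note) it is cleared when an interrupt or exception is taken. */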
1559 case 0x0073:
1560 /* MOVCO.L
1561 LDST -> T
1562 If (T == 1) R0 -> (Rn)
1563 0 -> LDST
1564 */
ccae24d4
RH
1565 CHECK_SH4A
1566 {
42a268c2 1567 TCGLabel *label = gen_new_label();
34086945 1568 tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
66c7c806 1569 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
3376f415 1570 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
66c7c806
AJ
1571 gen_set_label(label);
1572 tcg_gen_movi_i32(cpu_ldst, 0);
1573 return;
ccae24d4 1574 }
66c7c806
AJ
1575 case 0x0063:
1576 /* MOVLI.L @Rm,R0
1577 1 -> LDST
1578 (Rm) -> R0
 1579 When an interrupt/exception
 1580 occurs, 0 -> LDST
1581 */
ccae24d4
RH
1582 CHECK_SH4A
1583 tcg_gen_movi_i32(cpu_ldst, 0);
1584 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1585 tcg_gen_movi_i32(cpu_ldst, 1);
1586 return;
fdf9b3e8 1587 case 0x0093: /* ocbi @Rn */
c55497ec 1588 {
485d0035 1589 gen_helper_ocbi(cpu_env, REG(B11_8));
c55497ec 1590 }
fdf9b3e8 1591 return;
24988dc2 1592 case 0x00a3: /* ocbp @Rn */
fdf9b3e8 1593 case 0x00b3: /* ocbwb @Rn */
0cdb9554
AJ
1594 /* These instructions are supposed to do nothing in case of
1595 a cache miss. Given that we only partially emulate caches
1596 it is safe to simply ignore them. */
fdf9b3e8
FB
1597 return;
1598 case 0x0083: /* pref @Rn */
1599 return;
71968fa6 1600 case 0x00d3: /* prefi @Rn */
ccae24d4
RH
1601 CHECK_SH4A
1602 return;
71968fa6 1603 case 0x00e3: /* icbi @Rn */
ccae24d4
RH
1604 CHECK_SH4A
1605 return;
71968fa6 1606 case 0x00ab: /* synco */
ccae24d4
RH
1607 CHECK_SH4A
1608 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
1609 return;
aa351317 1610 break;
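/* rotcl/rotcr rotate Rn one bit through the T flag: the bit shifted out
   becomes the new T while the previous T is shifted in at the other end. */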
fdf9b3e8 1611 case 0x4024: /* rotcl Rn */
c55497ec 1612 {
a7812ae4 1613 TCGv tmp = tcg_temp_new();
34086945
AJ
1614 tcg_gen_mov_i32(tmp, cpu_sr_t);
1615 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
c55497ec 1616 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1617 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec
AJ
1618 tcg_temp_free(tmp);
1619 }
fdf9b3e8
FB
1620 return;
1621 case 0x4025: /* rotcr Rn */
c55497ec 1622 {
a7812ae4 1623 TCGv tmp = tcg_temp_new();
34086945
AJ
1624 tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
1625 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
c55497ec 1626 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
34086945 1627 tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
c55497ec
AJ
1628 tcg_temp_free(tmp);
1629 }
fdf9b3e8
FB
1630 return;
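/* rotl/rotr rotate without the carry; T receives the bit that wraps
   around (the old MSB for rotl, the old LSB for rotr). */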
1631 case 0x4004: /* rotl Rn */
2411fde9 1632 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
34086945 1633 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
fdf9b3e8
FB
1634 return;
1635 case 0x4005: /* rotr Rn */
34086945 1636 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
2411fde9 1637 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1638 return;
1639 case 0x4000: /* shll Rn */
1640 case 0x4020: /* shal Rn */
34086945 1641 tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
7efbe241 1642 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1643 return;
1644 case 0x4021: /* shar Rn */
34086945 1645 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1646 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1647 return;
1648 case 0x4001: /* shlr Rn */
34086945 1649 tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
7efbe241 1650 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
fdf9b3e8
FB
1651 return;
1652 case 0x4008: /* shll2 Rn */
7efbe241 1653 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1654 return;
1655 case 0x4018: /* shll8 Rn */
7efbe241 1656 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1657 return;
1658 case 0x4028: /* shll16 Rn */
7efbe241 1659 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1660 return;
1661 case 0x4009: /* shlr2 Rn */
7efbe241 1662 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
fdf9b3e8
FB
1663 return;
1664 case 0x4019: /* shlr8 Rn */
7efbe241 1665 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
fdf9b3e8
FB
1666 return;
1667 case 0x4029: /* shlr16 Rn */
7efbe241 1668 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
fdf9b3e8
FB
1669 return;
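/* tas.b reads a byte, sets T if it was zero, and writes the byte back with
   bit 7 set.  An atomic fetch-or keeps the read-modify-write indivisible
   even when other vCPUs or threads touch the same location. */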
1670 case 0x401b: /* tas.b @Rn */
cb32f179
AJ
1671 {
1672 TCGv val = tcg_const_i32(0x80);
1673 tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
1674 ctx->memidx, MO_UB);
34086945 1675 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
cb32f179
AJ
1676 tcg_temp_free(val);
1677 }
1678 return;
e67888a7 1679 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
f6198371 1680 CHECK_FPU_ENABLED
7c9f7038 1681 tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
eda9b09b 1682 return;
e67888a7 1683 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
f6198371 1684 CHECK_FPU_ENABLED
7c9f7038 1685 tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
eda9b09b 1686 return;
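/* For float/ftrc, FPSCR.PR selects double precision.  In that mode the
   operand is a DRn register pair, so an odd register number (opcode bit 8
   set) is an illegal encoding and is sent to do_illegal. */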
e67888a7 1687 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
f6198371 1688 CHECK_FPU_ENABLED
a6215749 1689 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1690 TCGv_i64 fp;
93dc9c89
RH
1691 if (ctx->opcode & 0x0100) {
1692 goto do_illegal;
1693 }
a7812ae4 1694 fp = tcg_temp_new_i64();
485d0035 1695 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1696 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1697 tcg_temp_free_i64(fp);
ea6cf6be
TS
1698 }
1699 else {
7c9f7038 1700 gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
ea6cf6be
TS
1701 }
1702 return;
e67888a7 1703 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
f6198371 1704 CHECK_FPU_ENABLED
a6215749 1705 if (ctx->tbflags & FPSCR_PR) {
a7812ae4 1706 TCGv_i64 fp;
93dc9c89
RH
1707 if (ctx->opcode & 0x0100) {
1708 goto do_illegal;
1709 }
a7812ae4 1710 fp = tcg_temp_new_i64();
1e0b21d8 1711 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1712 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
a7812ae4 1713 tcg_temp_free_i64(fp);
ea6cf6be
TS
1714 }
1715 else {
7c9f7038 1716 gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
ea6cf6be
TS
1717 }
1718 return;
24988dc2 1719 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
f6198371 1720 CHECK_FPU_ENABLED
7c9f7038 1721 tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
24988dc2 1722 return;
57f5c1b0 1723 case 0xf05d: /* fabs FRn/DRn - FPCSR: Nothing */
f6198371 1724 CHECK_FPU_ENABLED
7c9f7038 1725 tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
24988dc2
AJ
1726 return;
1727 case 0xf06d: /* fsqrt FRn */
f6198371 1728 CHECK_FPU_ENABLED
a6215749 1729 if (ctx->tbflags & FPSCR_PR) {
93dc9c89
RH
1730 if (ctx->opcode & 0x0100) {
1731 goto do_illegal;
1732 }
a7812ae4 1733 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1734 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1735 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1e0b21d8 1736 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1737 tcg_temp_free_i64(fp);
24988dc2 1738 } else {
7c9f7038 1739 gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2
AJ
1740 }
1741 return;
1742 case 0xf07d: /* fsrra FRn */
f6198371 1743 CHECK_FPU_ENABLED
11b7aa23
RH
1744 CHECK_FPSCR_PR_0
1745 gen_helper_fsrra_FT(FREG(B11_8), cpu_env, FREG(B11_8));
24988dc2 1746 return;
e67888a7 1747 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
f6198371 1748 CHECK_FPU_ENABLED
7e9f7ca8
RH
1749 CHECK_FPSCR_PR_0
1750 tcg_gen_movi_i32(FREG(B11_8), 0);
1751 return;
e67888a7 1752 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
f6198371 1753 CHECK_FPU_ENABLED
7e9f7ca8
RH
1754 CHECK_FPSCR_PR_0
1755 tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
1756 return;
24988dc2 1757 case 0xf0ad: /* fcnvsd FPUL,DRn */
f6198371 1758 CHECK_FPU_ENABLED
cc4ba6a9 1759 {
a7812ae4 1760 TCGv_i64 fp = tcg_temp_new_i64();
485d0035 1761 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1e0b21d8 1762 gen_store_fpr64(ctx, fp, B11_8);
a7812ae4 1763 tcg_temp_free_i64(fp);
cc4ba6a9 1764 }
24988dc2
AJ
1765 return;
1766 case 0xf0bd: /* fcnvds DRn,FPUL */
f6198371 1767 CHECK_FPU_ENABLED
cc4ba6a9 1768 {
a7812ae4 1769 TCGv_i64 fp = tcg_temp_new_i64();
1e0b21d8 1770 gen_load_fpr64(ctx, fp, B11_8);
485d0035 1771 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
a7812ae4 1772 tcg_temp_free_i64(fp);
cc4ba6a9 1773 }
24988dc2 1774 return;
af8c2bde
AJ
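/* fipr and ftrv work on 4-element single-precision vectors: the 2-bit
   fields extracted below select FV0/FV4/FV8/FV12, and ftrv additionally
   multiplies by the XMTRX matrix held in the other FP register bank. */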
1775 case 0xf0ed: /* fipr FVm,FVn */
1776 CHECK_FPU_ENABLED
7e9f7ca8
RH
1777 CHECK_FPSCR_PR_1
1778 {
1779 TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
1780 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
485d0035 1781 gen_helper_fipr(cpu_env, m, n);
af8c2bde
AJ
1782 tcg_temp_free(m);
1783 tcg_temp_free(n);
1784 return;
1785 }
1786 break;
17075f10
AJ
1787 case 0xf0fd: /* ftrv XMTRX,FVn */
1788 CHECK_FPU_ENABLED
7e9f7ca8
RH
1789 CHECK_FPSCR_PR_1
1790 {
1791 if ((ctx->opcode & 0x0300) != 0x0100) {
1792 goto do_illegal;
1793 }
1794 TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
485d0035 1795 gen_helper_ftrv(cpu_env, n);
17075f10
AJ
1796 tcg_temp_free(n);
1797 return;
1798 }
1799 break;
fdf9b3e8 1800 }
bacc637a 1801#if 0
fdf9b3e8
FB
1802 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1803 ctx->opcode, ctx->pc);
bacc637a
AJ
1804 fflush(stderr);
1805#endif
6b98213d 1806 do_illegal:
9a562ae7 1807 if (ctx->envflags & DELAY_SLOT_MASK) {
dec16c6e
RH
1808 do_illegal_slot:
1809 gen_save_cpu_state(ctx, true);
485d0035 1810 gen_helper_raise_slot_illegal_instruction(cpu_env);
86865c5f 1811 } else {
dec16c6e 1812 gen_save_cpu_state(ctx, true);
485d0035 1813 gen_helper_raise_illegal_instruction(cpu_env);
86865c5f 1814 }
63205665 1815 ctx->bstate = BS_EXCP;
dec4f042
RH
1816 return;
1817
1818 do_fpu_disabled:
1819 gen_save_cpu_state(ctx, true);
1820 if (ctx->envflags & DELAY_SLOT_MASK) {
1821 gen_helper_raise_slot_fpu_disable(cpu_env);
1822 } else {
1823 gen_helper_raise_fpu_disable(cpu_env);
1824 }
1825 ctx->bstate = BS_EXCP;
1826 return;
823029f9
TS
1827}
1828
b1d8e52e 1829static void decode_opc(DisasContext * ctx)
823029f9 1830{
a6215749 1831 uint32_t old_flags = ctx->envflags;
823029f9
TS
1832
1833 _decode_opc(ctx);
1834
9a562ae7 1835 if (old_flags & DELAY_SLOT_MASK) {
39682608 1836 /* go out of the delay slot */
9a562ae7 1837 ctx->envflags &= ~DELAY_SLOT_MASK;
4bfa602b
RH
1838
1839 /* When in an exclusive region, we must continue to the end
1840 for conditional branches. */
1841 if (ctx->tbflags & GUSA_EXCLUSIVE
1842 && old_flags & DELAY_SLOT_CONDITIONAL) {
1843 gen_delayed_conditional_jump(ctx);
1844 return;
1845 }
1846 /* Otherwise this is probably an invalid gUSA region.
1847 Drop the GUSA bits so the next TB doesn't see them. */
1848 ctx->envflags &= ~GUSA_MASK;
1849
ac9707ea 1850 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
823029f9
TS
1851 ctx->bstate = BS_BRANCH;
1852 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1853 gen_delayed_conditional_jump(ctx);
be53081a 1854 } else {
823029f9
TS
1855 gen_jump(ctx);
1856 }
4bfa602b
RH
1857 }
1858}
1859
1860#ifdef CONFIG_USER_ONLY
1861/* For uniprocessors, SH4 uses optimistic restartable atomic sequences.
1862 Upon an interrupt, a real kernel would simply notice magic values in
1863 the registers and reset the PC to the start of the sequence.
1864
1865 For QEMU, we cannot do this in quite the same way. Instead, we notice
1866 the normal start of such a sequence (mov #-x,r15). While we can handle
1867 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1868 sequences and transform them into atomic operations as seen by the host.
1869*/
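/* Illustrative shape of such a region (register numbers are an example
   only, not taken from real libc output):

       mov    #-6, r15        ! enter region: r15 = -(length in bytes)
       mov.l  @r4, r0         ! load the old value
       add    r1, r0          ! operate on it
       mov.l  r0, @r4         ! store the result (last insn of the region)
                              ! r15 is restored after the region

   decode_gusa() matches load / optional move / operation / store shapes
   like this one and emits a single host atomic operation instead. */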
1870static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
1871{
d6a6cffd
RH
1872 uint16_t insns[5];
1873 int ld_adr, ld_dst, ld_mop;
1874 int op_dst, op_src, op_opc;
1875 int mv_src, mt_dst, st_src, st_mop;
1876 TCGv op_arg;
1877
4bfa602b
RH
1878 uint32_t pc = ctx->pc;
1879 uint32_t pc_end = ctx->tb->cs_base;
1880 int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
1881 int max_insns = (pc_end - pc) / 2;
d6a6cffd 1882 int i;
4bfa602b
RH
1883
1884 if (pc != pc_end + backup || max_insns < 2) {
1885 /* This is a malformed gUSA region. Don't do anything special,
1886 since the interpreter is likely to get confused. */
1887 ctx->envflags &= ~GUSA_MASK;
1888 return 0;
1889 }
823029f9 1890
4bfa602b
RH
1891 if (ctx->tbflags & GUSA_EXCLUSIVE) {
1892 /* Regardless of single-stepping or the end of the page,
1893 we must complete execution of the gUSA region while
1894 holding the exclusive lock. */
1895 *pmax_insns = max_insns;
1896 return 0;
823029f9 1897 }
4bfa602b 1898
d6a6cffd
RH
1899 /* The state machine below will consume only a few insns.
1900 If there are more than that in a region, fail now. */
1901 if (max_insns > ARRAY_SIZE(insns)) {
1902 goto fail;
1903 }
1904
1905 /* Read all of the insns for the region. */
1906 for (i = 0; i < max_insns; ++i) {
1907 insns[i] = cpu_lduw_code(env, pc + i * 2);
1908 }
1909
1910 ld_adr = ld_dst = ld_mop = -1;
1911 mv_src = -1;
1912 op_dst = op_src = op_opc = -1;
1913 mt_dst = -1;
1914 st_src = st_mop = -1;
1915 TCGV_UNUSED(op_arg);
1916 i = 0;
1917
1918#define NEXT_INSN \
1919 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1920
1921 /*
1922 * Expect a load to begin the region.
1923 */
1924 NEXT_INSN;
1925 switch (ctx->opcode & 0xf00f) {
1926 case 0x6000: /* mov.b @Rm,Rn */
1927 ld_mop = MO_SB;
1928 break;
1929 case 0x6001: /* mov.w @Rm,Rn */
1930 ld_mop = MO_TESW;
1931 break;
1932 case 0x6002: /* mov.l @Rm,Rn */
1933 ld_mop = MO_TESL;
1934 break;
1935 default:
1936 goto fail;
1937 }
1938 ld_adr = B7_4;
1939 ld_dst = B11_8;
1940 if (ld_adr == ld_dst) {
1941 goto fail;
1942 }
1943 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1944 op_dst = ld_dst;
1945
1946 /*
1947 * Expect an optional register move.
1948 */
1949 NEXT_INSN;
1950 switch (ctx->opcode & 0xf00f) {
1951 case 0x6003: /* mov Rm,Rn */
 1952 /* Here we want to recognize ld_dst being saved for later consumption,
1953 or for another input register being copied so that ld_dst need not
1954 be clobbered during the operation. */
1955 op_dst = B11_8;
1956 mv_src = B7_4;
1957 if (op_dst == ld_dst) {
1958 /* Overwriting the load output. */
1959 goto fail;
1960 }
1961 if (mv_src != ld_dst) {
1962 /* Copying a new input; constrain op_src to match the load. */
1963 op_src = ld_dst;
1964 }
1965 break;
1966
1967 default:
1968 /* Put back and re-examine as operation. */
1969 --i;
1970 }
1971
1972 /*
1973 * Expect the operation.
1974 */
1975 NEXT_INSN;
1976 switch (ctx->opcode & 0xf00f) {
1977 case 0x300c: /* add Rm,Rn */
1978 op_opc = INDEX_op_add_i32;
1979 goto do_reg_op;
1980 case 0x2009: /* and Rm,Rn */
1981 op_opc = INDEX_op_and_i32;
1982 goto do_reg_op;
1983 case 0x200a: /* xor Rm,Rn */
1984 op_opc = INDEX_op_xor_i32;
1985 goto do_reg_op;
1986 case 0x200b: /* or Rm,Rn */
1987 op_opc = INDEX_op_or_i32;
1988 do_reg_op:
1989 /* The operation register should be as expected, and the
1990 other input cannot depend on the load. */
1991 if (op_dst != B11_8) {
1992 goto fail;
1993 }
1994 if (op_src < 0) {
 1995 /* Unconstrained input. */
1996 op_src = B7_4;
1997 } else if (op_src == B7_4) {
1998 /* Constrained input matched load. All operations are
1999 commutative; "swap" them by "moving" the load output
2000 to the (implicit) first argument and the move source
2001 to the (explicit) second argument. */
2002 op_src = mv_src;
2003 } else {
2004 goto fail;
2005 }
2006 op_arg = REG(op_src);
2007 break;
2008
2009 case 0x6007: /* not Rm,Rn */
2010 if (ld_dst != B7_4 || mv_src >= 0) {
2011 goto fail;
2012 }
2013 op_dst = B11_8;
2014 op_opc = INDEX_op_xor_i32;
2015 op_arg = tcg_const_i32(-1);
2016 break;
2017
2018 case 0x7000 ... 0x700f: /* add #imm,Rn */
2019 if (op_dst != B11_8 || mv_src >= 0) {
2020 goto fail;
2021 }
2022 op_opc = INDEX_op_add_i32;
2023 op_arg = tcg_const_i32(B7_0s);
2024 break;
2025
2026 case 0x3000: /* cmp/eq Rm,Rn */
2027 /* Looking for the middle of a compare-and-swap sequence,
2028 beginning with the compare. Operands can be either order,
2029 but with only one overlapping the load. */
2030 if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
2031 goto fail;
2032 }
2033 op_opc = INDEX_op_setcond_i32; /* placeholder */
2034 op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2035 op_arg = REG(op_src);
2036
2037 NEXT_INSN;
2038 switch (ctx->opcode & 0xff00) {
2039 case 0x8b00: /* bf label */
2040 case 0x8f00: /* bf/s label */
2041 if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2042 goto fail;
2043 }
2044 if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2045 break;
2046 }
2047 /* We're looking to unconditionally modify Rn with the
2048 result of the comparison, within the delay slot of
2049 the branch. This is used by older gcc. */
2050 NEXT_INSN;
2051 if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2052 mt_dst = B11_8;
2053 } else {
2054 goto fail;
2055 }
2056 break;
2057
2058 default:
2059 goto fail;
2060 }
2061 break;
2062
2063 case 0x2008: /* tst Rm,Rn */
2064 /* Looking for a compare-and-swap against zero. */
2065 if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2066 goto fail;
2067 }
2068 op_opc = INDEX_op_setcond_i32;
2069 op_arg = tcg_const_i32(0);
2070
2071 NEXT_INSN;
2072 if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2073 || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2074 goto fail;
2075 }
2076 break;
2077
2078 default:
2079 /* Put back and re-examine as store. */
2080 --i;
2081 }
2082
2083 /*
2084 * Expect the store.
2085 */
2086 /* The store must be the last insn. */
2087 if (i != max_insns - 1) {
2088 goto fail;
2089 }
2090 NEXT_INSN;
2091 switch (ctx->opcode & 0xf00f) {
2092 case 0x2000: /* mov.b Rm,@Rn */
2093 st_mop = MO_UB;
2094 break;
2095 case 0x2001: /* mov.w Rm,@Rn */
2096 st_mop = MO_UW;
2097 break;
2098 case 0x2002: /* mov.l Rm,@Rn */
2099 st_mop = MO_UL;
2100 break;
2101 default:
2102 goto fail;
2103 }
2104 /* The store must match the load. */
2105 if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2106 goto fail;
2107 }
2108 st_src = B7_4;
2109
2110#undef NEXT_INSN
2111
2112 /*
2113 * Emit the operation.
2114 */
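/* Four shapes are emitted: a bare load/store pair becomes an atomic
   exchange; add/and/or/xor use the host's atomic op-and-fetch when the
   result is also the stored value (or fetch-and-op plus a recomputation
   when op_dst differs from the loaded register); and the cmp/eq and tst
   forms become an atomic compare-and-swap whose outcome sets T. */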
2115 tcg_gen_insn_start(pc, ctx->envflags);
2116 switch (op_opc) {
2117 case -1:
2118 /* No operation found. Look for exchange pattern. */
2119 if (st_src == ld_dst || mv_src >= 0) {
2120 goto fail;
2121 }
2122 tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2123 ctx->memidx, ld_mop);
2124 break;
2125
2126 case INDEX_op_add_i32:
2127 if (op_dst != st_src) {
2128 goto fail;
2129 }
2130 if (op_dst == ld_dst && st_mop == MO_UL) {
2131 tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2132 op_arg, ctx->memidx, ld_mop);
2133 } else {
2134 tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2135 op_arg, ctx->memidx, ld_mop);
2136 if (op_dst != ld_dst) {
2137 /* Note that mop sizes < 4 cannot use add_fetch
2138 because it won't carry into the higher bits. */
2139 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2140 }
2141 }
2142 break;
2143
2144 case INDEX_op_and_i32:
2145 if (op_dst != st_src) {
2146 goto fail;
2147 }
2148 if (op_dst == ld_dst) {
2149 tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2150 op_arg, ctx->memidx, ld_mop);
2151 } else {
2152 tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2153 op_arg, ctx->memidx, ld_mop);
2154 tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2155 }
2156 break;
2157
2158 case INDEX_op_or_i32:
2159 if (op_dst != st_src) {
2160 goto fail;
2161 }
2162 if (op_dst == ld_dst) {
2163 tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2164 op_arg, ctx->memidx, ld_mop);
2165 } else {
2166 tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2167 op_arg, ctx->memidx, ld_mop);
2168 tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2169 }
2170 break;
2171
2172 case INDEX_op_xor_i32:
2173 if (op_dst != st_src) {
2174 goto fail;
2175 }
2176 if (op_dst == ld_dst) {
2177 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2178 op_arg, ctx->memidx, ld_mop);
2179 } else {
2180 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2181 op_arg, ctx->memidx, ld_mop);
2182 tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2183 }
2184 break;
2185
2186 case INDEX_op_setcond_i32:
2187 if (st_src == ld_dst) {
2188 goto fail;
2189 }
2190 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2191 REG(st_src), ctx->memidx, ld_mop);
2192 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2193 if (mt_dst >= 0) {
2194 tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2195 }
2196 break;
2197
2198 default:
2199 g_assert_not_reached();
2200 }
2201
2202 /* If op_src is not a valid register, then op_arg was a constant. */
2203 if (op_src < 0) {
2204 tcg_temp_free_i32(op_arg);
2205 }
2206
2207 /* The entire region has been translated. */
2208 ctx->envflags &= ~GUSA_MASK;
2209 ctx->pc = pc_end;
2210 return max_insns;
2211
2212 fail:
4bfa602b
RH
2213 qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2214 pc, pc_end);
2215
2216 /* Restart with the EXCLUSIVE bit set, within a TB run via
2217 cpu_exec_step_atomic holding the exclusive lock. */
2218 tcg_gen_insn_start(pc, ctx->envflags);
2219 ctx->envflags |= GUSA_EXCLUSIVE;
2220 gen_save_cpu_state(ctx, false);
2221 gen_helper_exclusive(cpu_env);
2222 ctx->bstate = BS_EXCP;
2223
2224 /* We're not executing an instruction, but we must report one for the
2225 purposes of accounting within the TB. We might as well report the
2226 entire region consumed via ctx->pc so that it's immediately available
2227 in the disassembly dump. */
2228 ctx->pc = pc_end;
2229 return 1;
fdf9b3e8 2230}
4bfa602b 2231#endif
fdf9b3e8 2232
9c489ea6 2233void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
fdf9b3e8 2234{
9c489ea6 2235 CPUSH4State *env = cs->env_ptr;
fdf9b3e8
FB
2236 DisasContext ctx;
2237 target_ulong pc_start;
2e70f6ef
PB
2238 int num_insns;
2239 int max_insns;
fdf9b3e8
FB
2240
2241 pc_start = tb->pc;
fdf9b3e8 2242 ctx.pc = pc_start;
a6215749 2243 ctx.tbflags = (uint32_t)tb->flags;
e1933d14 2244 ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
823029f9 2245 ctx.bstate = BS_NONE;
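/* MMU index 0 is used for privileged accesses (SR.MD set), index 1 for
   user-mode accesses. */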
a6215749 2246 ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
9854bc46
PB
2247 /* We don't know if the delayed pc came from a dynamic or static branch,
2248 so assume it is a dynamic branch. */
823029f9 2249 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
fdf9b3e8 2250 ctx.tb = tb;
ed2803da 2251 ctx.singlestep_enabled = cs->singlestep_enabled;
71968fa6 2252 ctx.features = env->features;
a6215749 2253 ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
3a3bb8d2
RH
2254 ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
2255 (ctx.tbflags & (1 << SR_RB))) * 0x10;
5c13bad9 2256 ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
fdf9b3e8 2257
2e70f6ef 2258 max_insns = tb->cflags & CF_COUNT_MASK;
190ce7fb 2259 if (max_insns == 0) {
2e70f6ef 2260 max_insns = CF_COUNT_MASK;
190ce7fb 2261 }
4448a836
RH
2262 max_insns = MIN(max_insns, TCG_MAX_INSNS);
2263
2264 /* Since the ISA is fixed-width, we can bound by the number
2265 of instructions remaining on the page. */
2266 num_insns = -(ctx.pc | TARGET_PAGE_MASK) / 2;
2267 max_insns = MIN(max_insns, num_insns);
2268
2269 /* Single stepping means just that. */
2270 if (ctx.singlestep_enabled || singlestep) {
2271 max_insns = 1;
190ce7fb
RH
2272 }
2273
cd42d5b2 2274 gen_tb_start(tb);
4448a836
RH
2275 num_insns = 0;
2276
4bfa602b
RH
2277#ifdef CONFIG_USER_ONLY
2278 if (ctx.tbflags & GUSA_MASK) {
2279 num_insns = decode_gusa(&ctx, env, &max_insns);
2280 }
2281#endif
2282
4448a836
RH
2283 while (ctx.bstate == BS_NONE
2284 && num_insns < max_insns
2285 && !tcg_op_buf_full()) {
a6215749 2286 tcg_gen_insn_start(ctx.pc, ctx.envflags);
959082fc 2287 num_insns++;
667b8e29 2288
b933066a
RH
2289 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2290 /* We have hit a breakpoint - make sure PC is up-to-date */
ac9707ea 2291 gen_save_cpu_state(&ctx, true);
b933066a 2292 gen_helper_debug(cpu_env);
63205665 2293 ctx.bstate = BS_EXCP;
522a0d4e
RH
2294 /* The address covered by the breakpoint must be included in
 2295 [tb->pc, tb->pc + tb->size) in order for it to be
2296 properly cleared -- thus we increment the PC here so that
2297 the logic setting tb->size below does the right thing. */
2298 ctx.pc += 2;
b933066a
RH
2299 break;
2300 }
2301
959082fc 2302 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2e70f6ef 2303 gen_io_start();
667b8e29
RH
2304 }
2305
485d0035 2306 ctx.opcode = cpu_lduw_code(env, ctx.pc);
fdf9b3e8
FB
2307 decode_opc(&ctx);
2308 ctx.pc += 2;
fdf9b3e8 2309 }
4448a836 2310 if (tb->cflags & CF_LAST_IO) {
2e70f6ef 2311 gen_io_end();
4448a836 2312 }
4bfa602b
RH
2313
2314 if (ctx.tbflags & GUSA_EXCLUSIVE) {
2315 /* Ending the region of exclusivity. Clear the bits. */
2316 ctx.envflags &= ~GUSA_MASK;
2317 }
2318
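/* Emit the TB epilogue.  Single-stepping always traps to the debug
   exception.  Otherwise BS_STOP spills state and exits to the main loop so
   that updated flags are reloaded, BS_NONE chains to the next TB at
   ctx.pc, and BS_BRANCH/BS_EXCP have already emitted their own exits. */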
ed2803da 2319 if (cs->singlestep_enabled) {
ac9707ea 2320 gen_save_cpu_state(&ctx, true);
485d0035 2321 gen_helper_debug(cpu_env);
823029f9
TS
2322 } else {
2323 switch (ctx.bstate) {
2324 case BS_STOP:
ac9707ea 2325 gen_save_cpu_state(&ctx, true);
0fc37a8b
AJ
2326 tcg_gen_exit_tb(0);
2327 break;
823029f9 2328 case BS_NONE:
ac9707ea 2329 gen_save_cpu_state(&ctx, false);
823029f9
TS
2330 gen_goto_tb(&ctx, 0, ctx.pc);
2331 break;
2332 case BS_EXCP:
63205665 2333 /* fall through */
823029f9
TS
2334 case BS_BRANCH:
2335 default:
2336 break;
2337 }
fdf9b3e8 2338 }
823029f9 2339
806f352d 2340 gen_tb_end(tb, num_insns);
0a7df5da 2341
4e5e1215
RH
2342 tb->size = ctx.pc - pc_start;
2343 tb->icount = num_insns;
fdf9b3e8
FB
2344
2345#ifdef DEBUG_DISAS
4910e6e4
RH
2346 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
2347 && qemu_log_in_addr_range(pc_start)) {
1ee73216 2348 qemu_log_lock();
93fcfe39 2349 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
d49190c4 2350 log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
93fcfe39 2351 qemu_log("\n");
1ee73216 2352 qemu_log_unlock();
fdf9b3e8 2353 }
fdf9b3e8 2354#endif
fdf9b3e8
FB
2355}
2356
bad729e2
RH
2357void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
2358 target_ulong *data)
d2856f1a 2359{
bad729e2
RH
2360 env->pc = data[0];
2361 env->flags = data[1];
ac9707ea
AJ
2362 /* Theoretically delayed_pc should also be restored. In practice the
2363 branch instruction is re-executed after exception, so the delayed
2364 branch target will be recomputed. */
d2856f1a 2365}