/*
 * SH4 translation
 *
 * Copyright (c) 2005 Samuel Tardieu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#define DEBUG_DISAS

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


typedef struct DisasContext {
    struct TranslationBlock *tb;
    target_ulong pc;
    uint16_t opcode;
    uint32_t tbflags;  /* should stay unmodified during the TB translation */
    uint32_t envflags; /* should stay in sync with env->flags using TCG ops */
    int bstate;
    int memidx;
    int gbank;
    int fbank;
    uint32_t delayed_pc;
    int singlestep_enabled;
    uint32_t features;
    int has_movcal;
} DisasContext;

#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->tbflags & (1u << SR_MD)))
#endif

enum {
    BS_NONE = 0,   /* We go out of the TB without reaching a branch or an
                    * exception condition */
    BS_STOP = 1,   /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3,   /* We reached an exception condition */
};

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_gregs[32];
static TCGv cpu_sr, cpu_sr_m, cpu_sr_q, cpu_sr_t;
static TCGv cpu_pc, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc, cpu_delayed_cond;

#include "exec/gen-icount.h"

void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;

    for (i = 0; i < 24; i++) {
        cpu_gregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);
    }
    memcpy(cpu_gregs + 24, cpu_gregs + 8, 8 * sizeof(TCGv));

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_sr_m = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_m), "SR_M");
    cpu_sr_q = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_q), "SR_Q");
    cpu_sr_t = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, sr_t), "SR_T");
    cpu_ssr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(cpu_env,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(cpu_env,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_delayed_cond = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State,
                                                       delayed_cond),
                                              "_delayed_cond_");
    cpu_ldst = tcg_global_mem_new_i32(cpu_env,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++) {
        cpu_fregs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);
    }

    done_init = 1;
}

void superh_cpu_dump_state(CPUState *cs, FILE *f,
                           fprintf_function cpu_fprintf, int flags)
{
    SuperHCPU *cpu = SUPERH_CPU(cs);
    CPUSH4State *env = &cpu->env;
    int i;
    cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
                env->pc, cpu_read_sr(env), env->pr, env->fpscr);
    cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
                env->spc, env->ssr, env->gbr, env->vbr);
    cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
                env->sgr, env->dbr, env->delayed_pc, env->fpul);
    for (i = 0; i < 24; i += 4) {
        cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                    i, env->gregs[i], i + 1, env->gregs[i + 1],
                    i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
    }
    if (env->flags & DELAY_SLOT) {
        cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
        cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    } else if (env->flags & DELAY_SLOT_RTE) {
        cpu_fprintf(f, "in rte delay slot (delayed_pc=0x%08x)\n",
                    env->delayed_pc);
    }
}

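/*
 * SR is stored split: the Q, M and T bits live in cpu_sr_q/m/t (each
 * holding 0 or 1) so TCG can operate on them directly, while cpu_sr
 * holds every other bit with Q/M/T masked out.  E.g. SR = 0x40000101
 * is kept as cpu_sr = 0x40000000, cpu_sr_q = 1, cpu_sr_m = 0 and
 * cpu_sr_t = 1, so a full 32-bit read has to be reassembled.
 */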
static void gen_read_sr(TCGv dst)
{
    TCGv t0 = tcg_temp_new();
    tcg_gen_shli_i32(t0, cpu_sr_q, SR_Q);
    tcg_gen_or_i32(dst, cpu_sr, t0);
    tcg_gen_shli_i32(t0, cpu_sr_m, SR_M);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_gen_shli_i32(t0, cpu_sr_t, SR_T);
    tcg_gen_or_i32(dst, dst, t0);
    tcg_temp_free_i32(t0);
}

static void gen_write_sr(TCGv src)
{
    tcg_gen_andi_i32(cpu_sr, src,
                     ~((1u << SR_Q) | (1u << SR_M) | (1u << SR_T)));
    tcg_gen_extract_i32(cpu_sr_q, src, SR_Q, 1);
    tcg_gen_extract_i32(cpu_sr_m, src, SR_M, 1);
    tcg_gen_extract_i32(cpu_sr_t, src, SR_T, 1);
}

static inline void gen_save_cpu_state(DisasContext *ctx, bool save_pc)
{
    if (save_pc) {
        tcg_gen_movi_i32(cpu_pc, ctx->pc);
    }
    if (ctx->delayed_pc != (uint32_t) -1) {
        tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
    }
    if ((ctx->tbflags & TB_FLAG_ENVFLAGS_MASK) != ctx->envflags) {
        tcg_gen_movi_i32(cpu_flags, ctx->envflags);
    }
}

static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
{
    if (unlikely(ctx->singlestep_enabled)) {
        return false;
    }
    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        return false;
    }
#ifndef CONFIG_USER_ONLY
    return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}
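
/*
 * Jumps with a statically known destination may use a direct goto_tb,
 * which the TCG backend patches in place once the destination TB is
 * known, chaining the two blocks.  Chaining is only safe when the
 * destination lies in the same guest page as this TB (and never while
 * single-stepping or inside a gUSA region), hence the checks above;
 * everything else exits to the main loop through cpu_pc.
 */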
static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        /* Use a direct jump if in same page and singlestep not enabled */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(0);
    }
}

static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) -1) {
        /* Target is not statically known; it necessarily comes from a
           register-based delayed jump, since delayed jumps with
           immediate targets record delayed_pc at translation time */
        tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
        tcg_gen_discard_i32(cpu_delayed_pc);
        if (ctx->singlestep_enabled) {
            gen_helper_debug(cpu_env);
        }
        tcg_gen_exit_tb(0);
    } else {
        gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}

/* Immediate conditional jump (bt or bf) */
static void gen_conditional_jump(DisasContext *ctx, target_ulong dest,
                                 bool jump_if_true)
{
    TCGLabel *l1 = gen_new_label();
    TCGCond cond_not_taken = jump_if_true ? TCG_COND_EQ : TCG_COND_NE;

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        /* Note that this won't actually use a goto_tb opcode because we
           disallow it in use_goto_tb, but it handles exit + singlestep. */
        gen_goto_tb(ctx, 0, dest);
        gen_set_label(l1);
        return;
    }

    gen_save_cpu_state(ctx, false);
    tcg_gen_brcondi_i32(cond_not_taken, cpu_sr_t, 0, l1);
    gen_goto_tb(ctx, 0, dest);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    ctx->bstate = BS_BRANCH;
}

/* Delayed conditional jump (bt or bf) */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    TCGLabel *l1 = gen_new_label();
    TCGv ds = tcg_temp_new();

    tcg_gen_mov_i32(ds, cpu_delayed_cond);
    tcg_gen_discard_i32(cpu_delayed_cond);

    if (ctx->tbflags & GUSA_EXCLUSIVE) {
        /* When in an exclusive region, we must continue to the end.
           Therefore, exit the region on a taken branch, but otherwise
           fall through to the next instruction. */
        tcg_gen_brcondi_i32(TCG_COND_EQ, ds, 0, l1);

        /* Leave the gUSA region. */
        tcg_gen_movi_i32(cpu_flags, ctx->envflags & ~GUSA_MASK);
        gen_jump(ctx);

        gen_set_label(l1);
        return;
    }

    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    gen_jump(ctx);
}

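/*
 * A double-precision DRn is the pair FRn:FRn+1, with the even register
 * as the most significant word: e.g. DR0 is assembled below as
 * ((uint64_t)FPR0 << 32) | FPR1.
 */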
static inline void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}

static inline void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    /* We have already signaled illegal instruction for odd Dr.  */
    tcg_debug_assert((reg & 1) == 0);
    reg ^= ctx->fbank;
    tcg_gen_extr_i64_i32(cpu_fregs[reg + 1], cpu_fregs[reg], t);
}

#define B3_0   (ctx->opcode & 0xf)
#define B6_4   ((ctx->opcode >> 4) & 0x7)
#define B7_4   ((ctx->opcode >> 4) & 0xf)
#define B7_0   (ctx->opcode & 0xff)
#define B7_0s  ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
                (ctx->opcode & 0xfff))
#define B11_8  ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)
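
/*
 * Example decode: opcode 0x313c ("add r3,r1") splits into B15_12 = 0x3
 * (major group), B11_8 = 1 (Rn), B7_4 = 3 (Rm) and B3_0 = 0xc.
 */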

#define REG(x)    cpu_gregs[(x) ^ ctx->gbank]
#define ALTREG(x) cpu_gregs[(x) ^ ctx->gbank ^ 0x10]
#define FREG(x)   cpu_fregs[(x) ^ ctx->fbank]

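/*
 * With FPSCR.SZ set, fmov moves 64-bit register pairs, and an odd
 * register number in the insn selects the XD (opposite-bank) registers.
 * XHACK rewrites such a number into the even index of the other bank:
 * XHACK(0) = 0x00 (DR0), XHACK(1) = 0x10 (XD0), XHACK(3) = 0x12 (XD2).
 */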
#define XHACK(x) ((((x) & 1) << 4) | ((x) & 0xe))

#define CHECK_NOT_DELAY_SLOT \
    if (ctx->envflags & DELAY_SLOT_MASK) {  \
        goto do_illegal_slot;               \
    }

#define CHECK_PRIVILEGED \
    if (IS_USER(ctx)) {                     \
        goto do_illegal;                    \
    }

#define CHECK_FPU_ENABLED \
    if (ctx->tbflags & (1u << SR_FD)) {     \
        goto do_fpu_disabled;               \
    }

#define CHECK_FPSCR_PR_0 \
    if (ctx->tbflags & FPSCR_PR) {          \
        goto do_illegal;                    \
    }

#define CHECK_FPSCR_PR_1 \
    if (!(ctx->tbflags & FPSCR_PR)) {       \
        goto do_illegal;                    \
    }

#define CHECK_SH4A \
    if (!(ctx->features & SH_FEATURE_SH4A)) { \
        goto do_illegal;                      \
    }

static void _decode_opc(DisasContext * ctx)
{
    /* This code tries to make movca.l emulation sufficiently
       accurate for Linux purposes.  This instruction writes
       memory, and prior to that, always allocates a cache line.
       It is used in two contexts:
       - in memcpy, where data is copied in blocks, the first write
         to a block uses movca.l for performance.
       - in arch/sh/mm/cache-sh4.c, the movca.l + ocbi combination is
         used to flush the cache.  Here, the data written by movca.l is
         never written to memory, and the data written is just bogus.

       To simulate this, when we emulate movca.l we store the value to
       memory, but we also remember the previous content.  If we see
       ocbi, we check whether movca.l for that address was done
       previously.  If so, the write should not have hit the memory, so
       we restore the previous content.  When we see an instruction that
       is neither movca.l nor ocbi, the previous content is discarded.

       To optimize, we only try to flush stores when we're at the start of
       TB, or if we already saw movca.l in this TB and did not flush stores
       yet.  */
    if (ctx->has_movcal) {
        int opcode = ctx->opcode & 0xf0ff;
        if (opcode != 0x0093 /* ocbi */
            && opcode != 0x00c3 /* movca.l */) {
            gen_helper_discard_movcal_backup(cpu_env);
            ctx->has_movcal = 0;
        }
    }

#if 0
    fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
#endif

    switch (ctx->opcode) {
    case 0x0019: /* div0u */
        tcg_gen_movi_i32(cpu_sr_m, 0);
        tcg_gen_movi_i32(cpu_sr_q, 0);
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x000b: /* rts */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0028: /* clrmac */
        tcg_gen_movi_i32(cpu_mach, 0);
        tcg_gen_movi_i32(cpu_macl, 0);
        return;
    case 0x0048: /* clrs */
        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
        return;
    case 0x0008: /* clrt */
        tcg_gen_movi_i32(cpu_sr_t, 0);
        return;
    case 0x0038: /* ldtlb */
        CHECK_PRIVILEGED
        gen_helper_ldtlb(cpu_env);
        return;
    case 0x002b: /* rte */
        CHECK_PRIVILEGED
        CHECK_NOT_DELAY_SLOT
        gen_write_sr(cpu_ssr);
        tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
        ctx->envflags |= DELAY_SLOT_RTE;
        ctx->delayed_pc = (uint32_t) -1;
        ctx->bstate = BS_STOP;
        return;
    case 0x0058: /* sets */
        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
        return;
    case 0x0018: /* sett */
        tcg_gen_movi_i32(cpu_sr_t, 1);
        return;
    case 0xfbfd: /* frchg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
        ctx->bstate = BS_STOP;
        return;
    case 0xf3fd: /* fschg */
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
        ctx->bstate = BS_STOP;
        return;
    case 0xf7fd: /* fpchg */
        CHECK_SH4A
        tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_PR);
        ctx->bstate = BS_STOP;
        return;
    case 0x0009: /* nop */
        return;
    case 0x001b: /* sleep */
        CHECK_PRIVILEGED
        tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
        gen_helper_sleep(cpu_env);
        return;
    }

    switch (ctx->opcode & 0xf000) {
    case 0x1000: /* mov.l Rm,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x5000: /* mov.l @(disp,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xe000: /* mov #imm,Rn */
#ifdef CONFIG_USER_ONLY
        /* Detect the start of a gUSA region.  If so, update envflags
           and end the TB.  This will allow us to see the end of the
           region (stored in R0) in the next TB. */
        if (B11_8 == 15 && B7_0s < 0 && parallel_cpus) {
            ctx->envflags = deposit32(ctx->envflags, GUSA_SHIFT, 8, B7_0s);
            ctx->bstate = BS_STOP;
        }
#endif
        tcg_gen_movi_i32(REG(B11_8), B7_0s);
        return;
    case 0x9000: /* mov.w @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xd000: /* mov.l @(disp,PC),Rn */
        {
            TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x7000: /* add #imm,Rn */
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
        return;
    case 0xa000: /* bra disp */
        CHECK_NOT_DELAY_SLOT
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    case 0xb000: /* bsr disp */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
        ctx->envflags |= DELAY_SLOT;
        return;
    }

    switch (ctx->opcode & 0xf00f) {
    case 0x6003: /* mov Rm,Rn */
        tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x2000: /* mov.b Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
        return;
    case 0x2001: /* mov.w Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
        return;
    case 0x2002: /* mov.l Rm,@Rn */
        tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        return;
    case 0x6000: /* mov.b @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        return;
    case 0x6001: /* mov.w @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        return;
    case 0x6002: /* mov.l @Rm,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        return;
    case 0x2004: /* mov.b Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 1);
            /* might cause re-execution */
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
            tcg_temp_free(addr);
        }
        return;
    case 0x2005: /* mov.w Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 2);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x2006: /* mov.l Rm,@-Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0x6004: /* mov.b @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
        }
        return;
    case 0x6005: /* mov.w @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x6006: /* mov.l @Rm+,Rn */
        tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
        if (B11_8 != B7_4) {
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0x0004: /* mov.b Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x0005: /* mov.w Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x0006: /* mov.l Rm,@(R0,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x000c: /* mov.b @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x000d: /* mov.w @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0x000e: /* mov.l @(R0,Rm),Rn */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0x6008: /* swap.b Rm,Rn */
        {
            TCGv low = tcg_temp_new();
            tcg_gen_ext16u_i32(low, REG(B7_4));
            tcg_gen_bswap16_i32(low, low);
            tcg_gen_deposit_i32(REG(B11_8), REG(B7_4), low, 0, 16);
            tcg_temp_free(low);
        }
        return;
    case 0x6009: /* swap.w Rm,Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
        return;
    case 0x200d: /* xtrct Rm,Rn */
        {
            TCGv high, low;
            high = tcg_temp_new();
            tcg_gen_shli_i32(high, REG(B7_4), 16);
            low = tcg_temp_new();
            tcg_gen_shri_i32(low, REG(B11_8), 16);
            tcg_gen_or_i32(REG(B11_8), high, low);
            tcg_temp_free(low);
            tcg_temp_free(high);
        }
        return;
    case 0x300c: /* add Rm,Rn */
        tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300e: /* addc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
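            /* Rn + Rm + T in two carry-propagating steps: first
               t1:carry = Rm + T, then Rn + t1 plus that carry, leaving
               the final carry-out in T. */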
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300f: /* addv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
            tcg_gen_andc_i32(cpu_sr_t, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, cpu_sr_t, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2009: /* and Rm,Rn */
        tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x3000: /* cmp/eq Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3003: /* cmp/ge Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3007: /* cmp/gt Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3006: /* cmp/hi Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x3002: /* cmp/hs Rm,Rn */
        tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_t, REG(B11_8), REG(B7_4));
        return;
    case 0x200c: /* cmp/str Rm,Rn */
        {
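            /* cmp/str sets T when any of the four byte lanes of Rm and
               Rn are equal.  The XOR zeroes an equal byte, and the
               classic (x - 0x01010101) & ~x & 0x80808080 test yields a
               non-zero value iff some byte of x is zero. */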
            TCGv cmp1 = tcg_temp_new();
            TCGv cmp2 = tcg_temp_new();
            tcg_gen_xor_i32(cmp2, REG(B7_4), REG(B11_8));
            tcg_gen_subi_i32(cmp1, cmp2, 0x01010101);
            tcg_gen_andc_i32(cmp1, cmp1, cmp2);
            tcg_gen_andi_i32(cmp1, cmp1, 0x80808080);
            tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_t, cmp1, 0);
            tcg_temp_free(cmp2);
            tcg_temp_free(cmp1);
        }
        return;
    case 0x2007: /* div0s Rm,Rn */
        tcg_gen_shri_i32(cpu_sr_q, REG(B11_8), 31);    /* SR_Q */
        tcg_gen_shri_i32(cpu_sr_m, REG(B7_4), 31);     /* SR_M */
        tcg_gen_xor_i32(cpu_sr_t, cpu_sr_q, cpu_sr_m); /* SR_T */
        return;
    case 0x3004: /* div1 Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();
            TCGv zero = tcg_const_i32(0);

            /* shift left arg1, saving the bit being pushed out and inserting
               T on the right */
            tcg_gen_shri_i32(t0, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), cpu_sr_t);

            /* Add or subtract arg0 from arg1 depending if Q == M.  To avoid
               using 64-bit temps, we compute arg0's high part from q ^ m, so
               that it is 0x00000000 when adding the value or 0xffffffff when
               subtracting it. */
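            /* E.g. when Q == M, q ^ m - 1 is 0xffffffff: the movcond
               then selects -arg0 and t1 doubles as its sign-extended
               high word, so the add2 subtracts arg0; when Q != M, t1
               is 0 and arg0 is added unchanged. */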
            tcg_gen_xor_i32(t1, cpu_sr_q, cpu_sr_m);
            tcg_gen_subi_i32(t1, t1, 1);
            tcg_gen_neg_i32(t2, REG(B7_4));
            tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, zero, REG(B7_4), t2);
            tcg_gen_add2_i32(REG(B11_8), t1, REG(B11_8), zero, t2, t1);

            /* compute T and Q depending on carry */
            tcg_gen_andi_i32(t1, t1, 1);
            tcg_gen_xor_i32(t1, t1, t0);
            tcg_gen_xori_i32(cpu_sr_t, t1, 1);
            tcg_gen_xor_i32(cpu_sr_q, cpu_sr_m, t1);

            tcg_temp_free(zero);
            tcg_temp_free(t2);
            tcg_temp_free(t1);
            tcg_temp_free(t0);
        }
        return;
    case 0x300d: /* dmuls.l Rm,Rn */
        tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x3005: /* dmulu.l Rm,Rn */
        tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
        return;
    case 0x600e: /* exts.b Rm,Rn */
        tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600f: /* exts.w Rm,Rn */
        tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600c: /* extu.b Rm,Rn */
        tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600d: /* extu.w Rm,Rn */
        tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x000f: /* mac.l @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macl(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        }
        return;
    case 0x400f: /* mac.w @Rm+,@Rn+ */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
            arg1 = tcg_temp_new();
            tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
            gen_helper_macw(cpu_env, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
        }
        return;
    case 0x0007: /* mul.l Rm,Rn */
        tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
        return;
    case 0x200f: /* muls.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16s_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x200e: /* mulu.w Rm,Rn */
        {
            TCGv arg0, arg1;
            arg0 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg0, REG(B7_4));
            arg1 = tcg_temp_new();
            tcg_gen_ext16u_i32(arg1, REG(B11_8));
            tcg_gen_mul_i32(cpu_macl, arg0, arg1);
            tcg_temp_free(arg1);
            tcg_temp_free(arg0);
        }
        return;
    case 0x600b: /* neg Rm,Rn */
        tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x600a: /* negc Rm,Rn */
        {
            TCGv t0 = tcg_const_i32(0);
            tcg_gen_add2_i32(REG(B11_8), cpu_sr_t,
                             REG(B7_4), t0, cpu_sr_t, t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             t0, t0, REG(B11_8), cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
        }
        return;
    case 0x6007: /* not Rm,Rn */
        tcg_gen_not_i32(REG(B11_8), REG(B7_4));
        return;
    case 0x200b: /* or Rm,Rn */
        tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x400c: /* shad Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
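            /* e.g. for Rm = -32 the low five bits are 0, so t0 ^ 0x1f
               is 31 and "sar 31" plus "sar 1" gives the full 32-bit
               arithmetic shift that a single TCG shift op could not
               express. */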
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_sar_i32(t2, REG(B11_8), t0);
            tcg_gen_sari_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x400d: /* shld Rm,Rn */
        {
            TCGv t0 = tcg_temp_new();
            TCGv t1 = tcg_temp_new();
            TCGv t2 = tcg_temp_new();

            tcg_gen_andi_i32(t0, REG(B7_4), 0x1f);

            /* positive case: shift to the left */
            tcg_gen_shl_i32(t1, REG(B11_8), t0);

            /* negative case: shift to the right in two steps to
               correctly handle the -32 case */
            tcg_gen_xori_i32(t0, t0, 0x1f);
            tcg_gen_shr_i32(t2, REG(B11_8), t0);
            tcg_gen_shri_i32(t2, t2, 1);

            /* select between the two cases */
            tcg_gen_movi_i32(t0, 0);
            tcg_gen_movcond_i32(TCG_COND_GE, REG(B11_8), REG(B7_4), t0, t1, t2);

            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        return;
    case 0x3008: /* sub Rm,Rn */
        tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0x300a: /* subc Rm,Rn */
        {
            TCGv t0, t1;
            t0 = tcg_const_tl(0);
            t1 = tcg_temp_new();
            tcg_gen_add2_i32(t1, cpu_sr_t, cpu_sr_t, t0, REG(B7_4), t0);
            tcg_gen_sub2_i32(REG(B11_8), cpu_sr_t,
                             REG(B11_8), t0, t1, cpu_sr_t);
            tcg_gen_andi_i32(cpu_sr_t, cpu_sr_t, 1);
            tcg_temp_free(t0);
            tcg_temp_free(t1);
        }
        return;
    case 0x300b: /* subv Rm,Rn */
        {
            TCGv t0, t1, t2;
            t0 = tcg_temp_new();
            tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
            t1 = tcg_temp_new();
            tcg_gen_xor_i32(t1, t0, REG(B11_8));
            t2 = tcg_temp_new();
            tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
            tcg_gen_and_i32(t1, t1, t2);
            tcg_temp_free(t2);
            tcg_gen_shri_i32(cpu_sr_t, t1, 31);
            tcg_temp_free(t1);
            tcg_gen_mov_i32(REG(B11_8), t0);
            tcg_temp_free(t0);
        }
        return;
    case 0x2008: /* tst Rm,Rn */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0x200a: /* xor Rm,Rn */
        tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
        return;
    case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            int xsrc = XHACK(B7_4);
            int xdst = XHACK(B11_8);
            tcg_gen_mov_i32(FREG(xdst), FREG(xsrc));
            tcg_gen_mov_i32(FREG(xdst + 1), FREG(xsrc + 1));
        } else {
            tcg_gen_mov_i32(FREG(B11_8), FREG(B7_4));
        }
        return;
    case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, XHACK(B7_4));
            tcg_gen_qemu_st_i64(fp, REG(B11_8), ctx->memidx, MO_TEQ);
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_st_i32(FREG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
        }
        return;
    case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_SZ) {
            TCGv_i64 fp = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(fp, REG(B7_4), ctx->memidx, MO_TEQ);
            gen_store_fpr64(ctx, fp, XHACK(B11_8));
            tcg_temp_free_i64(fp);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
        } else {
            tcg_gen_qemu_ld_i32(FREG(B11_8), REG(B7_4), ctx->memidx, MO_TEUL);
            tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
        }
        return;
    case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_subi_i32(addr, REG(B11_8), 8);
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_subi_i32(addr, REG(B11_8), 4);
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new_i32();
            tcg_gen_add_i32(addr, REG(B7_4), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                tcg_gen_qemu_ld_i64(fp, addr, ctx->memidx, MO_TEQ);
                gen_store_fpr64(ctx, fp, XHACK(B11_8));
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_ld_i32(FREG(B11_8), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(B11_8), REG(0));
            if (ctx->tbflags & FPSCR_SZ) {
                TCGv_i64 fp = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp, XHACK(B7_4));
                tcg_gen_qemu_st_i64(fp, addr, ctx->memidx, MO_TEQ);
                tcg_temp_free_i64(fp);
            } else {
                tcg_gen_qemu_st_i32(FREG(B7_4), addr, ctx->memidx, MO_TEUL);
            }
            tcg_temp_free(addr);
        }
        return;
    case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
    case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
    case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        {
            CHECK_FPU_ENABLED
            if (ctx->tbflags & FPSCR_PR) {
                TCGv_i64 fp0, fp1;

                if (ctx->opcode & 0x0110) {
                    goto do_illegal;
                }
                fp0 = tcg_temp_new_i64();
                fp1 = tcg_temp_new_i64();
                gen_load_fpr64(ctx, fp0, B11_8);
                gen_load_fpr64(ctx, fp1, B7_4);
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_DT(cpu_sr_t, cpu_env, fp0, fp1);
                    return;
                }
                gen_store_fpr64(ctx, fp0, B11_8);
                tcg_temp_free_i64(fp0);
                tcg_temp_free_i64(fp1);
            } else {
                switch (ctx->opcode & 0xf00f) {
                case 0xf000: /* fadd Rm,Rn */
                    gen_helper_fadd_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf001: /* fsub Rm,Rn */
                    gen_helper_fsub_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf002: /* fmul Rm,Rn */
                    gen_helper_fmul_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf003: /* fdiv Rm,Rn */
                    gen_helper_fdiv_FT(FREG(B11_8), cpu_env,
                                       FREG(B11_8), FREG(B7_4));
                    break;
                case 0xf004: /* fcmp/eq Rm,Rn */
                    gen_helper_fcmp_eq_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                case 0xf005: /* fcmp/gt Rm,Rn */
                    gen_helper_fcmp_gt_FT(cpu_sr_t, cpu_env,
                                          FREG(B11_8), FREG(B7_4));
                    return;
                }
            }
        }
        return;
    case 0xf00e: /* fmac FR0,FRm,FRn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        gen_helper_fmac_FT(FREG(B11_8), cpu_env,
                           FREG(0), FREG(B7_4), FREG(B11_8));
        return;
    }

    switch (ctx->opcode & 0xff00) {
    case 0xc900: /* and #imm,R0 */
        tcg_gen_andi_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcd00: /* and.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0x8b00: /* bf label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, false);
        return;
    case 0x8f00: /* bf/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_xori_i32(cpu_delayed_cond, cpu_sr_t, 1);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8900: /* bt label */
        CHECK_NOT_DELAY_SLOT
        gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2, true);
        return;
    case 0x8d00: /* bt/s label */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_cond, cpu_sr_t);
        ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2;
        ctx->envflags |= DELAY_SLOT_CONDITIONAL;
        return;
    case 0x8800: /* cmp/eq #imm,R0 */
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(0), B7_0s);
        return;
    case 0xc400: /* mov.b @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc500: /* mov.w @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc600: /* mov.l @(disp,GBR),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
            tcg_temp_free(addr);
        }
        return;
    case 0xc000: /* mov.b R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0xc100: /* mov.w R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc200: /* mov.l R0,@(disp,GBR) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
            tcg_temp_free(addr);
        }
        return;
    case 0x8000: /* mov.b R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8100: /* mov.w R0,@(disp,Rn) */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
            tcg_temp_free(addr);
        }
        return;
    case 0x8400: /* mov.b @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
            tcg_temp_free(addr);
        }
        return;
    case 0x8500: /* mov.w @(disp,Rn),R0 */
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
            tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
            tcg_temp_free(addr);
        }
        return;
    case 0xc700: /* mova @(disp,PC),R0 */
        tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
        return;
    case 0xcb00: /* or #imm,R0 */
        tcg_gen_ori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xcf00: /* or.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_ori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    case 0xc300: /* trapa #imm */
        {
            TCGv imm;
            CHECK_NOT_DELAY_SLOT
            gen_save_cpu_state(ctx, true);
            imm = tcg_const_i32(B7_0);
            gen_helper_trapa(cpu_env, imm);
            tcg_temp_free(imm);
            ctx->bstate = BS_EXCP;
        }
        return;
    case 0xc800: /* tst #imm,R0 */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(0), B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xcc00: /* tst.b #imm,@(R0,GBR) */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_add_i32(val, REG(0), cpu_gbr);
            tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
            tcg_gen_andi_i32(val, val, B7_0);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xca00: /* xor #imm,R0 */
        tcg_gen_xori_i32(REG(0), REG(0), B7_0);
        return;
    case 0xce00: /* xor.b #imm,@(R0,GBR) */
        {
            TCGv addr, val;
            addr = tcg_temp_new();
            tcg_gen_add_i32(addr, REG(0), cpu_gbr);
            val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
            tcg_gen_xori_i32(val, val, B7_0);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf08f) {
    case 0x408e: /* ldc Rm,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
        return;
    case 0x4087: /* ldc.l @Rm+,Rn_BANK */
        CHECK_PRIVILEGED
        tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0082: /* stc Rm_BANK,Rn */
        CHECK_PRIVILEGED
        tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
        return;
    case 0x4083: /* stc.l Rm_BANK,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
        }
        return;
    }

    switch (ctx->opcode & 0xf0ff) {
    case 0x0023: /* braf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x0003: /* bsrf Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x4015: /* cmp/pl Rn */
        tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4011: /* cmp/pz Rn */
        tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x4010: /* dt Rn */
        tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, REG(B11_8), 0);
        return;
    case 0x402b: /* jmp @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400b: /* jsr @Rn */
        CHECK_NOT_DELAY_SLOT
        tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
        tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
        ctx->envflags |= DELAY_SLOT;
        ctx->delayed_pc = (uint32_t) -1;
        return;
    case 0x400e: /* ldc Rm,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_andi_i32(val, REG(B11_8), 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x4007: /* ldc.l @Rm+,SR */
        CHECK_PRIVILEGED
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_andi_i32(val, val, 0x700083f3);
            gen_write_sr(val);
            tcg_temp_free(val);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x0002: /* stc SR,Rn */
        CHECK_PRIVILEGED
        gen_read_sr(REG(B11_8));
        return;
    case 0x4003: /* stc SR,@-Rn */
        CHECK_PRIVILEGED
        {
            TCGv addr = tcg_temp_new();
            TCGv val = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            gen_read_sr(val);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        return;
#define LD(reg,ldnum,ldpnum,prechk) \
    case ldnum:                                                           \
        prechk                                                            \
        tcg_gen_mov_i32(cpu_##reg, REG(B11_8));                           \
        return;                                                           \
    case ldpnum:                                                          \
        prechk                                                            \
        tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);                      \
        return;
#define ST(reg,stnum,stpnum,prechk) \
    case stnum:                                                           \
        prechk                                                            \
        tcg_gen_mov_i32(REG(B11_8), cpu_##reg);                           \
        return;                                                           \
    case stpnum:                                                          \
        prechk                                                            \
        {                                                                 \
            TCGv addr = tcg_temp_new();                                   \
            tcg_gen_subi_i32(addr, REG(B11_8), 4);                        \
            tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL);   \
            tcg_gen_mov_i32(REG(B11_8), addr);                            \
            tcg_temp_free(addr);                                          \
        }                                                                 \
        return;
#define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
    LD(reg,ldnum,ldpnum,prechk) \
    ST(reg,stnum,stpnum,prechk)
    LDST(gbr,  0x401e, 0x4017, 0x0012, 0x4013, {})
    LDST(vbr,  0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
    LDST(ssr,  0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
    LDST(spc,  0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
    ST(sgr,    0x003a, 0x4032, CHECK_PRIVILEGED)
    LD(sgr,    0x403a, 0x4036, CHECK_PRIVILEGED CHECK_SH4A)
    LDST(dbr,  0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
    LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
    LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
    LDST(pr,   0x402a, 0x4026, 0x002a, 0x4022, {})
    LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
    case 0x406a: /* lds Rm,FPSCR */
        CHECK_FPU_ENABLED
        gen_helper_ld_fpscr(cpu_env, REG(B11_8));
        ctx->bstate = BS_STOP;
        return;
    case 0x4066: /* lds.l @Rm+,FPSCR */
        CHECK_FPU_ENABLED
        {
            TCGv addr = tcg_temp_new();
            tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
            tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
            gen_helper_ld_fpscr(cpu_env, addr);
            tcg_temp_free(addr);
            ctx->bstate = BS_STOP;
        }
        return;
    case 0x006a: /* sts FPSCR,Rn */
        CHECK_FPU_ENABLED
        tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
        return;
    case 0x4062: /* sts FPSCR,@-Rn */
        CHECK_FPU_ENABLED
        {
            TCGv addr, val;
            val = tcg_temp_new();
            tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
            addr = tcg_temp_new();
            tcg_gen_subi_i32(addr, REG(B11_8), 4);
            tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
            tcg_gen_mov_i32(REG(B11_8), addr);
            tcg_temp_free(addr);
            tcg_temp_free(val);
        }
        return;
    case 0x00c3: /* movca.l R0,@Rm */
        {
            TCGv val = tcg_temp_new();
            tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
            gen_helper_movcal(cpu_env, REG(B11_8), val);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            tcg_temp_free(val);
        }
        ctx->has_movcal = 1;
        return;
    case 0x40a9: /* movua.l @Rm,R0 */
        CHECK_SH4A
        /* Load non-boundary-aligned data */
        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                            MO_TEUL | MO_UNALN);
        return;
    case 0x40e9: /* movua.l @Rm+,R0 */
        CHECK_SH4A
        /* Load non-boundary-aligned data */
        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx,
                            MO_TEUL | MO_UNALN);
        tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
        return;
    case 0x0029: /* movt Rn */
        tcg_gen_mov_i32(REG(B11_8), cpu_sr_t);
        return;
    case 0x0073:
        /* MOVCO.L
         *     LDST -> T
         *     If (T == 1) R0 -> (Rn)
         *     0 -> LDST
         */
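        /* MOVLI.L and MOVCO.L form a load-linked/store-conditional
           pair.  An atomic increment would typically be coded as
           (illustrative, not from this file):
               0: movli.l @r4,r0    ! LDST = 1, r0 = [r4]
                  add     #1,r0
                  movco.l r0,@r4    ! store iff LDST still set, T = LDST
                  bf      0b        ! retry if the store did not land */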
        CHECK_SH4A
        {
            TCGLabel *label = gen_new_label();
            tcg_gen_mov_i32(cpu_sr_t, cpu_ldst);
            tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
            tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
            gen_set_label(label);
            tcg_gen_movi_i32(cpu_ldst, 0);
            return;
        }
    case 0x0063:
        /* MOVLI.L @Rm,R0
         *     1 -> LDST
         *     (Rm) -> R0
         *     When an interrupt/exception
         *     occurs, 0 -> LDST
         */
        CHECK_SH4A
        tcg_gen_movi_i32(cpu_ldst, 0);
        tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
        tcg_gen_movi_i32(cpu_ldst, 1);
        return;
    case 0x0093: /* ocbi @Rn */
        {
            gen_helper_ocbi(cpu_env, REG(B11_8));
        }
        return;
    case 0x00a3: /* ocbp @Rn */
    case 0x00b3: /* ocbwb @Rn */
        /* These instructions are supposed to do nothing in case of
           a cache miss.  Given that we only partially emulate caches
           it is safe to simply ignore them. */
        return;
    case 0x0083: /* pref @Rn */
        return;
    case 0x00d3: /* prefi @Rn */
        CHECK_SH4A
        return;
    case 0x00e3: /* icbi @Rn */
        CHECK_SH4A
        return;
    case 0x00ab: /* synco */
        CHECK_SH4A
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        return;
    case 0x4024: /* rotcl Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_mov_i32(tmp, cpu_sr_t);
            tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
            tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4025: /* rotcr Rn */
        {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i32(tmp, cpu_sr_t, 31);
            tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
            tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
            tcg_gen_or_i32(REG(B11_8), REG(B11_8), tmp);
            tcg_temp_free(tmp);
        }
        return;
    case 0x4004: /* rotl Rn */
        tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        return;
    case 0x4005: /* rotr Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4000: /* shll Rn */
    case 0x4020: /* shal Rn */
        tcg_gen_shri_i32(cpu_sr_t, REG(B11_8), 31);
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4021: /* shar Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4001: /* shlr Rn */
        tcg_gen_andi_i32(cpu_sr_t, REG(B11_8), 1);
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
        return;
    case 0x4008: /* shll2 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4018: /* shll8 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4028: /* shll16 Rn */
        tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x4009: /* shlr2 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
        return;
    case 0x4019: /* shlr8 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
        return;
    case 0x4029: /* shlr16 Rn */
        tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
        return;
    case 0x401b: /* tas.b @Rn */
        {
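            /* tas.b: atomically OR 0x80 into the byte at @Rn and set T
               if the value read back was zero.  Using an atomic
               fetch-or keeps this correct under parallel user-mode
               emulation. */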
            TCGv val = tcg_const_i32(0x80);
            tcg_gen_atomic_fetch_or_i32(val, REG(B11_8), val,
                                        ctx->memidx, MO_UB);
            tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_t, val, 0);
            tcg_temp_free(val);
        }
        return;
    case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(FREG(B11_8), cpu_fpul);
        return;
    case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_mov_i32(cpu_fpul, FREG(B11_8));
        return;
    case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100) {
                goto do_illegal;
            }
            fp = tcg_temp_new_i64();
            gen_helper_float_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(ctx, fp, B11_8);
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_float_FT(FREG(B11_8), cpu_env, cpu_fpul);
        }
        return;
    case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            TCGv_i64 fp;
            if (ctx->opcode & 0x0100) {
                goto do_illegal;
            }
            fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, B11_8);
            gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_ftrc_FT(cpu_fpul, cpu_env, FREG(B11_8));
        }
        return;
    case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_xori_i32(FREG(B11_8), FREG(B11_8), 0x80000000);
        return;
    case 0xf05d: /* fabs FRn/DRn - FPSCR: Nothing */
        CHECK_FPU_ENABLED
        tcg_gen_andi_i32(FREG(B11_8), FREG(B11_8), 0x7fffffff);
        return;
    case 0xf06d: /* fsqrt FRn */
        CHECK_FPU_ENABLED
        if (ctx->tbflags & FPSCR_PR) {
            if (ctx->opcode & 0x0100) {
                goto do_illegal;
            }
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, B11_8);
            gen_helper_fsqrt_DT(fp, cpu_env, fp);
            gen_store_fpr64(ctx, fp, B11_8);
            tcg_temp_free_i64(fp);
        } else {
            gen_helper_fsqrt_FT(FREG(B11_8), cpu_env, FREG(B11_8));
        }
        return;
    case 0xf07d: /* fsrra FRn */
        CHECK_FPU_ENABLED
        break;
    case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        tcg_gen_movi_i32(FREG(B11_8), 0);
        return;
    case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_0
        tcg_gen_movi_i32(FREG(B11_8), 0x3f800000);
        return;
    case 0xf0ad: /* fcnvsd FPUL,DRn */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
            gen_store_fpr64(ctx, fp, B11_8);
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0bd: /* fcnvds DRn,FPUL */
        CHECK_FPU_ENABLED
        {
            TCGv_i64 fp = tcg_temp_new_i64();
            gen_load_fpr64(ctx, fp, B11_8);
            gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
            tcg_temp_free_i64(fp);
        }
        return;
    case 0xf0ed: /* fipr FVm,FVn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_1
        {
            TCGv m = tcg_const_i32((ctx->opcode >> 8) & 3);
            TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_fipr(cpu_env, m, n);
            tcg_temp_free(m);
            tcg_temp_free(n);
            return;
        }
    case 0xf0fd: /* ftrv XMTRX,FVn */
        CHECK_FPU_ENABLED
        CHECK_FPSCR_PR_1
        {
            if ((ctx->opcode & 0x0300) != 0x0100) {
                goto do_illegal;
            }
            TCGv n = tcg_const_i32((ctx->opcode >> 10) & 3);
            gen_helper_ftrv(cpu_env, n);
            tcg_temp_free(n);
            return;
        }
    }
#if 0
    fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
            ctx->opcode, ctx->pc);
    fflush(stderr);
#endif
 do_illegal:
    if (ctx->envflags & DELAY_SLOT_MASK) {
 do_illegal_slot:
        gen_save_cpu_state(ctx, true);
        gen_helper_raise_slot_illegal_instruction(cpu_env);
    } else {
        gen_save_cpu_state(ctx, true);
        gen_helper_raise_illegal_instruction(cpu_env);
    }
    ctx->bstate = BS_EXCP;
    return;

 do_fpu_disabled:
    gen_save_cpu_state(ctx, true);
    if (ctx->envflags & DELAY_SLOT_MASK) {
        gen_helper_raise_slot_fpu_disable(cpu_env);
    } else {
        gen_helper_raise_fpu_disable(cpu_env);
    }
    ctx->bstate = BS_EXCP;
    return;
}
1814
1815static void decode_opc(DisasContext * ctx)
1816{
1817 uint32_t old_flags = ctx->envflags;
1818
1819 _decode_opc(ctx);
1820
1821 if (old_flags & DELAY_SLOT_MASK) {
1822 /* go out of the delay slot */
1823 ctx->envflags &= ~DELAY_SLOT_MASK;
1824
1825 /* When in an exclusive region, we must continue to the end
1826 for conditional branches. */
1827 if (ctx->tbflags & GUSA_EXCLUSIVE
1828 && old_flags & DELAY_SLOT_CONDITIONAL) {
1829 gen_delayed_conditional_jump(ctx);
1830 return;
1831 }
1832 /* Otherwise this is probably an invalid gUSA region.
1833 Drop the GUSA bits so the next TB doesn't see them. */
1834 ctx->envflags &= ~GUSA_MASK;
1835
1836 tcg_gen_movi_i32(cpu_flags, ctx->envflags);
1837 ctx->bstate = BS_BRANCH;
1838 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1839 gen_delayed_conditional_jump(ctx);
1840 } else {
1841 gen_jump(ctx);
1842 }
1843 }
1844}
1845
1846#ifdef CONFIG_USER_ONLY
1847/* For uniprocessors, SH4 uses optimistic restartable atomic sequences ("gUSA").
1848 Upon an interrupt, a real kernel would simply notice magic values in
1849 the registers and reset the PC to the start of the sequence.
1850
1851 For QEMU, we cannot do this in quite the same way. Instead, we notice
1852 the normal start of such a sequence (mov #-x,r15). While we can handle
1853 any sequence via cpu_exec_step_atomic, we can recognize the "normal"
1854 sequences and transform them into atomic operations as seen by the host.
1855*/
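/* For illustration only, a sketch of such a "normal" region, patterned
   after the old Linux/SH uniprocessor atomic helpers (the register
   choices and the increment body are hypothetical):

       mova    1f,r0       ! r0 = address of the end of the region
       mov     r15,r1      ! save the real stack pointer
       mov     #-6,r15     ! r15 = -(byte size of the critical body)
   0:  mov.l   @r2,r3      ! load the old value
       add     #1,r3       ! operate on it
       mov.l   r3,@r2      ! store the result
   1:  mov     r1,r15      ! restore r15, leaving the region

   decode_gusa below pattern-matches the load/operate/store body of
   such regions onto host atomic operations. */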
1856static int decode_gusa(DisasContext *ctx, CPUSH4State *env, int *pmax_insns)
1857{
1858 uint16_t insns[5];
1859 int ld_adr, ld_dst, ld_mop;
1860 int op_dst, op_src, op_opc;
1861 int mv_src, mt_dst, st_src, st_mop;
1862 TCGv op_arg;
1863
1864 uint32_t pc = ctx->pc;
1865 uint32_t pc_end = ctx->tb->cs_base;
1866 int backup = sextract32(ctx->tbflags, GUSA_SHIFT, 8);
1867 int max_insns = (pc_end - pc) / 2;
1868 int i;
1869
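/* Note: pc_end is the address of the "1:" label that ends the region
   (held in r0 at region entry and captured into cs_base when the TB
   was looked up), and backup is the negative byte size deposited into
   the flags by the opening "mov #-x,r15".  For the three-insn example
   above, backup == -6 and a well-formed region has pc == pc_end - 6. */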
1870 if (pc != pc_end + backup || max_insns < 2) {
1871 /* This is a malformed gUSA region. Don't do anything special,
1872 since the interpreter is likely to get confused. */
1873 ctx->envflags &= ~GUSA_MASK;
1874 return 0;
1875 }
1876
1877 if (ctx->tbflags & GUSA_EXCLUSIVE) {
1878 /* Regardless of single-stepping or the end of the page,
1879 we must complete execution of the gUSA region while
1880 holding the exclusive lock. */
1881 *pmax_insns = max_insns;
1882 return 0;
1883 }
1884
1885 /* The state machine below will consume only a few insns.
1886 If there are more than that in a region, fail now. */
1887 if (max_insns > ARRAY_SIZE(insns)) {
1888 goto fail;
1889 }
1890
1891 /* Read all of the insns for the region. */
1892 for (i = 0; i < max_insns; ++i) {
1893 insns[i] = cpu_lduw_code(env, pc + i * 2);
1894 }
1895
1896 ld_adr = ld_dst = ld_mop = -1;
1897 mv_src = -1;
1898 op_dst = op_src = op_opc = -1;
1899 mt_dst = -1;
1900 st_src = st_mop = -1;
1901 TCGV_UNUSED(op_arg);
1902 i = 0;
1903
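/* Consume the next insn of the region into ctx->opcode, or abandon the
   pattern match entirely if the region is exhausted. */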
1904#define NEXT_INSN \
1905 do { if (i >= max_insns) goto fail; ctx->opcode = insns[i++]; } while (0)
1906
1907 /*
1908 * Expect a load to begin the region.
1909 */
1910 NEXT_INSN;
1911 switch (ctx->opcode & 0xf00f) {
1912 case 0x6000: /* mov.b @Rm,Rn */
1913 ld_mop = MO_SB;
1914 break;
1915 case 0x6001: /* mov.w @Rm,Rn */
1916 ld_mop = MO_TESW;
1917 break;
1918 case 0x6002: /* mov.l @Rm,Rn */
1919 ld_mop = MO_TESL;
1920 break;
1921 default:
1922 goto fail;
1923 }
1924 ld_adr = B7_4;
1925 ld_dst = B11_8;
1926 if (ld_adr == ld_dst) {
1927 goto fail;
1928 }
1929 /* Unless we see a mov, any two-operand operation must use ld_dst. */
1930 op_dst = ld_dst;
1931
1932 /*
1933 * Expect an optional register move.
1934 */
1935 NEXT_INSN;
1936 switch (ctx->opcode & 0xf00f) {
1937 case 0x6003: /* mov Rm,Rn */
1938 /* Here we want to recognize ld_dst being saved for later consumption,
1939 or another input register being copied so that ld_dst need not
1940 be clobbered during the operation. */
1941 op_dst = B11_8;
1942 mv_src = B7_4;
1943 if (op_dst == ld_dst) {
1944 /* Overwriting the load output. */
1945 goto fail;
1946 }
1947 if (mv_src != ld_dst) {
1948 /* Copying a new input; constrain op_src to match the load. */
1949 op_src = ld_dst;
1950 }
1951 break;
1952
1953 default:
1954 /* Put back and re-examine as operation. */
1955 --i;
1956 }
1957
1958 /*
1959 * Expect the operation.
1960 */
1961 NEXT_INSN;
1962 switch (ctx->opcode & 0xf00f) {
1963 case 0x300c: /* add Rm,Rn */
1964 op_opc = INDEX_op_add_i32;
1965 goto do_reg_op;
1966 case 0x2009: /* and Rm,Rn */
1967 op_opc = INDEX_op_and_i32;
1968 goto do_reg_op;
1969 case 0x200a: /* xor Rm,Rn */
1970 op_opc = INDEX_op_xor_i32;
1971 goto do_reg_op;
1972 case 0x200b: /* or Rm,Rn */
1973 op_opc = INDEX_op_or_i32;
1974 do_reg_op:
1975 /* The operation register should be as expected, and the
1976 other input cannot depend on the load. */
1977 if (op_dst != B11_8) {
1978 goto fail;
1979 }
1980 if (op_src < 0) {
1981 /* Unconstrained input. */
1982 op_src = B7_4;
1983 } else if (op_src == B7_4) {
1984 /* Constrained input matched load. All operations are
1985 commutative; "swap" them by "moving" the load output
1986 to the (implicit) first argument and the move source
1987 to the (explicit) second argument. */
1988 op_src = mv_src;
1989 } else {
1990 goto fail;
1991 }
1992 op_arg = REG(op_src);
1993 break;
1994
1995 case 0x6007: /* not Rm,Rn */
1996 if (ld_dst != B7_4 || mv_src >= 0) {
1997 goto fail;
1998 }
1999 op_dst = B11_8;
2000 op_opc = INDEX_op_xor_i32;
2001 op_arg = tcg_const_i32(-1);
2002 break;
2003
2004 case 0x7000 ... 0x700f: /* add #imm,Rn */
2005 if (op_dst != B11_8 || mv_src >= 0) {
2006 goto fail;
2007 }
2008 op_opc = INDEX_op_add_i32;
2009 op_arg = tcg_const_i32(B7_0s);
2010 break;
2011
2012 case 0x3000: /* cmp/eq Rm,Rn */
2013 /* Looking for the middle of a compare-and-swap sequence,
2014 beginning with the compare. Operands can be in either order,
2015 but only one of them may overlap the load. */
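/* For illustration (hypothetical registers), the full shape being
   matched here, together with the initial load, is:
       mov.l   @r1,r3    ! load the current value
       cmp/eq  r3,r4     ! T = (current == expected)
       bf      1f        ! skip the store on mismatch
       mov.l   r5,@r1    ! store the new value
   1:
   i.e. a compare-and-swap of @r1 from r4 to r5. */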
2016 if ((ld_dst == B11_8) + (ld_dst == B7_4) != 1 || mv_src >= 0) {
2017 goto fail;
2018 }
2019 op_opc = INDEX_op_setcond_i32; /* placeholder */
2020 op_src = (ld_dst == B11_8 ? B7_4 : B11_8);
2021 op_arg = REG(op_src);
2022
2023 NEXT_INSN;
2024 switch (ctx->opcode & 0xff00) {
2025 case 0x8b00: /* bf label */
2026 case 0x8f00: /* bf/s label */
2027 if (pc + (i + 1 + B7_0s) * 2 != pc_end) {
2028 goto fail;
2029 }
2030 if ((ctx->opcode & 0xff00) == 0x8b00) { /* bf label */
2031 break;
2032 }
2033 /* We're looking to unconditionally modify Rn with the
2034 result of the comparison, within the delay slot of
2035 the branch. This is used by older gcc. */
2036 NEXT_INSN;
2037 if ((ctx->opcode & 0xf0ff) == 0x0029) { /* movt Rn */
2038 mt_dst = B11_8;
2039 } else {
2040 goto fail;
2041 }
2042 break;
2043
2044 default:
2045 goto fail;
2046 }
2047 break;
2048
2049 case 0x2008: /* tst Rm,Rn */
2050 /* Looking for a compare-and-swap against zero. */
2051 if (ld_dst != B11_8 || ld_dst != B7_4 || mv_src >= 0) {
2052 goto fail;
2053 }
2054 op_opc = INDEX_op_setcond_i32;
2055 op_arg = tcg_const_i32(0);
2056
2057 NEXT_INSN;
2058 if ((ctx->opcode & 0xff00) != 0x8900 /* bt label */
2059 || pc + (i + 1 + B7_0s) * 2 != pc_end) {
2060 goto fail;
2061 }
2062 break;
2063
2064 default:
2065 /* Put back and re-examine as store. */
2066 --i;
2067 }
2068
2069 /*
2070 * Expect the store.
2071 */
2072 /* The store must be the last insn. */
2073 if (i != max_insns - 1) {
2074 goto fail;
2075 }
2076 NEXT_INSN;
2077 switch (ctx->opcode & 0xf00f) {
2078 case 0x2000: /* mov.b Rm,@Rn */
2079 st_mop = MO_UB;
2080 break;
2081 case 0x2001: /* mov.w Rm,@Rn */
2082 st_mop = MO_UW;
2083 break;
2084 case 0x2002: /* mov.l Rm,@Rn */
2085 st_mop = MO_UL;
2086 break;
2087 default:
2088 goto fail;
2089 }
2090 /* The store must match the load. */
2091 if (ld_adr != B11_8 || st_mop != (ld_mop & MO_SIZE)) {
2092 goto fail;
2093 }
2094 st_src = B7_4;
2095
2096#undef NEXT_INSN
2097
2098 /*
2099 * Emit the operation.
2100 */
2101 tcg_gen_insn_start(pc, ctx->envflags);
2102 switch (op_opc) {
2103 case -1:
2104 /* No operation found. Look for exchange pattern. */
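/* E.g. (hypothetical registers):
       mov.l   @r1,r3    ! load the old value
       mov.l   r5,@r1    ! store a new one
   which is a plain exchange, mapped onto the host xchg below. */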
2105 if (st_src == ld_dst || mv_src >= 0) {
2106 goto fail;
2107 }
2108 tcg_gen_atomic_xchg_i32(REG(ld_dst), REG(ld_adr), REG(st_src),
2109 ctx->memidx, ld_mop);
2110 break;
2111
2112 case INDEX_op_add_i32:
2113 if (op_dst != st_src) {
2114 goto fail;
2115 }
2116 if (op_dst == ld_dst && st_mop == MO_UL) {
2117 tcg_gen_atomic_add_fetch_i32(REG(ld_dst), REG(ld_adr),
2118 op_arg, ctx->memidx, ld_mop);
2119 } else {
2120 tcg_gen_atomic_fetch_add_i32(REG(ld_dst), REG(ld_adr),
2121 op_arg, ctx->memidx, ld_mop);
2122 if (op_dst != ld_dst) {
2123 /* Note that mop sizes < 4 cannot use add_fetch
2124 because it won't carry into the higher bits. */
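/* E.g. for a byte-sized operation, 0x7f + 1 must leave 0x00000080
   in the register result, but add_fetch would re-extend the stored
   byte to 0xffffff80. */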
2125 tcg_gen_add_i32(REG(op_dst), REG(ld_dst), op_arg);
2126 }
2127 }
2128 break;
2129
2130 case INDEX_op_and_i32:
2131 if (op_dst != st_src) {
2132 goto fail;
2133 }
2134 if (op_dst == ld_dst) {
2135 tcg_gen_atomic_and_fetch_i32(REG(ld_dst), REG(ld_adr),
2136 op_arg, ctx->memidx, ld_mop);
2137 } else {
2138 tcg_gen_atomic_fetch_and_i32(REG(ld_dst), REG(ld_adr),
2139 op_arg, ctx->memidx, ld_mop);
2140 tcg_gen_and_i32(REG(op_dst), REG(ld_dst), op_arg);
2141 }
2142 break;
2143
2144 case INDEX_op_or_i32:
2145 if (op_dst != st_src) {
2146 goto fail;
2147 }
2148 if (op_dst == ld_dst) {
2149 tcg_gen_atomic_or_fetch_i32(REG(ld_dst), REG(ld_adr),
2150 op_arg, ctx->memidx, ld_mop);
2151 } else {
2152 tcg_gen_atomic_fetch_or_i32(REG(ld_dst), REG(ld_adr),
2153 op_arg, ctx->memidx, ld_mop);
2154 tcg_gen_or_i32(REG(op_dst), REG(ld_dst), op_arg);
2155 }
2156 break;
2157
2158 case INDEX_op_xor_i32:
2159 if (op_dst != st_src) {
2160 goto fail;
2161 }
2162 if (op_dst == ld_dst) {
2163 tcg_gen_atomic_xor_fetch_i32(REG(ld_dst), REG(ld_adr),
2164 op_arg, ctx->memidx, ld_mop);
2165 } else {
2166 tcg_gen_atomic_fetch_xor_i32(REG(ld_dst), REG(ld_adr),
2167 op_arg, ctx->memidx, ld_mop);
2168 tcg_gen_xor_i32(REG(op_dst), REG(ld_dst), op_arg);
2169 }
2170 break;
2171
2172 case INDEX_op_setcond_i32:
2173 if (st_src == ld_dst) {
2174 goto fail;
2175 }
2176 tcg_gen_atomic_cmpxchg_i32(REG(ld_dst), REG(ld_adr), op_arg,
2177 REG(st_src), ctx->memidx, ld_mop);
2178 tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_t, REG(ld_dst), op_arg);
2179 if (mt_dst >= 0) {
2180 tcg_gen_mov_i32(REG(mt_dst), cpu_sr_t);
2181 }
2182 break;
2183
2184 default:
2185 g_assert_not_reached();
2186 }
2187
2188 /* If op_src is not a valid register, then op_arg was a constant. */
2189 if (op_src < 0) {
2190 tcg_temp_free_i32(op_arg);
2191 }
2192
2193 /* The entire region has been translated. */
2194 ctx->envflags &= ~GUSA_MASK;
2195 ctx->pc = pc_end;
2196 return max_insns;
2197
2198 fail:
2199 qemu_log_mask(LOG_UNIMP, "Unrecognized gUSA sequence %08x-%08x\n",
2200 pc, pc_end);
2201
2202 /* Restart with the EXCLUSIVE bit set, within a TB run via
2203 cpu_exec_step_atomic holding the exclusive lock. */
2204 tcg_gen_insn_start(pc, ctx->envflags);
2205 ctx->envflags |= GUSA_EXCLUSIVE;
2206 gen_save_cpu_state(ctx, false);
2207 gen_helper_exclusive(cpu_env);
2208 ctx->bstate = BS_EXCP;
2209
2210 /* We're not executing an instruction, but we must report one for the
2211 purposes of accounting within the TB. We might as well report the
2212 entire region consumed via ctx->pc so that it's immediately available
2213 in the disassembly dump. */
2214 ctx->pc = pc_end;
2215 return 1;
2216}
2217#endif
2218
2219void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
2220{
2221 SuperHCPU *cpu = sh_env_get_cpu(env);
2222 CPUState *cs = CPU(cpu);
2223 DisasContext ctx;
2224 target_ulong pc_start;
2225 int num_insns;
2226 int max_insns;
2227
2228 pc_start = tb->pc;
2229 ctx.pc = pc_start;
2230 ctx.tbflags = (uint32_t)tb->flags;
2231 ctx.envflags = tb->flags & TB_FLAG_ENVFLAGS_MASK;
2232 ctx.bstate = BS_NONE;
2233 ctx.memidx = (ctx.tbflags & (1u << SR_MD)) == 0 ? 1 : 0;
2234 /* We don't know if the delayed pc came from a dynamic or static branch,
2235 so assume it is a dynamic branch. */
2236 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
2237 ctx.tb = tb;
2238 ctx.singlestep_enabled = cs->singlestep_enabled;
2239 ctx.features = env->features;
2240 ctx.has_movcal = (ctx.tbflags & TB_FLAG_PENDING_MOVCA);
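/* The banked copies of R0..R7 live at cpu_gregs[16..23] and are
   active only when both SR.MD and SR.RB are set, hence the 0x10
   offset below. */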
2241 ctx.gbank = ((ctx.tbflags & (1 << SR_MD)) &&
2242 (ctx.tbflags & (1 << SR_RB))) * 0x10;
2243 ctx.fbank = ctx.tbflags & FPSCR_FR ? 0x10 : 0;
2244
2245 max_insns = tb->cflags & CF_COUNT_MASK;
2246 if (max_insns == 0) {
2247 max_insns = CF_COUNT_MASK;
2248 }
2249 max_insns = MIN(max_insns, TCG_MAX_INSNS);
2250
2251 /* Since the ISA is fixed-width, we can bound by the number
2252 of instructions remaining on the page. */
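/* E.g. with 4 KiB pages, a pc with low bits 0xffe gives
   -(pc | TARGET_PAGE_MASK) / 2 == 1: exactly one 2-byte insn
   remains before the page boundary. */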
2253 num_insns = -(ctx.pc | TARGET_PAGE_MASK) / 2;
2254 max_insns = MIN(max_insns, num_insns);
2255
2256 /* Single stepping means just that. */
2257 if (ctx.singlestep_enabled || singlestep) {
2258 max_insns = 1;
2259 }
2260
2261 gen_tb_start(tb);
2262 num_insns = 0;
2263
2264#ifdef CONFIG_USER_ONLY
2265 if (ctx.tbflags & GUSA_MASK) {
2266 num_insns = decode_gusa(&ctx, env, &max_insns);
2267 }
2268#endif
2269
2270 while (ctx.bstate == BS_NONE
2271 && num_insns < max_insns
2272 && !tcg_op_buf_full()) {
2273 tcg_gen_insn_start(ctx.pc, ctx.envflags);
2274 num_insns++;
2275
2276 if (unlikely(cpu_breakpoint_test(cs, ctx.pc, BP_ANY))) {
2277 /* We have hit a breakpoint - make sure PC is up-to-date */
2278 gen_save_cpu_state(&ctx, true);
2279 gen_helper_debug(cpu_env);
2280 ctx.bstate = BS_EXCP;
2281 /* The address covered by the breakpoint must be included in
2282 [tb->pc, tb->pc + tb->size) in order for it to be
2283 properly cleared -- thus we increment the PC here so that
2284 the logic setting tb->size below does the right thing. */
2285 ctx.pc += 2;
2286 break;
2287 }
2288
2289 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
2290 gen_io_start();
2291 }
2292
2293 ctx.opcode = cpu_lduw_code(env, ctx.pc);
2294 decode_opc(&ctx);
2295 ctx.pc += 2;
2296 }
2297 if (tb->cflags & CF_LAST_IO) {
2298 gen_io_end();
2299 }
2300
2301 if (ctx.tbflags & GUSA_EXCLUSIVE) {
2302 /* Ending the region of exclusivity. Clear the bits. */
2303 ctx.envflags &= ~GUSA_MASK;
2304 }
2305
2306 if (cs->singlestep_enabled) {
2307 gen_save_cpu_state(&ctx, true);
2308 gen_helper_debug(cpu_env);
2309 } else {
2310 switch (ctx.bstate) {
2311 case BS_STOP:
2312 gen_save_cpu_state(&ctx, true);
2313 tcg_gen_exit_tb(0);
2314 break;
2315 case BS_NONE:
2316 gen_save_cpu_state(&ctx, false);
2317 gen_goto_tb(&ctx, 0, ctx.pc);
2318 break;
2319 case BS_EXCP:
2320 /* fall through */
2321 case BS_BRANCH:
2322 default:
2323 break;
2324 }
2325 }
2326
2327 gen_tb_end(tb, num_insns);
2328
2329 tb->size = ctx.pc - pc_start;
2330 tb->icount = num_insns;
2331
2332#ifdef DEBUG_DISAS
2333 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
2334 && qemu_log_in_addr_range(pc_start)) {
2335 qemu_log_lock();
2336 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2337 log_target_disas(cs, pc_start, ctx.pc - pc_start, 0);
2338 qemu_log("\n");
2339 qemu_log_unlock();
2340 }
2341#endif
2342}
2343
2344void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb,
2345 target_ulong *data)
2346{
2347 env->pc = data[0];
2348 env->flags = data[1];
2349 /* Theoretically delayed_pc should also be restored. In practice the
2350 branch instruction is re-executed after exception, so the delayed
2351 branch target will be recomputed. */
2352}