1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <assert.h>
26
27 #define DEBUG_DISAS
28 #define SH4_DEBUG_DISAS
29 //#define SH4_SINGLE_STEP
30
31 #include "cpu.h"
32 #include "exec-all.h"
33 #include "disas.h"
34 #include "helper.h"
35 #include "tcg-op.h"
36 #include "qemu-common.h"
37
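/*
 * Per-translation-block decoder state.  pc/sr/fpscr/flags are copies of
 * the CPU state taken when translation starts, opcode is the 16-bit
 * instruction currently being decoded, bstate records why translation
 * ends (see BS_* below), memidx selects the kernel vs. user MMU index,
 * and delayed_pc caches a statically known branch target (or -1 when
 * the target is only known at run time).
 */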
38 typedef struct DisasContext {
39 struct TranslationBlock *tb;
40 target_ulong pc;
41 uint32_t sr;
42 uint32_t fpscr;
43 uint16_t opcode;
44 uint32_t flags;
45 int bstate;
46 int memidx;
47 uint32_t delayed_pc;
48 int singlestep_enabled;
49 } DisasContext;
50
51 enum {
52 BS_NONE = 0, /* We go out of the TB without reaching a branch or an
53 * exception condition
54 */
55 BS_STOP = 1, /* We want to stop translation for any reason */
56 BS_BRANCH = 2, /* We reached a branch condition */
57 BS_EXCP = 3, /* We reached an exception condition */
58 };
59
60 /* global register indexes */
61 static TCGv cpu_env;
62 static TCGv cpu_gregs[24];
63 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
64 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
65 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_flags;
66
67 /* internal register indexes */
68 static TCGv cpu_flags, cpu_delayed_pc;
69
70 /* dyngen register indexes */
71 static TCGv cpu_T[2];
72
73 #include "gen-icount.h"
74
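/*
 * Create the TCG globals used by the translator: the fixed T0/T1
 * temporaries, the 24 general registers (both banks of R0-R7 plus
 * R8-R15) and the control/system registers, each mapped onto its
 * field in CPUState.  Guarded by done_init, so it runs only once.
 */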
75 static void sh4_translate_init(void)
76 {
77 int i;
78 static int done_init = 0;
79 static const char * const gregnames[24] = {
80 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
81 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
82 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
83 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
84 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 };
86
87 if (done_init)
88 return;
89
90 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
91 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
92 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
93
94 for (i = 0; i < 24; i++)
95 cpu_gregs[i] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
96 offsetof(CPUState, gregs[i]),
97 gregnames[i]);
98
99 cpu_pc = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
100 offsetof(CPUState, pc), "PC");
101 cpu_sr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
102 offsetof(CPUState, sr), "SR");
103 cpu_ssr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
104 offsetof(CPUState, ssr), "SSR");
105 cpu_spc = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
106 offsetof(CPUState, spc), "SPC");
107 cpu_gbr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
108 offsetof(CPUState, gbr), "GBR");
109 cpu_vbr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
110 offsetof(CPUState, vbr), "VBR");
111 cpu_sgr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
112 offsetof(CPUState, sgr), "SGR");
113 cpu_dbr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
114 offsetof(CPUState, dbr), "DBR");
115 cpu_mach = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
116 offsetof(CPUState, mach), "MACH");
117 cpu_macl = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
118 offsetof(CPUState, macl), "MACL");
119 cpu_pr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
120 offsetof(CPUState, pr), "PR");
121 cpu_fpscr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
122 offsetof(CPUState, fpscr), "FPSCR");
123 cpu_fpul = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
124 offsetof(CPUState, fpul), "FPUL");
125
126 cpu_flags = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
127 offsetof(CPUState, flags), "_flags_");
128 cpu_delayed_pc = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
129 offsetof(CPUState, delayed_pc),
130 "_delayed_pc_");
131
132 /* register helpers */
133 #undef DEF_HELPER
134 #define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
135 #include "helper.h"
136
137 done_init = 1;
138 }
139
140 #ifdef CONFIG_USER_ONLY
141
142 #define GEN_OP_LD(width, reg) \
143 void gen_op_ld##width##_T0_##reg (DisasContext *ctx) { \
144 gen_op_ld##width##_T0_##reg##_raw(); \
145 }
146 #define GEN_OP_ST(width, reg) \
147 void gen_op_st##width##_##reg##_T1 (DisasContext *ctx) { \
148 gen_op_st##width##_##reg##_T1_raw(); \
149 }
150
151 #else
152
153 #define GEN_OP_LD(width, reg) \
154 void gen_op_ld##width##_T0_##reg (DisasContext *ctx) { \
155 if (ctx->memidx) gen_op_ld##width##_T0_##reg##_kernel(); \
156 else gen_op_ld##width##_T0_##reg##_user();\
157 }
158 #define GEN_OP_ST(width, reg) \
159 void gen_op_st##width##_##reg##_T1 (DisasContext *ctx) { \
160 if (ctx->memidx) gen_op_st##width##_##reg##_T1_kernel(); \
161 else gen_op_st##width##_##reg##_T1_user();\
162 }
163
164 #endif
165
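/*
 * Instantiate the memory access wrappers.  In user mode they call the
 * _raw variants directly; in system mode they dispatch on ctx->memidx
 * to the _kernel or _user variants.  For example, GEN_OP_LD(l, T0)
 * roughly expands to:
 *
 *   void gen_op_ldl_T0_T0(DisasContext *ctx) {
 *       if (ctx->memidx) gen_op_ldl_T0_T0_kernel();
 *       else             gen_op_ldl_T0_T0_user();
 *   }
 */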
166 GEN_OP_LD(ub, T0)
167 GEN_OP_LD(b, T0)
168 GEN_OP_ST(b, T0)
169 GEN_OP_LD(uw, T0)
170 GEN_OP_LD(w, T0)
171 GEN_OP_ST(w, T0)
172 GEN_OP_LD(l, T0)
173 GEN_OP_ST(l, T0)
174 GEN_OP_LD(fl, FT0)
175 GEN_OP_ST(fl, FT0)
176 GEN_OP_LD(fq, DT0)
177 GEN_OP_ST(fq, DT0)
178
179 void cpu_dump_state(CPUState * env, FILE * f,
180 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
181 int flags)
182 {
183 int i;
184 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
185 env->pc, env->sr, env->pr, env->fpscr);
186 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
187 env->spc, env->ssr, env->gbr, env->vbr);
188 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
189 env->sgr, env->dbr, env->delayed_pc, env->fpul);
190 for (i = 0; i < 24; i += 4) {
191 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
192 i, env->gregs[i], i + 1, env->gregs[i + 1],
193 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
194 }
195 if (env->flags & DELAY_SLOT) {
196 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
197 env->delayed_pc);
198 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
199 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
200 env->delayed_pc);
201 }
202 }
203
204 void cpu_sh4_reset(CPUSH4State * env)
205 {
206 #if defined(CONFIG_USER_ONLY)
207 env->sr = SR_FD; /* FD - kernel does lazy fpu context switch */
208 #else
209 env->sr = 0x700000F0; /* MD, RB, BL, I3-I0 */
210 #endif
211 env->vbr = 0;
212 env->pc = 0xA0000000;
213 #if defined(CONFIG_USER_ONLY)
214 env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
215 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
216 #else
217 env->fpscr = 0x00040001; /* CPU reset value according to SH4 manual */
218 set_float_rounding_mode(float_round_to_zero, &env->fp_status);
219 #endif
220 env->mmucr = 0;
221 }
222
223 CPUSH4State *cpu_sh4_init(const char *cpu_model)
224 {
225 CPUSH4State *env;
226
227 env = qemu_mallocz(sizeof(CPUSH4State));
228 if (!env)
229 return NULL;
230 cpu_exec_init(env);
231 sh4_translate_init();
232 cpu_sh4_reset(env);
233 tlb_flush(env, 1);
234 return env;
235 }
236
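/*
 * Emit the end of a TB that jumps to 'dest'.  If the destination lies
 * in the same guest page and single-stepping is off, a chainable
 * goto_tb is emitted so the two TBs can be linked directly; otherwise
 * PC is simply updated and control returns to the main loop.
 */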
237 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
238 {
239 TranslationBlock *tb;
240 tb = ctx->tb;
241
242 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
243 !ctx->singlestep_enabled) {
244 /* Use a direct jump if in same page and singlestep not enabled */
245 tcg_gen_goto_tb(n);
246 tcg_gen_movi_i32(cpu_pc, dest);
247 tcg_gen_exit_tb((long) tb + n);
248 } else {
249 tcg_gen_movi_i32(cpu_pc, dest);
250 if (ctx->singlestep_enabled)
251 tcg_gen_helper_0_0(helper_debug);
252 tcg_gen_exit_tb(0);
253 }
254 }
255
256 static void gen_jump(DisasContext * ctx)
257 {
258 if (ctx->delayed_pc == (uint32_t) - 1) {
259 	/* Target is not statically known; it necessarily comes from a
260 	   delayed jump, as immediate jumps are conditional jumps */
261 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
262 if (ctx->singlestep_enabled)
263 tcg_gen_helper_0_0(helper_debug);
264 tcg_gen_exit_tb(0);
265 } else {
266 gen_goto_tb(ctx, 0, ctx->delayed_pc);
267 }
268 }
269
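/*
 * Prepare a conditional delayed branch (bt/s, bf/s): record the target
 * in delayed_pc and set DELAY_SLOT_TRUE in cpu_flags when the T bit
 * matches the branch sense 't'.  gen_delayed_conditional_jump() tests
 * that flag once the delay slot instruction has been translated.
 */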
270 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
271 {
272 int label = gen_new_label();
273 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
274 tcg_gen_andi_i32(cpu_T[0], cpu_sr, SR_T);
275 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], t ? SR_T : 0, label);
276 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
277 gen_set_label(label);
278 }
279
280 /* Immediate conditional jump (bt or bf) */
281 static void gen_conditional_jump(DisasContext * ctx,
282 target_ulong ift, target_ulong ifnott)
283 {
284 int l1;
285
286 l1 = gen_new_label();
287 tcg_gen_andi_i32(cpu_T[0], cpu_sr, SR_T);
288 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_T[0], SR_T, l1);
289 gen_goto_tb(ctx, 0, ifnott);
290 gen_set_label(l1);
291 gen_goto_tb(ctx, 1, ift);
292 }
293
294 /* Delayed conditional jump (bt or bf) */
295 static void gen_delayed_conditional_jump(DisasContext * ctx)
296 {
297 int l1;
298
299 l1 = gen_new_label();
300 tcg_gen_andi_i32(cpu_T[0], cpu_flags, DELAY_SLOT_TRUE);
301 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_T[0], DELAY_SLOT_TRUE, l1);
302 gen_goto_tb(ctx, 1, ctx->pc + 2);
303 gen_set_label(l1);
304 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
305 gen_jump(ctx);
306 }
307
308 static inline void gen_set_t(void)
309 {
310 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
311 }
312
313 static inline void gen_clr_t(void)
314 {
315 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
316 }
317
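/*
 * Compare helpers: set SR.T when 'cond' holds for the operands and
 * clear it otherwise, using a branch around gen_clr_t()/gen_set_t().
 * Note the operand order in gen_cmp(): the branch tests (t1 cond t0),
 * i.e. Rn against Rm for the cmp/xx Rm,Rn forms.
 */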
318 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
319 {
320 int label1 = gen_new_label();
321 int label2 = gen_new_label();
322 tcg_gen_brcond_i32(cond, t1, t0, label1);
323 gen_clr_t();
324 tcg_gen_br(label2);
325 gen_set_label(label1);
326 gen_set_t();
327 gen_set_label(label2);
328 }
329
330 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
331 {
332 int label1 = gen_new_label();
333 int label2 = gen_new_label();
334 tcg_gen_brcondi_i32(cond, t0, imm, label1);
335 gen_clr_t();
336 tcg_gen_br(label2);
337 gen_set_label(label1);
338 gen_set_t();
339 gen_set_label(label2);
340 }
341
342 static inline void gen_store_flags(uint32_t flags)
343 {
344 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
345 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
346 }
347
348 #define B3_0 (ctx->opcode & 0xf)
349 #define B6_4 ((ctx->opcode >> 4) & 0x7)
350 #define B7_4 ((ctx->opcode >> 4) & 0xf)
351 #define B7_0 (ctx->opcode & 0xff)
352 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
353 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
354 (ctx->opcode & 0xfff))
355 #define B11_8 ((ctx->opcode >> 8) & 0xf)
356 #define B15_12 ((ctx->opcode >> 12) & 0xf)
357
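/*
 * Instruction field extraction.  SH-4 opcodes are 16 bits; for example
 * 0x300c (add Rm,Rn) carries Rn in bits 11-8 (B11_8) and Rm in bits
 * 7-4 (B7_4), while B7_0s and B11_0s are the sign-extended 8- and
 * 12-bit immediate/displacement fields.
 */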
358 #define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
359 (x) + 16 : (x))
360
361 #define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
362 ? (x) + 16 : (x))
363
364 #define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
365 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
366 #define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
367 #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
368
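/*
 * Register bank selection.  REG() maps R0-R7 to the currently active
 * bank (BANK1 when SR.MD and SR.RB are both set, BANK0 otherwise) and
 * ALTREG() to the inactive one; FREG()/XREG()/DREG() pick the FR or XF
 * floating-point bank according to FPSCR.FR.
 */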
369 #define CHECK_NOT_DELAY_SLOT \
370 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
371 {tcg_gen_helper_0_0(helper_raise_slot_illegal_instruction); ctx->bstate = BS_EXCP; \
372 return;}
373
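/*
 * Decode one instruction.  Opcodes are matched against exact 16-bit
 * values first, then against the 0xf000, 0xf00f, 0xff00, 0xf08f and
 * 0xf0ff masks in turn; anything left over raises an illegal
 * instruction exception.
 */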
374 void _decode_opc(DisasContext * ctx)
375 {
376 #if 0
377 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
378 #endif
379 switch (ctx->opcode) {
380 case 0x0019: /* div0u */
381 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
382 return;
383 case 0x000b: /* rts */
384 CHECK_NOT_DELAY_SLOT
385 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
386 ctx->flags |= DELAY_SLOT;
387 ctx->delayed_pc = (uint32_t) - 1;
388 return;
389 case 0x0028: /* clrmac */
390 tcg_gen_movi_i32(cpu_mach, 0);
391 tcg_gen_movi_i32(cpu_macl, 0);
392 return;
393 case 0x0048: /* clrs */
394 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
395 return;
396 case 0x0008: /* clrt */
397 gen_clr_t();
398 return;
399 case 0x0038: /* ldtlb */
400 #if defined(CONFIG_USER_ONLY)
401 assert(0); /* XXXXX */
402 #else
403 tcg_gen_helper_0_0(helper_ldtlb);
404 #endif
405 return;
406 case 0x002b: /* rte */
407 CHECK_NOT_DELAY_SLOT
408 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
409 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
410 ctx->flags |= DELAY_SLOT;
411 ctx->delayed_pc = (uint32_t) - 1;
412 return;
413 case 0x0058: /* sets */
414 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
415 return;
416 case 0x0018: /* sett */
417 gen_set_t();
418 return;
419 case 0xfbfd: /* frchg */
420 gen_op_frchg();
421 ctx->bstate = BS_STOP;
422 return;
423 case 0xf3fd: /* fschg */
424 gen_op_fschg();
425 ctx->bstate = BS_STOP;
426 return;
427 case 0x0009: /* nop */
428 return;
429 case 0x001b: /* sleep */
430 if (ctx->memidx) {
431 tcg_gen_helper_0_0(helper_sleep);
432 } else {
433 tcg_gen_helper_0_0(helper_raise_illegal_instruction);
434 ctx->bstate = BS_EXCP;
435 }
436 return;
437 }
438
439 switch (ctx->opcode & 0xf000) {
440 case 0x1000: /* mov.l Rm,@(disp,Rn) */
441 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
442 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
443 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], B3_0 * 4);
444 gen_op_stl_T0_T1(ctx);
445 return;
446 case 0x5000: /* mov.l @(disp,Rm),Rn */
447 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
448 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B3_0 * 4);
449 gen_op_ldl_T0_T0(ctx);
450 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
451 return;
452 case 0xe000: /* mov #imm,Rn */
453 tcg_gen_movi_i32(cpu_gregs[REG(B11_8)], B7_0s);
454 return;
455 case 0x9000: /* mov.w @(disp,PC),Rn */
456 tcg_gen_movi_i32(cpu_T[0], ctx->pc + 4 + B7_0 * 2);
457 gen_op_ldw_T0_T0(ctx);
458 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
459 return;
460 case 0xd000: /* mov.l @(disp,PC),Rn */
461 tcg_gen_movi_i32(cpu_T[0], (ctx->pc + 4 + B7_0 * 4) & ~3);
462 gen_op_ldl_T0_T0(ctx);
463 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
464 return;
465 case 0x7000: /* add #imm,Rn */
466 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], B7_0s);
467 return;
468 case 0xa000: /* bra disp */
469 CHECK_NOT_DELAY_SLOT
470 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
471 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
472 ctx->flags |= DELAY_SLOT;
473 return;
474 case 0xb000: /* bsr disp */
475 CHECK_NOT_DELAY_SLOT
476 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
477 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
478 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
479 ctx->flags |= DELAY_SLOT;
480 return;
481 }
482
483 switch (ctx->opcode & 0xf00f) {
484 case 0x6003: /* mov Rm,Rn */
485 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
486 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
487 return;
488 case 0x2000: /* mov.b Rm,@Rn */
489 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
490 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
491 gen_op_stb_T0_T1(ctx);
492 return;
493 case 0x2001: /* mov.w Rm,@Rn */
494 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
495 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
496 gen_op_stw_T0_T1(ctx);
497 return;
498 case 0x2002: /* mov.l Rm,@Rn */
499 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
500 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
501 gen_op_stl_T0_T1(ctx);
502 return;
503 case 0x6000: /* mov.b @Rm,Rn */
504 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
505 gen_op_ldb_T0_T0(ctx);
506 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
507 return;
508 case 0x6001: /* mov.w @Rm,Rn */
509 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
510 gen_op_ldw_T0_T0(ctx);
511 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
512 return;
513 case 0x6002: /* mov.l @Rm,Rn */
514 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
515 gen_op_ldl_T0_T0(ctx);
516 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
517 return;
518 case 0x2004: /* mov.b Rm,@-Rn */
519 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
520 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
521 cpu_gregs[REG(B11_8)], 1); /* modify register status */
522 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
523 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)],
524 cpu_gregs[REG(B11_8)], 1); /* recover register status */
525 gen_op_stb_T0_T1(ctx); /* might cause re-execution */
526 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
527 cpu_gregs[REG(B11_8)], 1); /* modify register status */
528 return;
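    /*
     * Note on the pre-decrement stores (this case and the mov.w/mov.l
     * ones below): Rn is restored before the store is emitted so that,
     * if the store faults and the instruction is restarted, Rn still
     * holds its original value; it is decremented for good only after
     * the store has been issued.
     */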
529 case 0x2005: /* mov.w Rm,@-Rn */
530 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
531 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
532 cpu_gregs[REG(B11_8)], 2);
533 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
534 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)],
535 cpu_gregs[REG(B11_8)], 2);
536 gen_op_stw_T0_T1(ctx);
537 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
538 cpu_gregs[REG(B11_8)], 2);
539 return;
540 case 0x2006: /* mov.l Rm,@-Rn */
541 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
542 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
543 cpu_gregs[REG(B11_8)], 4);
544 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
545 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)],
546 cpu_gregs[REG(B11_8)], 4);
547 gen_op_stl_T0_T1(ctx);
548 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
549 cpu_gregs[REG(B11_8)], 4);
550 return;
551 case 0x6004: /* mov.b @Rm+,Rn */
552 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
553 gen_op_ldb_T0_T0(ctx);
554 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
555 if ( B11_8 != B7_4 )
556 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
557 cpu_gregs[REG(B7_4)], 1);
558 return;
559 case 0x6005: /* mov.w @Rm+,Rn */
560 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
561 gen_op_ldw_T0_T0(ctx);
562 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
563 if ( B11_8 != B7_4 )
564 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
565 cpu_gregs[REG(B7_4)], 2);
566 return;
567 case 0x6006: /* mov.l @Rm+,Rn */
568 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
569 gen_op_ldl_T0_T0(ctx);
570 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
571 if ( B11_8 != B7_4 )
572 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
573 cpu_gregs[REG(B7_4)], 4);
574 return;
575 case 0x0004: /* mov.b Rm,@(R0,Rn) */
576 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
577 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
578 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
579 gen_op_stb_T0_T1(ctx);
580 return;
581 case 0x0005: /* mov.w Rm,@(R0,Rn) */
582 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
583 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
584 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
585 gen_op_stw_T0_T1(ctx);
586 return;
587 case 0x0006: /* mov.l Rm,@(R0,Rn) */
588 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
589 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
590 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
591 gen_op_stl_T0_T1(ctx);
592 return;
593 case 0x000c: /* mov.b @(R0,Rm),Rn */
594 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
595 gen_op_ldb_T0_T0(ctx);
596 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
597 return;
598 case 0x000d: /* mov.w @(R0,Rm),Rn */
599 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
600 gen_op_ldw_T0_T0(ctx);
601 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
602 return;
603 case 0x000e: /* mov.l @(R0,Rm),Rn */
604 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
605 gen_op_ldl_T0_T0(ctx);
606 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
607 return;
608 case 0x6008: /* swap.b Rm,Rn */
609 tcg_gen_andi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)], 0xffff0000);
610 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 0xff);
611 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 8);
612 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_T[0]);
613 tcg_gen_shri_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 8);
614 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xff);
615 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_T[0]);
616 return;
617 case 0x6009: /* swap.w Rm,Rn */
618 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 0xffff);
619 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 16);
620 tcg_gen_shri_i32(cpu_T[1], cpu_gregs[REG(B7_4)], 16);
621 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
622 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_T[0], cpu_T[1]);
623 return;
624 case 0x200d: /* xtrct Rm,Rn */
625 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 0xffff);
626 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 16);
627 tcg_gen_shri_i32(cpu_T[1], cpu_gregs[REG(B11_8)], 16);
628 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
629 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_T[0], cpu_T[1]);
630 return;
631 case 0x300c: /* add Rm,Rn */
632 tcg_gen_add_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
633 return;
634 case 0x300e: /* addc Rm,Rn */
635 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
636 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
637 gen_op_addc_T0_T1();
638 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
639 return;
640 case 0x300f: /* addv Rm,Rn */
641 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
642 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
643 gen_op_addv_T0_T1();
644 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
645 return;
646 case 0x2009: /* and Rm,Rn */
647 tcg_gen_and_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
648 return;
649 case 0x3000: /* cmp/eq Rm,Rn */
650 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
651 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
652 gen_cmp(TCG_COND_EQ, cpu_T[0], cpu_T[1]);
653 return;
654 case 0x3003: /* cmp/ge Rm,Rn */
655 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
656 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
657 gen_cmp(TCG_COND_GE, cpu_T[0], cpu_T[1]);
658 return;
659 case 0x3007: /* cmp/gt Rm,Rn */
660 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
661 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
662 gen_cmp(TCG_COND_GT, cpu_T[0], cpu_T[1]);
663 return;
664 case 0x3006: /* cmp/hi Rm,Rn */
665 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
666 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
667 gen_cmp(TCG_COND_GTU, cpu_T[0], cpu_T[1]);
668 return;
669 case 0x3002: /* cmp/hs Rm,Rn */
670 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
671 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
672 gen_cmp(TCG_COND_GEU, cpu_T[0], cpu_T[1]);
673 return;
674 case 0x200c: /* cmp/str Rm,Rn */
675 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
676 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
677 gen_op_cmp_str_T0_T1();
678 return;
679 case 0x2007: /* div0s Rm,Rn */
680 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
681 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
682 gen_op_div0s_T0_T1();
683 return;
684 case 0x3004: /* div1 Rm,Rn */
685 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
686 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
687 gen_op_div1_T0_T1();
688 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
689 return;
690 case 0x300d: /* dmuls.l Rm,Rn */
691 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
692 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
693 gen_op_dmulsl_T0_T1();
694 return;
695 case 0x3005: /* dmulu.l Rm,Rn */
696 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
697 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
698 gen_op_dmulul_T0_T1();
699 return;
700 case 0x600e: /* exts.b Rm,Rn */
701 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
702 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xff);
703 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
704 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
705 return;
706 case 0x600f: /* exts.w Rm,Rn */
707 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
708 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
709 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
710 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
711 return;
712 case 0x600c: /* extu.b Rm,Rn */
713 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
714 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xff);
715 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
716 return;
717 case 0x600d: /* extu.w Rm,Rn */
718 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
719 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
720 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
721 return;
722 case 0x000f: /* mac.l @Rm+,@Rn+ */
723 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
724 gen_op_ldl_T0_T0(ctx);
725 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
726 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
727 gen_op_ldl_T0_T0(ctx);
728 gen_op_macl_T0_T1();
729 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)], cpu_gregs[REG(B7_4)], 4);
730 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
731 return;
732 case 0x400f: /* mac.w @Rm+,@Rn+ */
733 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
734 gen_op_ldl_T0_T0(ctx);
735 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
736 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
737 gen_op_ldl_T0_T0(ctx);
738 gen_op_macw_T0_T1();
739 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 2);
740 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)], cpu_gregs[REG(B7_4)], 2);
741 return;
742 case 0x0007: /* mul.l Rm,Rn */
743 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
744 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
745 gen_op_mull_T0_T1();
746 return;
747 case 0x200f: /* muls.w Rm,Rn */
748 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
749 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
750 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
751 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
752 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
753 tcg_gen_ext16s_i32(cpu_T[1], cpu_T[1]);
754 gen_op_mulsw_T0_T1();
755 return;
756 case 0x200e: /* mulu.w Rm,Rn */
757 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
758 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
759 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
760 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
761 gen_op_muluw_T0_T1();
762 return;
763 case 0x600b: /* neg Rm,Rn */
764 tcg_gen_neg_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
765 return;
766 case 0x600a: /* negc Rm,Rn */
767 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
768 gen_op_negc_T0();
769 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
770 return;
771 case 0x6007: /* not Rm,Rn */
772 tcg_gen_not_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
773 return;
774 case 0x200b: /* or Rm,Rn */
775 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
776 return;
777 case 0x400c: /* shad Rm,Rn */
778 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
779 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
780 gen_op_shad_T0_T1();
781 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
782 return;
783 case 0x400d: /* shld Rm,Rn */
784 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
785 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
786 gen_op_shld_T0_T1();
787 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
788 return;
789 case 0x3008: /* sub Rm,Rn */
790 tcg_gen_sub_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
791 return;
792 case 0x300a: /* subc Rm,Rn */
793 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
794 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
795 gen_op_subc_T0_T1();
796 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
797 return;
798 case 0x300b: /* subv Rm,Rn */
799 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
800 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
801 gen_op_subv_T0_T1();
802 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
803 return;
804 case 0x2008: /* tst Rm,Rn */
805 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
806 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
807 tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1]);
808 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
809 return;
810 case 0x200a: /* xor Rm,Rn */
811 tcg_gen_xor_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
812 return;
813 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
814 if (ctx->fpscr & FPSCR_SZ) {
815 gen_op_fmov_drN_DT0(XREG(B7_4));
816 gen_op_fmov_DT0_drN(XREG(B11_8));
817 } else {
818 gen_op_fmov_frN_FT0(FREG(B7_4));
819 gen_op_fmov_FT0_frN(FREG(B11_8));
820 }
821 return;
822 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
823 if (ctx->fpscr & FPSCR_SZ) {
824 gen_op_fmov_drN_DT0(XREG(B7_4));
825 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
826 gen_op_stfq_DT0_T1(ctx);
827 } else {
828 gen_op_fmov_frN_FT0(FREG(B7_4));
829 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
830 gen_op_stfl_FT0_T1(ctx);
831 }
832 return;
833 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
834 if (ctx->fpscr & FPSCR_SZ) {
835 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
836 gen_op_ldfq_T0_DT0(ctx);
837 gen_op_fmov_DT0_drN(XREG(B11_8));
838 } else {
839 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
840 gen_op_ldfl_T0_FT0(ctx);
841 gen_op_fmov_FT0_frN(FREG(B11_8));
842 }
843 return;
844 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
845 if (ctx->fpscr & FPSCR_SZ) {
846 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
847 gen_op_ldfq_T0_DT0(ctx);
848 gen_op_fmov_DT0_drN(XREG(B11_8));
849 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
850 cpu_gregs[REG(B7_4)], 8);
851 } else {
852 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
853 gen_op_ldfl_T0_FT0(ctx);
854 gen_op_fmov_FT0_frN(FREG(B11_8));
855 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
856 cpu_gregs[REG(B7_4)], 4);
857 }
858 return;
859 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
860 if (ctx->fpscr & FPSCR_SZ) {
861 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
862 gen_op_fmov_drN_DT0(XREG(B7_4));
863 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
864 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
865 gen_op_stfq_DT0_T1(ctx);
866 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
867 } else {
868 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
869 gen_op_fmov_frN_FT0(FREG(B7_4));
870 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
871 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
872 gen_op_stfl_FT0_T1(ctx);
873 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
874 }
875 return;
876 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
877 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
878 if (ctx->fpscr & FPSCR_SZ) {
879 gen_op_ldfq_T0_DT0(ctx);
880 gen_op_fmov_DT0_drN(XREG(B11_8));
881 } else {
882 gen_op_ldfl_T0_FT0(ctx);
883 gen_op_fmov_FT0_frN(FREG(B11_8));
884 }
885 return;
886 case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
887 if (ctx->fpscr & FPSCR_SZ) {
888 gen_op_fmov_drN_DT0(XREG(B7_4));
889 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
890 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
891 gen_op_stfq_DT0_T1(ctx);
892 } else {
893 gen_op_fmov_frN_FT0(FREG(B7_4));
894 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
895 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
896 gen_op_stfl_FT0_T1(ctx);
897 }
898 return;
899 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
900 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
901 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
902 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
903 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
904 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
905 if (ctx->fpscr & FPSCR_PR) {
906 if (ctx->opcode & 0x0110)
907 break; /* illegal instruction */
908 gen_op_fmov_drN_DT1(DREG(B7_4));
909 gen_op_fmov_drN_DT0(DREG(B11_8));
910 }
911 else {
912 gen_op_fmov_frN_FT1(FREG(B7_4));
913 gen_op_fmov_frN_FT0(FREG(B11_8));
914 }
915
916 switch (ctx->opcode & 0xf00f) {
917 case 0xf000: /* fadd Rm,Rn */
918 ctx->fpscr & FPSCR_PR ? gen_op_fadd_DT() : gen_op_fadd_FT();
919 break;
920 case 0xf001: /* fsub Rm,Rn */
921 ctx->fpscr & FPSCR_PR ? gen_op_fsub_DT() : gen_op_fsub_FT();
922 break;
923 case 0xf002: /* fmul Rm,Rn */
924 ctx->fpscr & FPSCR_PR ? gen_op_fmul_DT() : gen_op_fmul_FT();
925 break;
926 case 0xf003: /* fdiv Rm,Rn */
927 ctx->fpscr & FPSCR_PR ? gen_op_fdiv_DT() : gen_op_fdiv_FT();
928 break;
929 case 0xf004: /* fcmp/eq Rm,Rn */
930 ctx->fpscr & FPSCR_PR ? gen_op_fcmp_eq_DT() : gen_op_fcmp_eq_FT();
931 return;
932 case 0xf005: /* fcmp/gt Rm,Rn */
933 ctx->fpscr & FPSCR_PR ? gen_op_fcmp_gt_DT() : gen_op_fcmp_gt_FT();
934 return;
935 }
936
937 if (ctx->fpscr & FPSCR_PR) {
938 gen_op_fmov_DT0_drN(DREG(B11_8));
939 }
940 else {
941 gen_op_fmov_FT0_frN(FREG(B11_8));
942 }
943 return;
944 }
945
946 switch (ctx->opcode & 0xff00) {
947 case 0xc900: /* and #imm,R0 */
948 tcg_gen_andi_i32(cpu_gregs[REG(0)], cpu_gregs[REG(0)], B7_0);
949 return;
950 case 0xcd00: /* and.b #imm,@(R0,GBR) */
951 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
952 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
953 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
954 gen_op_ldub_T0_T0(ctx);
955 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], B7_0);
956 gen_op_stb_T0_T1(ctx);
957 return;
958 case 0x8b00: /* bf label */
959 CHECK_NOT_DELAY_SLOT
960 gen_conditional_jump(ctx, ctx->pc + 2,
961 ctx->pc + 4 + B7_0s * 2);
962 ctx->bstate = BS_BRANCH;
963 return;
964 case 0x8f00: /* bf/s label */
965 CHECK_NOT_DELAY_SLOT
966 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
967 ctx->flags |= DELAY_SLOT_CONDITIONAL;
968 return;
969 case 0x8900: /* bt label */
970 CHECK_NOT_DELAY_SLOT
971 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
972 ctx->pc + 2);
973 ctx->bstate = BS_BRANCH;
974 return;
975 case 0x8d00: /* bt/s label */
976 CHECK_NOT_DELAY_SLOT
977 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
978 ctx->flags |= DELAY_SLOT_CONDITIONAL;
979 return;
980 case 0x8800: /* cmp/eq #imm,R0 */
981 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
982 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], B7_0s);
983 return;
984 case 0xc400: /* mov.b @(disp,GBR),R0 */
985 gen_op_stc_gbr_T0();
986 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0);
987 gen_op_ldb_T0_T0(ctx);
988 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
989 return;
990 case 0xc500: /* mov.w @(disp,GBR),R0 */
991 gen_op_stc_gbr_T0();
992 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 2);
993 gen_op_ldw_T0_T0(ctx);
994 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
995 return;
996 case 0xc600: /* mov.l @(disp,GBR),R0 */
997 gen_op_stc_gbr_T0();
998 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 4);
999 gen_op_ldl_T0_T0(ctx);
1000 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
1001 return;
1002 case 0xc000: /* mov.b R0,@(disp,GBR) */
1003 gen_op_stc_gbr_T0();
1004 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0);
1005 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1006 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1007 gen_op_stb_T0_T1(ctx);
1008 return;
1009 case 0xc100: /* mov.w R0,@(disp,GBR) */
1010 gen_op_stc_gbr_T0();
1011 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 2);
1012 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1013 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1014 gen_op_stw_T0_T1(ctx);
1015 return;
1016 case 0xc200: /* mov.l R0,@(disp,GBR) */
1017 gen_op_stc_gbr_T0();
1018 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 4);
1019 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1020 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1021 gen_op_stl_T0_T1(ctx);
1022 return;
1023 case 0x8000: /* mov.b R0,@(disp,Rn) */
1024 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1025 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B7_4)]);
1026 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], B3_0);
1027 gen_op_stb_T0_T1(ctx);
1028 return;
1029 case 0x8100: /* mov.w R0,@(disp,Rn) */
1030 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1031 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B7_4)]);
1032 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], B3_0 * 2);
1033 gen_op_stw_T0_T1(ctx);
1034 return;
1035 case 0x8400: /* mov.b @(disp,Rn),R0 */
1036 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
1037 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B3_0);
1038 gen_op_ldb_T0_T0(ctx);
1039 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
1040 return;
1041 case 0x8500: /* mov.w @(disp,Rn),R0 */
1042 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
1043 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B3_0 * 2);
1044 gen_op_ldw_T0_T0(ctx);
1045 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
1046 return;
1047 case 0xc700: /* mova @(disp,PC),R0 */
1048 tcg_gen_movi_i32(cpu_gregs[REG(0)],
1049 ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1050 return;
1051 case 0xcb00: /* or #imm,R0 */
1052 tcg_gen_ori_i32(cpu_gregs[REG(0)], cpu_gregs[REG(0)], B7_0);
1053 return;
1054 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1055 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1056 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
1057 	tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);	/* keep the address in T1; the load below clobbers T0 */
1058 gen_op_ldub_T0_T0(ctx);
1059 tcg_gen_ori_i32(cpu_T[0], cpu_T[0], B7_0);
1060 gen_op_stb_T0_T1(ctx);
1061 return;
1062 case 0xc300: /* trapa #imm */
1063 CHECK_NOT_DELAY_SLOT
1064 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1065 tcg_gen_movi_i32(cpu_T[0], B7_0);
1066 tcg_gen_helper_0_1(helper_trapa, cpu_T[0]);
1067 ctx->bstate = BS_BRANCH;
1068 return;
1069 case 0xc800: /* tst #imm,R0 */
1070 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(0)], B7_0);
1071 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
1072 return;
1073 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1074 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1075 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
1076 gen_op_ldub_T0_T0(ctx);
1077 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], B7_0);
1078 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
1079 return;
1080 case 0xca00: /* xor #imm,R0 */
1081 tcg_gen_xori_i32(cpu_gregs[REG(0)], cpu_gregs[REG(0)], B7_0);
1082 return;
1083 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1084 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1085 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
1086 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1087 gen_op_ldub_T0_T0(ctx);
1088 tcg_gen_xori_i32(cpu_T[0], cpu_T[0], B7_0);
1089 gen_op_stb_T0_T1(ctx);
1090 return;
1091 }
1092
1093 switch (ctx->opcode & 0xf08f) {
1094 case 0x408e: /* ldc Rm,Rn_BANK */
1095 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1096 tcg_gen_mov_i32(cpu_gregs[ALTREG(B6_4)], cpu_T[0]);
1097 return;
1098 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1099 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1100 gen_op_ldl_T0_T0(ctx);
1101 tcg_gen_mov_i32(cpu_gregs[ALTREG(B6_4)], cpu_T[0]);
1102 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1103 return;
1104 case 0x0082: /* stc Rm_BANK,Rn */
1105 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[ALTREG(B6_4)]);
1106 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
1107 return;
1108 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1109 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1110 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
1111 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[ALTREG(B6_4)]);
1112 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1113 gen_op_stl_T0_T1(ctx);
1114 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1115 return;
1116 }
1117
1118 switch (ctx->opcode & 0xf0ff) {
1119 case 0x0023: /* braf Rn */
1120 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1121 tcg_gen_addi_i32(cpu_delayed_pc, cpu_T[0], ctx->pc + 4);
1122 ctx->flags |= DELAY_SLOT;
1123 ctx->delayed_pc = (uint32_t) - 1;
1124 return;
1125 case 0x0003: /* bsrf Rn */
1126 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1127 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1128 tcg_gen_add_i32(cpu_delayed_pc, cpu_T[0], cpu_pr);
1129 ctx->flags |= DELAY_SLOT;
1130 ctx->delayed_pc = (uint32_t) - 1;
1131 return;
1132 case 0x4015: /* cmp/pl Rn */
1133 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1134 gen_cmp_imm(TCG_COND_GT, cpu_T[0], 0);
1135 return;
1136 case 0x4011: /* cmp/pz Rn */
1137 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1138 gen_cmp_imm(TCG_COND_GE, cpu_T[0], 0);
1139 return;
1140 case 0x4010: /* dt Rn */
1141 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 1);
1142 gen_cmp_imm(TCG_COND_EQ, cpu_gregs[REG(B11_8)], 0);
1143 return;
1144 case 0x402b: /* jmp @Rn */
1145 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1146 tcg_gen_mov_i32(cpu_delayed_pc, cpu_T[0]);
1147 ctx->flags |= DELAY_SLOT;
1148 ctx->delayed_pc = (uint32_t) - 1;
1149 return;
1150 case 0x400b: /* jsr @Rn */
1151 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1152 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1153 tcg_gen_mov_i32(cpu_delayed_pc, cpu_T[0]);
1154 ctx->flags |= DELAY_SLOT;
1155 ctx->delayed_pc = (uint32_t) - 1;
1156 return;
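/*
 * The LDST macro below generates the four load/store forms for one
 * control or system register: ldc/lds Rm,<reg>, ldc.l/lds.l @Rm+,<reg>,
 * stc/sts <reg>,Rn and stc.l/sts.l <reg>,@-Rn.  For instance
 * LDST(gbr, 0x401e, 0x4017, ldc, 0x0012, 0x4013, stc,) emits, for
 * opcode 0x401e, roughly:
 *
 *     tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
 *     gen_op_ldc_T0_gbr();
 *
 * The 'extrald' argument lets the SR and FPSCR loads additionally force
 * BS_STOP, since they can change the translation mode.
 */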
1157 #define LDST(reg,ldnum,ldpnum,ldop,stnum,stpnum,stop,extrald) \
1158 case ldnum: \
1159 tcg_gen_mov_i32 (cpu_T[0], cpu_gregs[REG(B11_8)]); \
1160 gen_op_##ldop##_T0_##reg (); \
1161 extrald \
1162 return; \
1163 case ldpnum: \
1164 tcg_gen_mov_i32 (cpu_T[0], cpu_gregs[REG(B11_8)]); \
1165 gen_op_ldl_T0_T0 (ctx); \
1166 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], \
1167 cpu_gregs[REG(B11_8)], 4); \
1168 gen_op_##ldop##_T0_##reg (); \
1169 extrald \
1170 return; \
1171 case stnum: \
1172 gen_op_##stop##_##reg##_T0 (); \
1173 tcg_gen_mov_i32 (cpu_gregs[REG(B11_8)], cpu_T[0]); \
1174 return; \
1175 case stpnum: \
1176 gen_op_##stop##_##reg##_T0 (); \
1177 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], \
1178 cpu_gregs[REG(B11_8)], 4); \
1179 tcg_gen_mov_i32 (cpu_T[1], cpu_gregs[REG(B11_8)]); \
1180 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], \
1181 cpu_gregs[REG(B11_8)], 4); \
1182 gen_op_stl_T0_T1 (ctx); \
1183 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], \
1184 cpu_gregs[REG(B11_8)], 4); \
1185 return;
1186 LDST(sr, 0x400e, 0x4007, ldc, 0x0002, 0x4003, stc, ctx->bstate =
1187 BS_STOP;)
1188 LDST(gbr, 0x401e, 0x4017, ldc, 0x0012, 0x4013, stc,)
1189 LDST(vbr, 0x402e, 0x4027, ldc, 0x0022, 0x4023, stc,)
1190 LDST(ssr, 0x403e, 0x4037, ldc, 0x0032, 0x4033, stc,)
1191 LDST(spc, 0x404e, 0x4047, ldc, 0x0042, 0x4043, stc,)
1192 LDST(dbr, 0x40fa, 0x40f6, ldc, 0x00fa, 0x40f2, stc,)
1193 LDST(mach, 0x400a, 0x4006, lds, 0x000a, 0x4002, sts,)
1194 LDST(macl, 0x401a, 0x4016, lds, 0x001a, 0x4012, sts,)
1195 LDST(pr, 0x402a, 0x4026, lds, 0x002a, 0x4022, sts,)
1196 LDST(fpul, 0x405a, 0x4056, lds, 0x005a, 0x4052, sts,)
1197 LDST(fpscr, 0x406a, 0x4066, lds, 0x006a, 0x4062, sts, ctx->bstate =
1198 BS_STOP;)
1199 case 0x00c3: /* movca.l R0,@Rn */
1200 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1201 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
1202 gen_op_stl_T0_T1(ctx);
1203 return;
1204 case 0x0029: /* movt Rn */
1205 tcg_gen_andi_i32(cpu_gregs[REG(B11_8)], cpu_sr, SR_T);
1206 return;
1207 case 0x0093: /* ocbi @Rn */
1208 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1209 gen_op_ldl_T0_T0(ctx);
1210 return;
1211 case 0x00a3: /* ocbp @Rn */
1212 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1213 gen_op_ldl_T0_T0(ctx);
1214 return;
1215 case 0x00b3: /* ocbwb @Rn */
1216 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1217 gen_op_ldl_T0_T0(ctx);
1218 return;
1219 case 0x0083: /* pref @Rn */
1220 return;
1221 case 0x4024: /* rotcl Rn */
1222 gen_op_rotcl_Rn(REG(B11_8));
1223 return;
1224 case 0x4025: /* rotcr Rn */
1225 gen_op_rotcr_Rn(REG(B11_8));
1226 return;
1227 case 0x4004: /* rotl Rn */
1228 gen_op_rotl_Rn(REG(B11_8));
1229 return;
1230 case 0x4005: /* rotr Rn */
1231 gen_op_rotr_Rn(REG(B11_8));
1232 return;
1233 case 0x4000: /* shll Rn */
1234 case 0x4020: /* shal Rn */
1235 gen_op_shal_Rn(REG(B11_8));
1236 return;
1237 case 0x4021: /* shar Rn */
1238 gen_op_shar_Rn(REG(B11_8));
1239 return;
1240 case 0x4001: /* shlr Rn */
1241 gen_op_shlr_Rn(REG(B11_8));
1242 return;
1243 case 0x4008: /* shll2 Rn */
1244 tcg_gen_shli_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 2);
1245 return;
1246 case 0x4018: /* shll8 Rn */
1247 tcg_gen_shli_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
1248 return;
1249 case 0x4028: /* shll16 Rn */
1250 tcg_gen_shli_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 16);
1251 return;
1252 case 0x4009: /* shlr2 Rn */
1253 tcg_gen_shri_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 2);
1254 return;
1255 case 0x4019: /* shlr8 Rn */
1256 tcg_gen_shri_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
1257 return;
1258 case 0x4029: /* shlr16 Rn */
1259 tcg_gen_shri_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 16);
1260 return;
1261 case 0x401b: /* tas.b @Rn */
1262 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1263 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1264 gen_op_ldub_T0_T0(ctx);
1265 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
1266 tcg_gen_ori_i32(cpu_T[0], cpu_T[0], 0x80);
1267 gen_op_stb_T0_T1(ctx);
1268 return;
1269 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1270 gen_op_movl_fpul_FT0();
1271 gen_op_fmov_FT0_frN(FREG(B11_8));
1272 return;
1273 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1274 gen_op_fmov_frN_FT0(FREG(B11_8));
1275 gen_op_movl_FT0_fpul();
1276 return;
1277 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1278 if (ctx->fpscr & FPSCR_PR) {
1279 if (ctx->opcode & 0x0100)
1280 break; /* illegal instruction */
1281 gen_op_float_DT();
1282 gen_op_fmov_DT0_drN(DREG(B11_8));
1283 }
1284 else {
1285 gen_op_float_FT();
1286 gen_op_fmov_FT0_frN(FREG(B11_8));
1287 }
1288 return;
1289 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1290 if (ctx->fpscr & FPSCR_PR) {
1291 if (ctx->opcode & 0x0100)
1292 break; /* illegal instruction */
1293 gen_op_fmov_drN_DT0(DREG(B11_8));
1294 gen_op_ftrc_DT();
1295 }
1296 else {
1297 gen_op_fmov_frN_FT0(FREG(B11_8));
1298 gen_op_ftrc_FT();
1299 }
1300 return;
1301 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1302 gen_op_fneg_frN(FREG(B11_8));
1303 return;
1304 case 0xf05d: /* fabs FRn/DRn */
1305 if (ctx->fpscr & FPSCR_PR) {
1306 if (ctx->opcode & 0x0100)
1307 break; /* illegal instruction */
1308 gen_op_fmov_drN_DT0(DREG(B11_8));
1309 gen_op_fabs_DT();
1310 gen_op_fmov_DT0_drN(DREG(B11_8));
1311 } else {
1312 gen_op_fmov_frN_FT0(FREG(B11_8));
1313 gen_op_fabs_FT();
1314 gen_op_fmov_FT0_frN(FREG(B11_8));
1315 }
1316 return;
1317 case 0xf06d: /* fsqrt FRn */
1318 if (ctx->fpscr & FPSCR_PR) {
1319 if (ctx->opcode & 0x0100)
1320 break; /* illegal instruction */
1321 gen_op_fmov_drN_DT0(FREG(B11_8));
1322 gen_op_fsqrt_DT();
1323 gen_op_fmov_DT0_drN(FREG(B11_8));
1324 } else {
1325 gen_op_fmov_frN_FT0(FREG(B11_8));
1326 gen_op_fsqrt_FT();
1327 gen_op_fmov_FT0_frN(FREG(B11_8));
1328 }
1329 return;
1330 case 0xf07d: /* fsrra FRn */
1331 break;
1332 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1333 if (!(ctx->fpscr & FPSCR_PR)) {
1334 tcg_gen_movi_i32(cpu_T[0], 0);
1335 gen_op_fmov_T0_frN(FREG(B11_8));
1336 return;
1337 }
1338 break;
1339 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1340 if (!(ctx->fpscr & FPSCR_PR)) {
1341 tcg_gen_movi_i32(cpu_T[0], 0x3f800000);
1342 gen_op_fmov_T0_frN(FREG(B11_8));
1343 return;
1344 }
1345 break;
1346 case 0xf0ad: /* fcnvsd FPUL,DRn */
1347 gen_op_movl_fpul_FT0();
1348 gen_op_fcnvsd_FT_DT();
1349 gen_op_fmov_DT0_drN(DREG(B11_8));
1350 return;
1351 case 0xf0bd: /* fcnvds DRn,FPUL */
1352 gen_op_fmov_drN_DT0(DREG(B11_8));
1353 gen_op_fcnvds_DT_FT();
1354 gen_op_movl_FT0_fpul();
1355 return;
1356 }
1357
1358 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1359 ctx->opcode, ctx->pc);
1360 tcg_gen_helper_0_0(helper_raise_illegal_instruction);
1361 ctx->bstate = BS_EXCP;
1362 }
1363
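/*
 * Translate one instruction and handle delay slots: if the previous
 * instruction opened a delay slot, the branch itself is emitted here,
 * right after the slot instruction has been translated; if the current
 * instruction opens one, the pending flags are stored for the next
 * iteration (and for exception handling inside the slot).
 */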
1364 void decode_opc(DisasContext * ctx)
1365 {
1366 uint32_t old_flags = ctx->flags;
1367
1368 _decode_opc(ctx);
1369
1370 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1371 if (ctx->flags & DELAY_SLOT_CLEARME) {
1372 gen_store_flags(0);
1373 } else {
1374 /* go out of the delay slot */
1375 uint32_t new_flags = ctx->flags;
1376 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1377 gen_store_flags(new_flags);
1378 }
1379 ctx->flags = 0;
1380 ctx->bstate = BS_BRANCH;
1381 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1382 gen_delayed_conditional_jump(ctx);
1383 } else if (old_flags & DELAY_SLOT) {
1384 gen_jump(ctx);
1385 }
1386
1387 }
1388
1389 /* go into a delay slot */
1390 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1391 gen_store_flags(ctx->flags);
1392 }
1393
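/*
 * Main translation loop: decode instructions starting at tb->pc until a
 * branch or exception is reached, the opcode buffer or icount budget is
 * exhausted, or a page boundary is crossed.  When search_pc is set, the
 * gen_opc_* side tables are filled in so a host PC can later be mapped
 * back to a guest PC (see gen_pc_load).
 */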
1394 static inline void
1395 gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
1396 int search_pc)
1397 {
1398 DisasContext ctx;
1399 target_ulong pc_start;
1400 static uint16_t *gen_opc_end;
1401 int i, ii;
1402 int num_insns;
1403 int max_insns;
1404
1405 pc_start = tb->pc;
1406 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1407 ctx.pc = pc_start;
1408 ctx.flags = (uint32_t)tb->flags;
1409 ctx.bstate = BS_NONE;
1410 ctx.sr = env->sr;
1411 ctx.fpscr = env->fpscr;
1412 ctx.memidx = (env->sr & SR_MD) ? 1 : 0;
1413 /* We don't know if the delayed pc came from a dynamic or static branch,
1414 so assume it is a dynamic branch. */
1415 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1416 ctx.tb = tb;
1417 ctx.singlestep_enabled = env->singlestep_enabled;
1418
1419 #ifdef DEBUG_DISAS
1420 if (loglevel & CPU_LOG_TB_CPU) {
1421 fprintf(logfile,
1422 "------------------------------------------------\n");
1423 cpu_dump_state(env, logfile, fprintf, 0);
1424 }
1425 #endif
1426
1427 ii = -1;
1428 num_insns = 0;
1429 max_insns = tb->cflags & CF_COUNT_MASK;
1430 if (max_insns == 0)
1431 max_insns = CF_COUNT_MASK;
1432 gen_icount_start();
1433 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1434 if (env->nb_breakpoints > 0) {
1435 for (i = 0; i < env->nb_breakpoints; i++) {
1436 if (ctx.pc == env->breakpoints[i]) {
1437 /* We have hit a breakpoint - make sure PC is up-to-date */
1438 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1439 tcg_gen_helper_0_0(helper_debug);
1440 ctx.bstate = BS_EXCP;
1441 break;
1442 }
1443 }
1444 }
1445 if (search_pc) {
1446 i = gen_opc_ptr - gen_opc_buf;
1447 if (ii < i) {
1448 ii++;
1449 while (ii < i)
1450 gen_opc_instr_start[ii++] = 0;
1451 }
1452 gen_opc_pc[ii] = ctx.pc;
1453 gen_opc_hflags[ii] = ctx.flags;
1454 gen_opc_instr_start[ii] = 1;
1455 gen_opc_icount[ii] = num_insns;
1456 }
1457 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1458 gen_io_start();
1459 #if 0
1460 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1461 fflush(stderr);
1462 #endif
1463 ctx.opcode = lduw_code(ctx.pc);
1464 decode_opc(&ctx);
1465 num_insns++;
1466 ctx.pc += 2;
1467 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1468 break;
1469 if (env->singlestep_enabled)
1470 break;
1471 if (num_insns >= max_insns)
1472 break;
1473 #ifdef SH4_SINGLE_STEP
1474 break;
1475 #endif
1476 }
1477 if (tb->cflags & CF_LAST_IO)
1478 gen_io_end();
1479 if (env->singlestep_enabled) {
1480 tcg_gen_helper_0_0(helper_debug);
1481 } else {
1482 switch (ctx.bstate) {
1483 case BS_STOP:
1484 /* gen_op_interrupt_restart(); */
1485 /* fall through */
1486 case BS_NONE:
1487 if (ctx.flags) {
1488 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1489 }
1490 gen_goto_tb(&ctx, 0, ctx.pc);
1491 break;
1492 case BS_EXCP:
1493 /* gen_op_interrupt_restart(); */
1494 tcg_gen_exit_tb(0);
1495 break;
1496 case BS_BRANCH:
1497 default:
1498 break;
1499 }
1500 }
1501
1502 gen_icount_end(tb, num_insns);
1503 *gen_opc_ptr = INDEX_op_end;
1504 if (search_pc) {
1505 i = gen_opc_ptr - gen_opc_buf;
1506 ii++;
1507 while (ii <= i)
1508 gen_opc_instr_start[ii++] = 0;
1509 } else {
1510 tb->size = ctx.pc - pc_start;
1511 tb->icount = num_insns;
1512 }
1513
1514 #ifdef DEBUG_DISAS
1515 #ifdef SH4_DEBUG_DISAS
1516 if (loglevel & CPU_LOG_TB_IN_ASM)
1517 fprintf(logfile, "\n");
1518 #endif
1519 if (loglevel & CPU_LOG_TB_IN_ASM) {
1520 fprintf(logfile, "IN:\n"); /* , lookup_symbol(pc_start)); */
1521 target_disas(logfile, pc_start, ctx.pc - pc_start, 0);
1522 fprintf(logfile, "\n");
1523 }
1524 #endif
1525 }
1526
1527 void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
1528 {
1529 gen_intermediate_code_internal(env, tb, 0);
1530 }
1531
1532 void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
1533 {
1534 gen_intermediate_code_internal(env, tb, 1);
1535 }
1536
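/*
 * Restore the guest PC and flags for the instruction at position
 * 'pc_pos', using the side tables built while translating with
 * search_pc set.
 */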
1537 void gen_pc_load(CPUState *env, TranslationBlock *tb,
1538 unsigned long searched_pc, int pc_pos, void *puc)
1539 {
1540 env->pc = gen_opc_pc[pc_pos];
1541 env->flags = gen_opc_hflags[pc_pos];
1542 }