target-sh4/translate.c
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <assert.h>
26
27 #define DEBUG_DISAS
28 #define SH4_DEBUG_DISAS
29 //#define SH4_SINGLE_STEP
30
31 #include "cpu.h"
32 #include "exec-all.h"
33 #include "disas.h"
34 #include "helper.h"
35 #include "tcg-op.h"
36 #include "qemu-common.h"
37
38 typedef struct DisasContext {
39 struct TranslationBlock *tb;
40 target_ulong pc;
41 uint32_t sr;
42 uint32_t fpscr;
43 uint16_t opcode;
44 uint32_t flags;
45 int bstate;
46 int memidx;
47 uint32_t delayed_pc;
48 int singlestep_enabled;
49 } DisasContext;
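/* Note: sr, fpscr and flags are snapshots of the CPU state taken when
   translation of a TB starts; the REG()/FREG() macros and the delay slot
   logic below decode against these static copies, which is why loads into
   SR or FPSCR (and frchg/fschg) force the TB to end with BS_STOP. */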
50
51 enum {
52 BS_NONE = 0, /* We go out of the TB without reaching a branch or an
53 * exception condition
54 */
55 BS_STOP = 1, /* We want to stop translation for any reason */
56 BS_BRANCH = 2, /* We reached a branch condition */
57 BS_EXCP = 3, /* We reached an exception condition */
58 };
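/* The translation loop in gen_intermediate_code_internal() keeps decoding
   instructions while bstate == BS_NONE; any other value ends the current TB. */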
59
60 /* global register indexes */
61 static TCGv cpu_env;
62 static TCGv cpu_gregs[24];
63 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
64 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
65 static TCGv cpu_pr, cpu_fpscr, cpu_fpul;
66
67 /* internal register indexes */
68 static TCGv cpu_flags, cpu_delayed_pc;
69
70 /* dyngen register indexes */
71 static TCGv cpu_T[2];
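/* Convention used by the memory wrappers below: loads take their address
   in T0 and leave the loaded value in T0, stores write the value in T0 to
   the address held in T1. */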
72
73 #include "gen-icount.h"
74
75 static void sh4_translate_init(void)
76 {
77 int i;
78 static int done_init = 0;
79 static const char * const gregnames[24] = {
80 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
81 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
82 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
83 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
84 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
85 };
86
87 if (done_init)
88 return;
89
90 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
91 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
92 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
93
94 for (i = 0; i < 24; i++)
95 cpu_gregs[i] = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
96 offsetof(CPUState, gregs[i]),
97 gregnames[i]);
98
99 cpu_pc = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
100 offsetof(CPUState, pc), "PC");
101 cpu_sr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
102 offsetof(CPUState, sr), "SR");
103 cpu_ssr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
104 offsetof(CPUState, ssr), "SSR");
105 cpu_spc = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
106 offsetof(CPUState, spc), "SPC");
107 cpu_gbr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
108 offsetof(CPUState, gbr), "GBR");
109 cpu_vbr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
110 offsetof(CPUState, vbr), "VBR");
111 cpu_sgr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
112 offsetof(CPUState, sgr), "SGR");
113 cpu_dbr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
114 offsetof(CPUState, dbr), "DBR");
115 cpu_mach = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
116 offsetof(CPUState, mach), "MACH");
117 cpu_macl = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
118 offsetof(CPUState, macl), "MACL");
119 cpu_pr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
120 offsetof(CPUState, pr), "PR");
121 cpu_fpscr = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
122 offsetof(CPUState, fpscr), "FPSCR");
123 cpu_fpul = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
124 offsetof(CPUState, fpul), "FPUL");
125
126 cpu_flags = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
127 offsetof(CPUState, flags), "_flags_");
128 cpu_delayed_pc = tcg_global_mem_new(TCG_TYPE_I32, TCG_AREG0,
129 offsetof(CPUState, delayed_pc),
130 "_delayed_pc_");
131
132 /* register helpers */
133 #undef DEF_HELPER
134 #define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
135 #include "helper.h"
136
137 done_init = 1;
138 }
139
140 #ifdef CONFIG_USER_ONLY
141
142 #define GEN_OP_LD(width, reg) \
143 void gen_op_ld##width##_T0_##reg (DisasContext *ctx) { \
144 gen_op_ld##width##_T0_##reg##_raw(); \
145 }
146 #define GEN_OP_ST(width, reg) \
147 void gen_op_st##width##_##reg##_T1 (DisasContext *ctx) { \
148 gen_op_st##width##_##reg##_T1_raw(); \
149 }
150
151 #else
152
153 #define GEN_OP_LD(width, reg) \
154 void gen_op_ld##width##_T0_##reg (DisasContext *ctx) { \
155 if (ctx->memidx) gen_op_ld##width##_T0_##reg##_kernel(); \
156 else gen_op_ld##width##_T0_##reg##_user();\
157 }
158 #define GEN_OP_ST(width, reg) \
159 void gen_op_st##width##_##reg##_T1 (DisasContext *ctx) { \
160 if (ctx->memidx) gen_op_st##width##_##reg##_T1_kernel(); \
161 else gen_op_st##width##_##reg##_T1_user();\
162 }
163
164 #endif
165
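/* These wrappers pick the _kernel or _user access op from ctx->memidx,
   i.e. the privilege level (SR.MD) captured at translation time; the
   user-only build always uses the _raw variant. */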
166 GEN_OP_LD(ub, T0)
167 GEN_OP_LD(b, T0)
168 GEN_OP_ST(b, T0)
169 GEN_OP_LD(uw, T0)
170 GEN_OP_LD(w, T0)
171 GEN_OP_ST(w, T0)
172 GEN_OP_LD(l, T0)
173 GEN_OP_ST(l, T0)
174 GEN_OP_LD(fl, FT0)
175 GEN_OP_ST(fl, FT0)
176 GEN_OP_LD(fq, DT0)
177 GEN_OP_ST(fq, DT0)
178
179 void cpu_dump_state(CPUState * env, FILE * f,
180 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
181 int flags)
182 {
183 int i;
184 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
185 env->pc, env->sr, env->pr, env->fpscr);
186 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
187 env->spc, env->ssr, env->gbr, env->vbr);
188 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
189 env->sgr, env->dbr, env->delayed_pc, env->fpul);
190 for (i = 0; i < 24; i += 4) {
191 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
192 i, env->gregs[i], i + 1, env->gregs[i + 1],
193 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
194 }
195 if (env->flags & DELAY_SLOT) {
196 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
197 env->delayed_pc);
198 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
199 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
200 env->delayed_pc);
201 }
202 }
203
204 void cpu_sh4_reset(CPUSH4State * env)
205 {
206 #if defined(CONFIG_USER_ONLY)
207 env->sr = SR_FD; /* FD - kernel does lazy fpu context switch */
208 #else
209 env->sr = 0x700000F0; /* MD, RB, BL, I3-I0 */
210 #endif
211 env->vbr = 0;
212 env->pc = 0xA0000000;
213 #if defined(CONFIG_USER_ONLY)
214 env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
215 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
216 #else
217 env->fpscr = 0x00040001; /* CPU reset value according to SH4 manual */
218 set_float_rounding_mode(float_round_to_zero, &env->fp_status);
219 #endif
220 env->mmucr = 0;
221 }
222
223 CPUSH4State *cpu_sh4_init(const char *cpu_model)
224 {
225 CPUSH4State *env;
226
227 env = qemu_mallocz(sizeof(CPUSH4State));
228 if (!env)
229 return NULL;
230 cpu_exec_init(env);
231 sh4_translate_init();
232 cpu_sh4_reset(env);
233 tlb_flush(env, 1);
234 return env;
235 }
236
237 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
238 {
239 TranslationBlock *tb;
240 tb = ctx->tb;
241
242 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
243 !ctx->singlestep_enabled) {
244 /* Use a direct jump if in the same page and single-stepping is not enabled */
245 tcg_gen_goto_tb(n);
246 tcg_gen_movi_i32(cpu_pc, dest);
247 tcg_gen_exit_tb((long) tb + n);
248 } else {
249 tcg_gen_movi_i32(cpu_pc, dest);
250 if (ctx->singlestep_enabled)
251 gen_op_debug();
252 tcg_gen_exit_tb(0);
253 }
254 }
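/* The TB is chained directly to its successor (goto_tb/exit_tb with a
   non-zero token) only when the destination lies in the same guest page,
   so the link stays valid as long as the page mapping does; otherwise we
   exit with cpu_pc set and let the main loop look up the next TB. */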
255
256 static void gen_jump(DisasContext * ctx)
257 {
258 if (ctx->delayed_pc == (uint32_t) - 1) {
259 /* Target is not statically known; it necessarily comes from a
260 delayed jump, as immediate jumps are conditional jumps */
261 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
262 if (ctx->singlestep_enabled)
263 gen_op_debug();
264 tcg_gen_exit_tb(0);
265 } else {
266 gen_goto_tb(ctx, 0, ctx->delayed_pc);
267 }
268 }
269
270 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
271 {
272 int label = gen_new_label();
273 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
274 tcg_gen_andi_i32(cpu_T[0], cpu_sr, SR_T);
275 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], t ? SR_T : 0, label);
276 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
277 gen_set_label(label);
278 }
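/* For the delayed conditional branches (bt/s, bf/s) the decision is taken
   here, before the delay slot instruction runs: if SR.T matches the
   expected value t, DELAY_SLOT_TRUE is recorded in cpu_flags and
   gen_delayed_conditional_jump() acts on it after the slot instruction. */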
279
280 /* Immediate conditional jump (bt or bf) */
281 static void gen_conditional_jump(DisasContext * ctx,
282 target_ulong ift, target_ulong ifnott)
283 {
284 int l1;
285
286 l1 = gen_new_label();
287 tcg_gen_andi_i32(cpu_T[0], cpu_sr, SR_T);
288 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_T[0], SR_T, l1);
289 gen_goto_tb(ctx, 0, ifnott);
290 gen_set_label(l1);
291 gen_goto_tb(ctx, 1, ift);
292 }
293
294 /* Delayed conditional jump (bt or bf) */
295 static void gen_delayed_conditional_jump(DisasContext * ctx)
296 {
297 int l1;
298
299 l1 = gen_new_label();
300 tcg_gen_andi_i32(cpu_T[0], cpu_flags, DELAY_SLOT_TRUE);
301 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_T[0], DELAY_SLOT_TRUE, l1);
302 gen_goto_tb(ctx, 1, ctx->pc + 2);
303 gen_set_label(l1);
304 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
305 gen_jump(ctx);
306 }
307
308 static inline void gen_set_t(void)
309 {
310 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
311 }
312
313 static inline void gen_clr_t(void)
314 {
315 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
316 }
317
318 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
319 {
320 int label1 = gen_new_label();
321 int label2 = gen_new_label();
322 tcg_gen_brcond_i32(cond, t1, t0, label1);
323 gen_clr_t();
324 tcg_gen_br(label2);
325 gen_set_label(label1);
326 gen_set_t();
327 gen_set_label(label2);
328 }
329
330 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
331 {
332 int label1 = gen_new_label();
333 int label2 = gen_new_label();
334 tcg_gen_brcondi_i32(cond, t0, imm, label1);
335 gen_clr_t();
336 tcg_gen_br(label2);
337 gen_set_label(label1);
338 gen_set_t();
339 gen_set_label(label2);
340 }
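/* gen_cmp()/gen_cmp_imm() set SR.T when the condition holds and clear it
   otherwise, branching around the clear/set pair because the result must
   end up in a flag bit rather than in a register. */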
341
342 static inline void gen_store_flags(uint32_t flags)
343 {
344 tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
345 tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
346 }
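/* Write the translation-time flags back to env->flags while preserving a
   DELAY_SLOT_TRUE bit possibly set at runtime by gen_branch_slot(). */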
347
348 #define B3_0 (ctx->opcode & 0xf)
349 #define B6_4 ((ctx->opcode >> 4) & 0x7)
350 #define B7_4 ((ctx->opcode >> 4) & 0xf)
351 #define B7_0 (ctx->opcode & 0xff)
352 #define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
353 #define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
354 (ctx->opcode & 0xfff))
355 #define B11_8 ((ctx->opcode >> 8) & 0xf)
356 #define B15_12 ((ctx->opcode >> 12) & 0xf)
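/* Instruction field extraction.  Example: opcode 0x312c decodes as
   B15_12 = 3, B11_8 = 1 (Rn = R1), B7_4 = 2 (Rm = R2), B3_0 = 0xc, which
   matches the 0x300c "add Rm,Rn" pattern below. */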
357
358 #define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
359 (x) + 16 : (x))
360
361 #define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
362 ? (x) + 16 : (x))
363
364 #define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
365 #define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
366 #define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
367 #define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
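/* REG() resolves R0-R7 to the register bank selected by SR.MD/SR.RB as
   captured in ctx->sr, while ALTREG() picks the inactive bank used by the
   ldc/stc Rm_BANK forms.  FREG() selects the floating point bank from
   FPSCR.FR; XHACK()/XREG() move the low bit of a pair designator into
   bit 4 so that odd designators reach the extended (XD/XF) half of the
   register file for FPSCR.SZ sized moves. */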
368
369 #define CHECK_NOT_DELAY_SLOT \
370 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
371 {gen_op_raise_slot_illegal_instruction (); ctx->bstate = BS_EXCP; \
372 return;}
373
374 void _decode_opc(DisasContext * ctx)
375 {
376 #if 0
377 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
378 #endif
379 switch (ctx->opcode) {
380 case 0x0019: /* div0u */
381 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
382 return;
383 case 0x000b: /* rts */
384 CHECK_NOT_DELAY_SLOT
385 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
386 ctx->flags |= DELAY_SLOT;
387 ctx->delayed_pc = (uint32_t) - 1;
388 return;
389 case 0x0028: /* clrmac */
390 tcg_gen_movi_i32(cpu_mach, 0);
391 tcg_gen_movi_i32(cpu_macl, 0);
392 return;
393 case 0x0048: /* clrs */
394 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
395 return;
396 case 0x0008: /* clrt */
397 gen_clr_t();
398 return;
399 case 0x0038: /* ldtlb */
400 #if defined(CONFIG_USER_ONLY)
401 assert(0); /* XXXXX */
402 #else
403 gen_op_ldtlb();
404 #endif
405 return;
406 case 0x002b: /* rte */
407 CHECK_NOT_DELAY_SLOT
408 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
409 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
410 ctx->flags |= DELAY_SLOT;
411 ctx->delayed_pc = (uint32_t) - 1;
412 return;
413 case 0x0058: /* sets */
414 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
415 return;
416 case 0x0018: /* sett */
417 gen_set_t();
418 return;
419 case 0xfbfd: /* frchg */
420 gen_op_frchg();
421 ctx->bstate = BS_STOP;
422 return;
423 case 0xf3fd: /* fschg */
424 gen_op_fschg();
425 ctx->bstate = BS_STOP;
426 return;
427 case 0x0009: /* nop */
428 return;
429 case 0x001b: /* sleep */
430 if (ctx->memidx) {
431 gen_op_sleep();
432 } else {
433 gen_op_raise_illegal_instruction();
434 ctx->bstate = BS_EXCP;
435 }
436 return;
437 }
438
439 switch (ctx->opcode & 0xf000) {
440 case 0x1000: /* mov.l Rm,@(disp,Rn) */
441 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
442 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
443 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], B3_0 * 4);
444 gen_op_stl_T0_T1(ctx);
445 return;
446 case 0x5000: /* mov.l @(disp,Rm),Rn */
447 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
448 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B3_0 * 4);
449 gen_op_ldl_T0_T0(ctx);
450 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
451 return;
452 case 0xe000: /* mov #imm,Rn */
453 tcg_gen_movi_i32(cpu_gregs[REG(B11_8)], B7_0s);
454 return;
455 case 0x9000: /* mov.w @(disp,PC),Rn */
456 tcg_gen_movi_i32(cpu_T[0], ctx->pc + 4 + B7_0 * 2);
457 gen_op_ldw_T0_T0(ctx);
458 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
459 return;
460 case 0xd000: /* mov.l @(disp,PC),Rn */
461 tcg_gen_movi_i32(cpu_T[0], (ctx->pc + 4 + B7_0 * 4) & ~3);
462 gen_op_ldl_T0_T0(ctx);
463 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
464 return;
465 case 0x7000: /* add #imm,Rn */
466 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], B7_0s);
467 return;
468 case 0xa000: /* bra disp */
469 CHECK_NOT_DELAY_SLOT
470 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
471 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
472 ctx->flags |= DELAY_SLOT;
473 return;
474 case 0xb000: /* bsr disp */
475 CHECK_NOT_DELAY_SLOT
476 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
477 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
478 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
479 ctx->flags |= DELAY_SLOT;
480 return;
481 }
482
483 switch (ctx->opcode & 0xf00f) {
484 case 0x6003: /* mov Rm,Rn */
485 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
486 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
487 return;
488 case 0x2000: /* mov.b Rm,@Rn */
489 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
490 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
491 gen_op_stb_T0_T1(ctx);
492 return;
493 case 0x2001: /* mov.w Rm,@Rn */
494 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
495 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
496 gen_op_stw_T0_T1(ctx);
497 return;
498 case 0x2002: /* mov.l Rm,@Rn */
499 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
500 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
501 gen_op_stl_T0_T1(ctx);
502 return;
503 case 0x6000: /* mov.b @Rm,Rn */
504 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
505 gen_op_ldb_T0_T0(ctx);
506 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
507 return;
508 case 0x6001: /* mov.w @Rm,Rn */
509 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
510 gen_op_ldw_T0_T0(ctx);
511 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
512 return;
513 case 0x6002: /* mov.l @Rm,Rn */
514 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
515 gen_op_ldl_T0_T0(ctx);
516 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
517 return;
518 case 0x2004: /* mov.b Rm,@-Rn */
519 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
520 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
521 cpu_gregs[REG(B11_8)], 1); /* modify register status */
522 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
523 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)],
524 cpu_gregs[REG(B11_8)], 1); /* recover register status */
525 gen_op_stb_T0_T1(ctx); /* might cause re-execution */
526 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
527 cpu_gregs[REG(B11_8)], 1); /* modify register status */
528 return;
529 case 0x2005: /* mov.w Rm,@-Rn */
530 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
531 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
532 cpu_gregs[REG(B11_8)], 2);
533 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
534 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)],
535 cpu_gregs[REG(B11_8)], 2);
536 gen_op_stw_T0_T1(ctx);
537 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
538 cpu_gregs[REG(B11_8)], 2);
539 return;
540 case 0x2006: /* mov.l Rm,@-Rn */
541 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
542 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
543 cpu_gregs[REG(B11_8)], 4);
544 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
545 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)],
546 cpu_gregs[REG(B11_8)], 4);
547 gen_op_stl_T0_T1(ctx);
548 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)],
549 cpu_gregs[REG(B11_8)], 4);
550 return;
551 case 0x6004: /* mov.b @Rm+,Rn */
552 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
553 gen_op_ldb_T0_T0(ctx);
554 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
555 if ( B11_8 != B7_4 )
556 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
557 cpu_gregs[REG(B7_4)], 1);
558 return;
559 case 0x6005: /* mov.w @Rm+,Rn */
560 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
561 gen_op_ldw_T0_T0(ctx);
562 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
563 if ( B11_8 != B7_4 )
564 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
565 cpu_gregs[REG(B7_4)], 2);
566 return;
567 case 0x6006: /* mov.l @Rm+,Rn */
568 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
569 gen_op_ldl_T0_T0(ctx);
570 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
571 if ( B11_8 != B7_4 )
572 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
573 cpu_gregs[REG(B7_4)], 4);
574 return;
575 case 0x0004: /* mov.b Rm,@(R0,Rn) */
576 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
577 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
578 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
579 gen_op_stb_T0_T1(ctx);
580 return;
581 case 0x0005: /* mov.w Rm,@(R0,Rn) */
582 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
583 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
584 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
585 gen_op_stw_T0_T1(ctx);
586 return;
587 case 0x0006: /* mov.l Rm,@(R0,Rn) */
588 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
589 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
590 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
591 gen_op_stl_T0_T1(ctx);
592 return;
593 case 0x000c: /* mov.b @(R0,Rm),Rn */
594 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
595 gen_op_ldb_T0_T0(ctx);
596 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
597 return;
598 case 0x000d: /* mov.w @(R0,Rm),Rn */
599 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
600 gen_op_ldw_T0_T0(ctx);
601 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
602 return;
603 case 0x000e: /* mov.l @(R0,Rm),Rn */
604 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
605 gen_op_ldl_T0_T0(ctx);
606 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
607 return;
608 case 0x6008: /* swap.b Rm,Rn */
609 tcg_gen_andi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)], 0xffff0000);
610 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 0xff);
611 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 8);
612 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_T[0]);
613 tcg_gen_shri_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 8);
614 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xff);
615 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_T[0]);
616 return;
617 case 0x6009: /* swap.w Rm,Rn */
618 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 0xffff);
619 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 16);
620 tcg_gen_shri_i32(cpu_T[1], cpu_gregs[REG(B7_4)], 16);
621 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
622 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_T[0], cpu_T[1]);
623 return;
624 case 0x200d: /* xtrct Rm,Rn */
625 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(B7_4)], 0xffff);
626 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 16);
627 tcg_gen_shri_i32(cpu_T[1], cpu_gregs[REG(B11_8)], 16);
628 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
629 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_T[0], cpu_T[1]);
630 return;
631 case 0x300c: /* add Rm,Rn */
632 tcg_gen_add_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
633 return;
634 case 0x300e: /* addc Rm,Rn */
635 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
636 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
637 gen_op_addc_T0_T1();
638 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
639 return;
640 case 0x300f: /* addv Rm,Rn */
641 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
642 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
643 gen_op_addv_T0_T1();
644 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
645 return;
646 case 0x2009: /* and Rm,Rn */
647 tcg_gen_and_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
648 return;
649 case 0x3000: /* cmp/eq Rm,Rn */
650 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
651 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
652 gen_cmp(TCG_COND_EQ, cpu_T[0], cpu_T[1]);
653 return;
654 case 0x3003: /* cmp/ge Rm,Rn */
655 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
656 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
657 gen_cmp(TCG_COND_GE, cpu_T[0], cpu_T[1]);
658 return;
659 case 0x3007: /* cmp/gt Rm,Rn */
660 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
661 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
662 gen_cmp(TCG_COND_GT, cpu_T[0], cpu_T[1]);
663 return;
664 case 0x3006: /* cmp/hi Rm,Rn */
665 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
666 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
667 gen_cmp(TCG_COND_GTU, cpu_T[0], cpu_T[1]);
668 return;
669 case 0x3002: /* cmp/hs Rm,Rn */
670 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
671 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
672 gen_cmp(TCG_COND_GEU, cpu_T[0], cpu_T[1]);
673 return;
674 case 0x200c: /* cmp/str Rm,Rn */
675 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
676 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
677 gen_op_cmp_str_T0_T1();
678 return;
679 case 0x2007: /* div0s Rm,Rn */
680 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
681 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
682 gen_op_div0s_T0_T1();
683 return;
684 case 0x3004: /* div1 Rm,Rn */
685 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
686 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
687 gen_op_div1_T0_T1();
688 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
689 return;
690 case 0x300d: /* dmuls.l Rm,Rn */
691 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
692 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
693 gen_op_dmulsl_T0_T1();
694 return;
695 case 0x3005: /* dmulu.l Rm,Rn */
696 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
697 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
698 gen_op_dmulul_T0_T1();
699 return;
700 case 0x600e: /* exts.b Rm,Rn */
701 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
702 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xff);
703 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
704 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
705 return;
706 case 0x600f: /* exts.w Rm,Rn */
707 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
708 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
709 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
710 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
711 return;
712 case 0x600c: /* extu.b Rm,Rn */
713 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
714 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xff);
715 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
716 return;
717 case 0x600d: /* extu.w Rm,Rn */
718 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
719 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
720 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
721 return;
722 case 0x000f: /* mac.l @Rm+,@Rn+ */
723 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
724 gen_op_ldl_T0_T0(ctx);
725 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
726 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
727 gen_op_ldl_T0_T0(ctx);
728 gen_op_macl_T0_T1();
729 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)], cpu_gregs[REG(B7_4)], 4);
730 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
731 return;
732 case 0x400f: /* mac.w @Rm+,@Rn+ */
733 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
734 gen_op_ldl_T0_T0(ctx);
735 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
736 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
737 gen_op_ldl_T0_T0(ctx);
738 gen_op_macw_T0_T1();
739 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 2);
740 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)], cpu_gregs[REG(B7_4)], 2);
741 return;
742 case 0x0007: /* mul.l Rm,Rn */
743 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
744 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
745 gen_op_mull_T0_T1();
746 return;
747 case 0x200f: /* muls.w Rm,Rn */
748 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
749 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
750 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
751 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
752 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
753 tcg_gen_ext16s_i32(cpu_T[1], cpu_T[1]);
754 gen_op_mulsw_T0_T1();
755 return;
756 case 0x200e: /* mulu.w Rm,Rn */
757 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
758 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
759 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
760 tcg_gen_andi_i32(cpu_T[1], cpu_T[1], 0xffff);
761 gen_op_muluw_T0_T1();
762 return;
763 case 0x600b: /* neg Rm,Rn */
764 tcg_gen_neg_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
765 return;
766 case 0x600a: /* negc Rm,Rn */
767 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
768 gen_op_negc_T0();
769 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
770 return;
771 case 0x6007: /* not Rm,Rn */
772 tcg_gen_not_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
773 return;
774 case 0x200b: /* or Rm,Rn */
775 tcg_gen_or_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
776 return;
777 case 0x400c: /* shad Rm,Rn */
778 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
779 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
780 gen_op_shad_T0_T1();
781 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
782 return;
783 case 0x400d: /* shld Rm,Rn */
784 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
785 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
786 gen_op_shld_T0_T1();
787 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
788 return;
789 case 0x3008: /* sub Rm,Rn */
790 tcg_gen_sub_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
791 return;
792 case 0x300a: /* subc Rm,Rn */
793 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
794 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
795 gen_op_subc_T0_T1();
796 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
797 return;
798 case 0x300b: /* subv Rm,Rn */
799 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
800 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
801 gen_op_subv_T0_T1();
802 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[1]);
803 return;
804 case 0x2008: /* tst Rm,Rn */
805 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
806 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
807 tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1]);
808 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
809 return;
810 case 0x200a: /* xor Rm,Rn */
811 tcg_gen_xor_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], cpu_gregs[REG(B7_4)]);
812 return;
813 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
814 if (ctx->fpscr & FPSCR_SZ) {
815 gen_op_fmov_drN_DT0(XREG(B7_4));
816 gen_op_fmov_DT0_drN(XREG(B11_8));
817 } else {
818 gen_op_fmov_frN_FT0(FREG(B7_4));
819 gen_op_fmov_FT0_frN(FREG(B11_8));
820 }
821 return;
822 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
823 if (ctx->fpscr & FPSCR_SZ) {
824 gen_op_fmov_drN_DT0(XREG(B7_4));
825 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
826 gen_op_stfq_DT0_T1(ctx);
827 } else {
828 gen_op_fmov_frN_FT0(FREG(B7_4));
829 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
830 gen_op_stfl_FT0_T1(ctx);
831 }
832 return;
833 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
834 if (ctx->fpscr & FPSCR_SZ) {
835 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
836 gen_op_ldfq_T0_DT0(ctx);
837 gen_op_fmov_DT0_drN(XREG(B11_8));
838 } else {
839 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
840 gen_op_ldfl_T0_FT0(ctx);
841 gen_op_fmov_FT0_frN(FREG(B11_8));
842 }
843 return;
844 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
845 if (ctx->fpscr & FPSCR_SZ) {
846 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
847 gen_op_ldfq_T0_DT0(ctx);
848 gen_op_fmov_DT0_drN(XREG(B11_8));
849 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
850 cpu_gregs[REG(B7_4)], 8);
851 } else {
852 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
853 gen_op_ldfl_T0_FT0(ctx);
854 gen_op_fmov_FT0_frN(FREG(B11_8));
855 tcg_gen_addi_i32(cpu_gregs[REG(B7_4)],
856 cpu_gregs[REG(B7_4)], 4);
857 }
858 return;
859 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
860 if (ctx->fpscr & FPSCR_SZ) {
861 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
862 gen_op_fmov_drN_DT0(XREG(B7_4));
863 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
864 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
865 gen_op_stfq_DT0_T1(ctx);
866 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
867 } else {
868 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
869 gen_op_fmov_frN_FT0(FREG(B7_4));
870 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
871 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
872 gen_op_stfl_FT0_T1(ctx);
873 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
874 }
875 return;
876 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rn - FPSCR: Nothing */
877 tcg_gen_add_i32(cpu_T[0], cpu_gregs[REG(B7_4)], cpu_gregs[REG(0)]);
878 if (ctx->fpscr & FPSCR_SZ) {
879 gen_op_ldfq_T0_DT0(ctx);
880 gen_op_fmov_DT0_drN(XREG(B11_8));
881 } else {
882 gen_op_ldfl_T0_FT0(ctx);
883 gen_op_fmov_FT0_frN(FREG(B11_8));
884 }
885 return;
886 case 0xf007: /* fmov {F,D,X}Rm,@(R0,Rn) - FPSCR: Nothing */
887 if (ctx->fpscr & FPSCR_SZ) {
888 gen_op_fmov_drN_DT0(XREG(B7_4));
889 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
890 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
891 gen_op_stfq_DT0_T1(ctx);
892 } else {
893 gen_op_fmov_frN_FT0(FREG(B7_4));
894 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
895 tcg_gen_add_i32(cpu_T[1], cpu_T[1], cpu_gregs[REG(0)]);
896 gen_op_stfl_FT0_T1(ctx);
897 }
898 return;
899 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
900 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
901 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
902 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
903 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
904 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
905 if (ctx->fpscr & FPSCR_PR) {
906 if (ctx->opcode & 0x0110)
907 break; /* illegal instruction */
908 gen_op_fmov_drN_DT1(DREG(B7_4));
909 gen_op_fmov_drN_DT0(DREG(B11_8));
910 }
911 else {
912 gen_op_fmov_frN_FT1(FREG(B7_4));
913 gen_op_fmov_frN_FT0(FREG(B11_8));
914 }
915
916 switch (ctx->opcode & 0xf00f) {
917 case 0xf000: /* fadd Rm,Rn */
918 ctx->fpscr & FPSCR_PR ? gen_op_fadd_DT() : gen_op_fadd_FT();
919 break;
920 case 0xf001: /* fsub Rm,Rn */
921 ctx->fpscr & FPSCR_PR ? gen_op_fsub_DT() : gen_op_fsub_FT();
922 break;
923 case 0xf002: /* fmul Rm,Rn */
924 ctx->fpscr & FPSCR_PR ? gen_op_fmul_DT() : gen_op_fmul_FT();
925 break;
926 case 0xf003: /* fdiv Rm,Rn */
927 ctx->fpscr & FPSCR_PR ? gen_op_fdiv_DT() : gen_op_fdiv_FT();
928 break;
929 case 0xf004: /* fcmp/eq Rm,Rn */
930 ctx->fpscr & FPSCR_PR ? gen_op_fcmp_eq_DT() : gen_op_fcmp_eq_FT();
931 return;
932 case 0xf005: /* fcmp/gt Rm,Rn */
933 ctx->fpscr & FPSCR_PR ? gen_op_fcmp_gt_DT() : gen_op_fcmp_gt_FT();
934 return;
935 }
936
937 if (ctx->fpscr & FPSCR_PR) {
938 gen_op_fmov_DT0_drN(DREG(B11_8));
939 }
940 else {
941 gen_op_fmov_FT0_frN(FREG(B11_8));
942 }
943 return;
944 }
945
946 switch (ctx->opcode & 0xff00) {
947 case 0xc900: /* and #imm,R0 */
948 tcg_gen_andi_i32(cpu_gregs[REG(0)], cpu_gregs[REG(0)], B7_0);
949 return;
950 case 0xcd00: /* and.b #imm,@(R0,GBR) */
951 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
952 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
953 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
954 gen_op_ldub_T0_T0(ctx);
955 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], B7_0);
956 gen_op_stb_T0_T1(ctx);
957 return;
958 case 0x8b00: /* bf label */
959 CHECK_NOT_DELAY_SLOT
960 gen_conditional_jump(ctx, ctx->pc + 2,
961 ctx->pc + 4 + B7_0s * 2);
962 ctx->bstate = BS_BRANCH;
963 return;
964 case 0x8f00: /* bf/s label */
965 CHECK_NOT_DELAY_SLOT
966 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
967 ctx->flags |= DELAY_SLOT_CONDITIONAL;
968 return;
969 case 0x8900: /* bt label */
970 CHECK_NOT_DELAY_SLOT
971 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
972 ctx->pc + 2);
973 ctx->bstate = BS_BRANCH;
974 return;
975 case 0x8d00: /* bt/s label */
976 CHECK_NOT_DELAY_SLOT
977 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
978 ctx->flags |= DELAY_SLOT_CONDITIONAL;
979 return;
980 case 0x8800: /* cmp/eq #imm,R0 */
981 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
982 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], B7_0s);
983 return;
984 case 0xc400: /* mov.b @(disp,GBR),R0 */
985 gen_op_stc_gbr_T0();
986 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0);
987 gen_op_ldb_T0_T0(ctx);
988 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
989 return;
990 case 0xc500: /* mov.w @(disp,GBR),R0 */
991 gen_op_stc_gbr_T0();
992 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 2);
993 gen_op_ldw_T0_T0(ctx);
994 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
995 return;
996 case 0xc600: /* mov.l @(disp,GBR),R0 */
997 gen_op_stc_gbr_T0();
998 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 4);
999 gen_op_ldl_T0_T0(ctx);
1000 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
1001 return;
1002 case 0xc000: /* mov.b R0,@(disp,GBR) */
1003 gen_op_stc_gbr_T0();
1004 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0);
1005 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1006 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1007 gen_op_stb_T0_T1(ctx);
1008 return;
1009 case 0xc100: /* mov.w R0,@(disp,GBR) */
1010 gen_op_stc_gbr_T0();
1011 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 2);
1012 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1013 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1014 gen_op_stw_T0_T1(ctx);
1015 return;
1016 case 0xc200: /* mov.l R0,@(disp,GBR) */
1017 gen_op_stc_gbr_T0();
1018 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B7_0 * 4);
1019 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1020 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1021 gen_op_stl_T0_T1(ctx);
1022 return;
1023 case 0x8000: /* mov.b R0,@(disp,Rn) */
1024 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1025 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B7_4)]);
1026 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], B3_0);
1027 gen_op_stb_T0_T1(ctx);
1028 return;
1029 case 0x8100: /* mov.w R0,@(disp,Rn) */
1030 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1031 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B7_4)]);
1032 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], B3_0 * 2);
1033 gen_op_stw_T0_T1(ctx);
1034 return;
1035 case 0x8400: /* mov.b @(disp,Rn),R0 */
1036 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
1037 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B3_0);
1038 gen_op_ldb_T0_T0(ctx);
1039 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
1040 return;
1041 case 0x8500: /* mov.w @(disp,Rn),R0 */
1042 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B7_4)]);
1043 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], B3_0 * 2);
1044 gen_op_ldw_T0_T0(ctx);
1045 tcg_gen_mov_i32(cpu_gregs[REG(0)], cpu_T[0]);
1046 return;
1047 case 0xc700: /* mova @(disp,PC),R0 */
1048 tcg_gen_movi_i32(cpu_gregs[REG(0)],
1049 ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1050 return;
1051 case 0xcb00: /* or #imm,R0 */
1052 tcg_gen_ori_i32(cpu_gregs[REG(0)], cpu_gregs[REG(0)], B7_0);
1053 return;
1054 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1055 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1056 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
1057 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]); /* keep the address in T1 for the store back */
1058 gen_op_ldub_T0_T0(ctx);
1059 tcg_gen_ori_i32(cpu_T[0], cpu_T[0], B7_0);
1060 gen_op_stb_T0_T1(ctx);
1061 return;
1062 case 0xc300: /* trapa #imm */
1063 CHECK_NOT_DELAY_SLOT tcg_gen_movi_i32(cpu_pc, ctx->pc);
1064 gen_op_trapa(B7_0);
1065 ctx->bstate = BS_BRANCH;
1066 return;
1067 case 0xc800: /* tst #imm,R0 */
1068 tcg_gen_andi_i32(cpu_T[0], cpu_gregs[REG(0)], B7_0);
1069 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
1070 return;
1071 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1072 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1073 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
1074 gen_op_ldub_T0_T0(ctx);
1075 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], B7_0);
1076 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
1077 return;
1078 case 0xca00: /* xor #imm,R0 */
1079 tcg_gen_xori_i32(cpu_gregs[REG(0)], cpu_gregs[REG(0)], B7_0);
1080 return;
1081 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1082 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1083 tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_gbr);
1084 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1085 gen_op_ldub_T0_T0(ctx);
1086 tcg_gen_xori_i32(cpu_T[0], cpu_T[0], B7_0);
1087 gen_op_stb_T0_T1(ctx);
1088 return;
1089 }
1090
1091 switch (ctx->opcode & 0xf08f) {
1092 case 0x408e: /* ldc Rm,Rn_BANK */
1093 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1094 tcg_gen_mov_i32(cpu_gregs[ALTREG(B6_4)], cpu_T[0]);
1095 return;
1096 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1097 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1098 gen_op_ldl_T0_T0(ctx);
1099 tcg_gen_mov_i32(cpu_gregs[ALTREG(B6_4)], cpu_T[0]);
1100 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1101 return;
1102 case 0x0082: /* stc Rm_BANK,Rn */
1103 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[ALTREG(B6_4)]);
1104 tcg_gen_mov_i32(cpu_gregs[REG(B11_8)], cpu_T[0]);
1105 return;
1106 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1107 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1108 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
1109 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[ALTREG(B6_4)]);
1110 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1111 gen_op_stl_T0_T1(ctx);
1112 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 4);
1113 return;
1114 }
1115
1116 switch (ctx->opcode & 0xf0ff) {
1117 case 0x0023: /* braf Rn */
1118 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1119 tcg_gen_addi_i32(cpu_delayed_pc, cpu_T[0], ctx->pc + 4);
1120 ctx->flags |= DELAY_SLOT;
1121 ctx->delayed_pc = (uint32_t) - 1;
1122 return;
1123 case 0x0003: /* bsrf Rn */
1124 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1125 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1126 tcg_gen_add_i32(cpu_delayed_pc, cpu_T[0], cpu_pr);
1127 ctx->flags |= DELAY_SLOT;
1128 ctx->delayed_pc = (uint32_t) - 1;
1129 return;
1130 case 0x4015: /* cmp/pl Rn */
1131 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1132 gen_cmp_imm(TCG_COND_GT, cpu_T[0], 0);
1133 return;
1134 case 0x4011: /* cmp/pz Rn */
1135 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1136 gen_cmp_imm(TCG_COND_GE, cpu_T[0], 0);
1137 return;
1138 case 0x4010: /* dt Rn */
1139 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 1);
1140 gen_cmp_imm(TCG_COND_EQ, cpu_gregs[REG(B11_8)], 0);
1141 return;
1142 case 0x402b: /* jmp @Rn */
1143 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1144 tcg_gen_mov_i32(cpu_delayed_pc, cpu_T[0]);
1145 ctx->flags |= DELAY_SLOT;
1146 ctx->delayed_pc = (uint32_t) - 1;
1147 return;
1148 case 0x400b: /* jsr @Rn */
1149 CHECK_NOT_DELAY_SLOT tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1150 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1151 tcg_gen_mov_i32(cpu_delayed_pc, cpu_T[0]);
1152 ctx->flags |= DELAY_SLOT;
1153 ctx->delayed_pc = (uint32_t) - 1;
1154 return;
1155 #define LDST(reg,ldnum,ldpnum,ldop,stnum,stpnum,stop,extrald) \
1156 case ldnum: \
1157 tcg_gen_mov_i32 (cpu_T[0], cpu_gregs[REG(B11_8)]); \
1158 gen_op_##ldop##_T0_##reg (); \
1159 extrald \
1160 return; \
1161 case ldpnum: \
1162 tcg_gen_mov_i32 (cpu_T[0], cpu_gregs[REG(B11_8)]); \
1163 gen_op_ldl_T0_T0 (ctx); \
1164 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], \
1165 cpu_gregs[REG(B11_8)], 4); \
1166 gen_op_##ldop##_T0_##reg (); \
1167 extrald \
1168 return; \
1169 case stnum: \
1170 gen_op_##stop##_##reg##_T0 (); \
1171 tcg_gen_mov_i32 (cpu_gregs[REG(B11_8)], cpu_T[0]); \
1172 return; \
1173 case stpnum: \
1174 gen_op_##stop##_##reg##_T0 (); \
1175 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], \
1176 cpu_gregs[REG(B11_8)], 4); \
1177 tcg_gen_mov_i32 (cpu_T[1], cpu_gregs[REG(B11_8)]); \
1178 tcg_gen_addi_i32(cpu_gregs[REG(B11_8)], \
1179 cpu_gregs[REG(B11_8)], 4); \
1180 gen_op_stl_T0_T1 (ctx); \
1181 tcg_gen_subi_i32(cpu_gregs[REG(B11_8)], \
1182 cpu_gregs[REG(B11_8)], 4); \
1183 return;
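/* Each LDST() expansion below provides the four access forms for one
   control/system register: ldc/lds Rm,reg; ldc.l/lds.l @Rm+,reg;
   stc/sts reg,Rn; stc.l/sts.l reg,@-Rn.  Loads into sr and fpscr also
   stop translation (BS_STOP) since they invalidate the state this TB was
   decoded against. */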
1184 LDST(sr, 0x400e, 0x4007, ldc, 0x0002, 0x4003, stc, ctx->bstate =
1185 BS_STOP;)
1186 LDST(gbr, 0x401e, 0x4017, ldc, 0x0012, 0x4013, stc,)
1187 LDST(vbr, 0x402e, 0x4027, ldc, 0x0022, 0x4023, stc,)
1188 LDST(ssr, 0x403e, 0x4037, ldc, 0x0032, 0x4033, stc,)
1189 LDST(spc, 0x404e, 0x4047, ldc, 0x0042, 0x4043, stc,)
1190 LDST(dbr, 0x40fa, 0x40f6, ldc, 0x00fa, 0x40f2, stc,)
1191 LDST(mach, 0x400a, 0x4006, lds, 0x000a, 0x4002, sts,)
1192 LDST(macl, 0x401a, 0x4016, lds, 0x001a, 0x4012, sts,)
1193 LDST(pr, 0x402a, 0x4026, lds, 0x002a, 0x4022, sts,)
1194 LDST(fpul, 0x405a, 0x4056, lds, 0x005a, 0x4052, sts,)
1195 LDST(fpscr, 0x406a, 0x4066, lds, 0x006a, 0x4062, sts, ctx->bstate =
1196 BS_STOP;)
1197 case 0x00c3: /* movca.l R0,@Rn */
1198 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(0)]);
1199 tcg_gen_mov_i32(cpu_T[1], cpu_gregs[REG(B11_8)]);
1200 gen_op_stl_T0_T1(ctx);
1201 return;
1202 case 0x0029: /* movt Rn */
1203 tcg_gen_andi_i32(cpu_gregs[REG(B11_8)], cpu_sr, SR_T);
1204 return;
1205 case 0x0093: /* ocbi @Rn */
1206 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1207 gen_op_ldl_T0_T0(ctx);
1208 return;
1209 case 0x00a3: /* ocbp @Rn */
1210 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1211 gen_op_ldl_T0_T0(ctx);
1212 return;
1213 case 0x00b3: /* ocbwb @Rn */
1214 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1215 gen_op_ldl_T0_T0(ctx);
1216 return;
1217 case 0x0083: /* pref @Rn */
1218 return;
1219 case 0x4024: /* rotcl Rn */
1220 gen_op_rotcl_Rn(REG(B11_8));
1221 return;
1222 case 0x4025: /* rotcr Rn */
1223 gen_op_rotcr_Rn(REG(B11_8));
1224 return;
1225 case 0x4004: /* rotl Rn */
1226 gen_op_rotl_Rn(REG(B11_8));
1227 return;
1228 case 0x4005: /* rotr Rn */
1229 gen_op_rotr_Rn(REG(B11_8));
1230 return;
1231 case 0x4000: /* shll Rn */
1232 case 0x4020: /* shal Rn */
1233 gen_op_shal_Rn(REG(B11_8));
1234 return;
1235 case 0x4021: /* shar Rn */
1236 gen_op_shar_Rn(REG(B11_8));
1237 return;
1238 case 0x4001: /* shlr Rn */
1239 gen_op_shlr_Rn(REG(B11_8));
1240 return;
1241 case 0x4008: /* shll2 Rn */
1242 tcg_gen_shli_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 2);
1243 return;
1244 case 0x4018: /* shll8 Rn */
1245 tcg_gen_shli_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
1246 return;
1247 case 0x4028: /* shll16 Rn */
1248 tcg_gen_shli_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 16);
1249 return;
1250 case 0x4009: /* shlr2 Rn */
1251 tcg_gen_shri_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 2);
1252 return;
1253 case 0x4019: /* shlr8 Rn */
1254 tcg_gen_shri_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 8);
1255 return;
1256 case 0x4029: /* shlr16 Rn */
1257 tcg_gen_shri_i32(cpu_gregs[REG(B11_8)], cpu_gregs[REG(B11_8)], 16);
1258 return;
1259 case 0x401b: /* tas.b @Rn */
1260 tcg_gen_mov_i32(cpu_T[0], cpu_gregs[REG(B11_8)]);
1261 tcg_gen_mov_i32(cpu_T[1], cpu_T[0]);
1262 gen_op_ldub_T0_T0(ctx);
1263 gen_cmp_imm(TCG_COND_EQ, cpu_T[0], 0);
1264 tcg_gen_ori_i32(cpu_T[0], cpu_T[0], 0x80);
1265 gen_op_stb_T0_T1(ctx);
1266 return;
1267 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1268 gen_op_movl_fpul_FT0();
1269 gen_op_fmov_FT0_frN(FREG(B11_8));
1270 return;
1271 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1272 gen_op_fmov_frN_FT0(FREG(B11_8));
1273 gen_op_movl_FT0_fpul();
1274 return;
1275 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1276 if (ctx->fpscr & FPSCR_PR) {
1277 if (ctx->opcode & 0x0100)
1278 break; /* illegal instruction */
1279 gen_op_float_DT();
1280 gen_op_fmov_DT0_drN(DREG(B11_8));
1281 }
1282 else {
1283 gen_op_float_FT();
1284 gen_op_fmov_FT0_frN(FREG(B11_8));
1285 }
1286 return;
1287 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1288 if (ctx->fpscr & FPSCR_PR) {
1289 if (ctx->opcode & 0x0100)
1290 break; /* illegal instruction */
1291 gen_op_fmov_drN_DT0(DREG(B11_8));
1292 gen_op_ftrc_DT();
1293 }
1294 else {
1295 gen_op_fmov_frN_FT0(FREG(B11_8));
1296 gen_op_ftrc_FT();
1297 }
1298 return;
1299 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1300 gen_op_fneg_frN(FREG(B11_8));
1301 return;
1302 case 0xf05d: /* fabs FRn/DRn */
1303 if (ctx->fpscr & FPSCR_PR) {
1304 if (ctx->opcode & 0x0100)
1305 break; /* illegal instruction */
1306 gen_op_fmov_drN_DT0(DREG(B11_8));
1307 gen_op_fabs_DT();
1308 gen_op_fmov_DT0_drN(DREG(B11_8));
1309 } else {
1310 gen_op_fmov_frN_FT0(FREG(B11_8));
1311 gen_op_fabs_FT();
1312 gen_op_fmov_FT0_frN(FREG(B11_8));
1313 }
1314 return;
1315 case 0xf06d: /* fsqrt FRn/DRn */
1316 if (ctx->fpscr & FPSCR_PR) {
1317 if (ctx->opcode & 0x0100)
1318 break; /* illegal instruction */
1319 gen_op_fmov_drN_DT0(FREG(B11_8));
1320 gen_op_fsqrt_DT();
1321 gen_op_fmov_DT0_drN(FREG(B11_8));
1322 } else {
1323 gen_op_fmov_frN_FT0(FREG(B11_8));
1324 gen_op_fsqrt_FT();
1325 gen_op_fmov_FT0_frN(FREG(B11_8));
1326 }
1327 return;
1328 case 0xf07d: /* fsrra FRn */
1329 break;
1330 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1331 if (!(ctx->fpscr & FPSCR_PR)) {
1332 tcg_gen_movi_i32(cpu_T[0], 0);
1333 gen_op_fmov_T0_frN(FREG(B11_8));
1334 return;
1335 }
1336 break;
1337 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1338 if (!(ctx->fpscr & FPSCR_PR)) {
1339 tcg_gen_movi_i32(cpu_T[0], 0x3f800000);
1340 gen_op_fmov_T0_frN(FREG(B11_8));
1341 return;
1342 }
1343 break;
1344 case 0xf0ad: /* fcnvsd FPUL,DRn */
1345 gen_op_movl_fpul_FT0();
1346 gen_op_fcnvsd_FT_DT();
1347 gen_op_fmov_DT0_drN(DREG(B11_8));
1348 return;
1349 case 0xf0bd: /* fcnvds DRn,FPUL */
1350 gen_op_fmov_drN_DT0(DREG(B11_8));
1351 gen_op_fcnvds_DT_FT();
1352 gen_op_movl_FT0_fpul();
1353 return;
1354 }
1355
1356 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1357 ctx->opcode, ctx->pc);
1358 gen_op_raise_illegal_instruction();
1359 ctx->bstate = BS_EXCP;
1360 }
1361
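/* Translate one instruction.  If the previous instruction opened a delay
   slot, this instruction is the slot instruction: it is translated into the
   same TB and the postponed (possibly conditional) branch is emitted right
   after it. */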
1362 void decode_opc(DisasContext * ctx)
1363 {
1364 uint32_t old_flags = ctx->flags;
1365
1366 _decode_opc(ctx);
1367
1368 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1369 if (ctx->flags & DELAY_SLOT_CLEARME) {
1370 gen_store_flags(0);
1371 } else {
1372 /* go out of the delay slot */
1373 uint32_t new_flags = ctx->flags;
1374 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1375 gen_store_flags(new_flags);
1376 }
1377 ctx->flags = 0;
1378 ctx->bstate = BS_BRANCH;
1379 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1380 gen_delayed_conditional_jump(ctx);
1381 } else if (old_flags & DELAY_SLOT) {
1382 gen_jump(ctx);
1383 }
1384
1385 }
1386
1387 /* go into a delay slot */
1388 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1389 gen_store_flags(ctx->flags);
1390 }
1391
1392 static inline void
1393 gen_intermediate_code_internal(CPUState * env, TranslationBlock * tb,
1394 int search_pc)
1395 {
1396 DisasContext ctx;
1397 target_ulong pc_start;
1398 static uint16_t *gen_opc_end;
1399 int i, ii;
1400 int num_insns;
1401 int max_insns;
1402
1403 pc_start = tb->pc;
1404 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1405 ctx.pc = pc_start;
1406 ctx.flags = (uint32_t)tb->flags;
1407 ctx.bstate = BS_NONE;
1408 ctx.sr = env->sr;
1409 ctx.fpscr = env->fpscr;
1410 ctx.memidx = (env->sr & SR_MD) ? 1 : 0;
1411 /* We don't know if the delayed pc came from a dynamic or static branch,
1412 so assume it is a dynamic branch. */
1413 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1414 ctx.tb = tb;
1415 ctx.singlestep_enabled = env->singlestep_enabled;
1416
1417 #ifdef DEBUG_DISAS
1418 if (loglevel & CPU_LOG_TB_CPU) {
1419 fprintf(logfile,
1420 "------------------------------------------------\n");
1421 cpu_dump_state(env, logfile, fprintf, 0);
1422 }
1423 #endif
1424
1425 ii = -1;
1426 num_insns = 0;
1427 max_insns = tb->cflags & CF_COUNT_MASK;
1428 if (max_insns == 0)
1429 max_insns = CF_COUNT_MASK;
1430 gen_icount_start();
1431 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1432 if (env->nb_breakpoints > 0) {
1433 for (i = 0; i < env->nb_breakpoints; i++) {
1434 if (ctx.pc == env->breakpoints[i]) {
1435 /* We have hit a breakpoint - make sure PC is up-to-date */
1436 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1437 gen_op_debug();
1438 ctx.bstate = BS_EXCP;
1439 break;
1440 }
1441 }
1442 }
1443 if (search_pc) {
1444 i = gen_opc_ptr - gen_opc_buf;
1445 if (ii < i) {
1446 ii++;
1447 while (ii < i)
1448 gen_opc_instr_start[ii++] = 0;
1449 }
1450 gen_opc_pc[ii] = ctx.pc;
1451 gen_opc_hflags[ii] = ctx.flags;
1452 gen_opc_instr_start[ii] = 1;
1453 gen_opc_icount[ii] = num_insns;
1454 }
1455 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1456 gen_io_start();
1457 #if 0
1458 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1459 fflush(stderr);
1460 #endif
1461 ctx.opcode = lduw_code(ctx.pc);
1462 decode_opc(&ctx);
1463 num_insns++;
1464 ctx.pc += 2;
1465 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1466 break;
1467 if (env->singlestep_enabled)
1468 break;
1469 if (num_insns >= max_insns)
1470 break;
1471 #ifdef SH4_SINGLE_STEP
1472 break;
1473 #endif
1474 }
1475 if (tb->cflags & CF_LAST_IO)
1476 gen_io_end();
1477 if (env->singlestep_enabled) {
1478 gen_op_debug();
1479 } else {
1480 switch (ctx.bstate) {
1481 case BS_STOP:
1482 /* gen_op_interrupt_restart(); */
1483 /* fall through */
1484 case BS_NONE:
1485 if (ctx.flags) {
1486 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1487 }
1488 gen_goto_tb(&ctx, 0, ctx.pc);
1489 break;
1490 case BS_EXCP:
1491 /* gen_op_interrupt_restart(); */
1492 tcg_gen_exit_tb(0);
1493 break;
1494 case BS_BRANCH:
1495 default:
1496 break;
1497 }
1498 }
1499
1500 gen_icount_end(tb, num_insns);
1501 *gen_opc_ptr = INDEX_op_end;
1502 if (search_pc) {
1503 i = gen_opc_ptr - gen_opc_buf;
1504 ii++;
1505 while (ii <= i)
1506 gen_opc_instr_start[ii++] = 0;
1507 } else {
1508 tb->size = ctx.pc - pc_start;
1509 tb->icount = num_insns;
1510 }
1511
1512 #ifdef DEBUG_DISAS
1513 #ifdef SH4_DEBUG_DISAS
1514 if (loglevel & CPU_LOG_TB_IN_ASM)
1515 fprintf(logfile, "\n");
1516 #endif
1517 if (loglevel & CPU_LOG_TB_IN_ASM) {
1518 fprintf(logfile, "IN:\n"); /* , lookup_symbol(pc_start)); */
1519 target_disas(logfile, pc_start, ctx.pc - pc_start, 0);
1520 fprintf(logfile, "\n");
1521 }
1522 #endif
1523 }
1524
1525 void gen_intermediate_code(CPUState * env, struct TranslationBlock *tb)
1526 {
1527 gen_intermediate_code_internal(env, tb, 0);
1528 }
1529
1530 void gen_intermediate_code_pc(CPUState * env, struct TranslationBlock *tb)
1531 {
1532 gen_intermediate_code_internal(env, tb, 1);
1533 }
1534
1535 void gen_pc_load(CPUState *env, TranslationBlock *tb,
1536 unsigned long searched_pc, int pc_pos, void *puc)
1537 {
1538 env->pc = gen_opc_pc[pc_pos];
1539 env->flags = gen_opc_hflags[pc_pos];
1540 }