]> git.proxmox.com Git - mirror_qemu.git/blob - target-sh4/translate.c
target-sh4: Let cpu_sh4_init() return SuperHCPU
[mirror_qemu.git] / target-sh4 / translate.c
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #define DEBUG_DISAS
21 #define SH4_DEBUG_DISAS
22 //#define SH4_SINGLE_STEP
23
24 #include "cpu.h"
25 #include "disas.h"
26 #include "tcg-op.h"
27
28 #include "helper.h"
29 #define GEN_HELPER 1
30 #include "helper.h"
31
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    struct TranslationBlock *tb;    /* TB currently being generated */
    target_ulong pc;                /* guest PC of the opcode being decoded */
    uint32_t sr;                    /* SR at translation time (bank/privilege) */
    uint32_t fpscr;                 /* FPSCR at translation time (FR/SZ bits) */
    uint16_t opcode;                /* current 16-bit instruction word */
    uint32_t flags;                 /* TB flags, incl. DELAY_SLOT* state */
    int bstate;                     /* one of the BS_* values below */
    int memidx;                     /* MMU index for qemu_ld/st operations */
    uint32_t delayed_pc;            /* static branch target, or (uint32_t)-1
                                       when only known at run time */
    int singlestep_enabled;         /* suppress TB chaining when set */
    uint32_t features;              /* SH_FEATURE_* mask of the CPU model */
    int has_movcal;                 /* a movca.l backup may be live; see
                                       the comment in _decode_opc */
} DisasContext;
46
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
/* True when translating user-mode code (SR.MD clear). */
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif

/* Values for DisasContext::bstate. */
enum {
    BS_NONE     = 0, /* We go out of the TB without reaching a branch or an
                      * exception condition
                      */
    BS_STOP     = 1, /* We want to stop translation for any reason */
    BS_BRANCH   = 2, /* We reached a branch condition */
    BS_EXCP     = 3, /* We reached an exception condition */
};
61
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];      /* R0-R15 plus the R0-R7 bank-1 shadows */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];      /* both FP register banks */

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

/* NOTE(review): presumably parallels the gen_opc_* buffers used when
   restoring state from a retranslation — confirm against the (not shown)
   gen_intermediate_code code. */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "gen-icount.h"
76
/*
 * Create the TCG globals backing CPUSH4State: the 24 general registers
 * (R0-R15 plus the eight bank-1 shadows), the control/system registers,
 * the 32 FP registers, and the translator-internal flags/delayed_pc/ldst
 * values.  Idempotent: only the first call does any work.
 */
static void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;   /* globals may only be registered once */
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    /* Translator-internal state, not architectural registers. */
    cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
155
156 void cpu_dump_state(CPUSH4State * env, FILE * f,
157 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
158 int flags)
159 {
160 int i;
161 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
162 env->pc, env->sr, env->pr, env->fpscr);
163 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
164 env->spc, env->ssr, env->gbr, env->vbr);
165 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
166 env->sgr, env->dbr, env->delayed_pc, env->fpul);
167 for (i = 0; i < 24; i += 4) {
168 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
169 i, env->gregs[i], i + 1, env->gregs[i + 1],
170 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
171 }
172 if (env->flags & DELAY_SLOT) {
173 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
174 env->delayed_pc);
175 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
176 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
177 env->delayed_pc);
178 }
179 }
180
/* Legacy entry point: reset the CPU owning @env.  Thin wrapper around
   cpu_reset() on the containing CPUState. */
void cpu_state_reset(CPUSH4State *env)
{
    cpu_reset(ENV_GET_CPU(env));
}
185
/* Description of one supported SH4 CPU model. */
typedef struct {
    const char *name;   /* model name as accepted by -cpu */
    int id;             /* SH_CPU_* identifier */
    uint32_t pvr;       /* processor version register value */
    uint32_t prr;       /* product register value */
    uint32_t cvr;       /* cache version register value */
    uint32_t features;  /* SH_FEATURE_* mask */
} sh4_def_t;
194
195 static sh4_def_t sh4_defs[] = {
196 {
197 .name = "SH7750R",
198 .id = SH_CPU_SH7750R,
199 .pvr = 0x00050000,
200 .prr = 0x00000100,
201 .cvr = 0x00110000,
202 .features = SH_FEATURE_BCR3_AND_BCR4,
203 }, {
204 .name = "SH7751R",
205 .id = SH_CPU_SH7751R,
206 .pvr = 0x04050005,
207 .prr = 0x00000113,
208 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
209 .features = SH_FEATURE_BCR3_AND_BCR4,
210 }, {
211 .name = "SH7785",
212 .id = SH_CPU_SH7785,
213 .pvr = 0x10300700,
214 .prr = 0x00000200,
215 .cvr = 0x71440211,
216 .features = SH_FEATURE_SH4A,
217 },
218 };
219
220 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
221 {
222 int i;
223
224 if (strcasecmp(name, "any") == 0)
225 return &sh4_defs[0];
226
227 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
228 if (strcasecmp(name, sh4_defs[i].name) == 0)
229 return &sh4_defs[i];
230
231 return NULL;
232 }
233
234 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
235 {
236 int i;
237
238 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
239 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
240 }
241
/* Copy the model-specific identification registers from @def into @env. */
static void cpu_register(CPUSH4State *env, const sh4_def_t *def)
{
    env->pvr = def->pvr;
    env->prr = def->prr;
    env->cvr = def->cvr;
    env->id = def->id;
}
249
/* Create, reset and start a SuperH CPU for the model named @cpu_model.
   Returns the new CPU, or NULL if the model name is unknown. */
SuperHCPU *cpu_sh4_init(const char *cpu_model)
{
    SuperHCPU *cpu;
    CPUSH4State *env;
    const sh4_def_t *def;

    def = cpu_sh4_find_by_name(cpu_model);
    if (!def)
	return NULL;
    cpu = SUPERH_CPU(object_new(TYPE_SUPERH_CPU));
    env = &cpu->env;
    env->features = def->features;
    sh4_translate_init();
    env->cpu_model_str = cpu_model;
    cpu_reset(CPU(cpu));
    /* NOTE(review): cpu_register() runs after cpu_reset(), presumably so
       the reset does not clobber the ID registers — confirm. */
    cpu_register(env, def);
    qemu_init_vcpu(env);
    return cpu;
}
269
/* Emit a jump to guest address @dest as exit @n of the current TB.
   Uses a direct (chainable) TB link when the target is in the same
   guest page and single-stepping is off; otherwise falls back to a
   plain exit, raising a debug exception first when single-stepping. */
static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;

    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
	!ctx->singlestep_enabled) {
	/* Use a direct jump if in same page and singlestep not enabled */
	tcg_gen_goto_tb(n);
	tcg_gen_movi_i32(cpu_pc, dest);
	/* Low bits of the return value encode which exit was taken. */
	tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
	tcg_gen_movi_i32(cpu_pc, dest);
	if (ctx->singlestep_enabled)
	    gen_helper_debug();
	tcg_gen_exit_tb(0);
    }
}
288
/* Emit the jump at the end of a delay slot.  If the target is known at
   translation time, chain to it; otherwise load PC from delayed_pc and
   exit to the main loop. */
static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) - 1) {
	/* Target is not statically known, it comes necessarily from a
	   delayed jump as immediate jump are conditional jumps */
	tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
	if (ctx->singlestep_enabled)
	    gen_helper_debug();
	tcg_gen_exit_tb(0);
    } else {
	gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
302
/* Prepare a conditional delayed branch: record @delayed_pc and set
   DELAY_SLOT_TRUE in cpu_flags when the branch will be taken, i.e.
   when SR.T matches @t (t != 0 means "branch if T set"). */
static inline void gen_branch_slot(uint32_t delayed_pc, int t)
{
    TCGv sr;
    int label = gen_new_label();
    tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, cpu_sr, SR_T);
    /* Skip the flag update when the branch condition fails. */
    tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    gen_set_label(label);
}
314
/* Immediate conditional jump (bt or bf): branch to @ift when SR.T is
   set, to @ifnott otherwise.  Both paths end the TB. */
static void gen_conditional_jump(DisasContext * ctx,
				 target_ulong ift, target_ulong ifnott)
{
    int l1;
    TCGv sr;

    l1 = gen_new_label();
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, cpu_sr, SR_T);
    tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
    gen_goto_tb(ctx, 0, ifnott);
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ift);
}
330
/* Delayed conditional jump (bt/s or bf/s): consume the DELAY_SLOT_TRUE
   flag recorded by gen_branch_slot().  If it is clear, fall through to
   the next instruction (pc + 2); if set, clear it and take the jump. */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    int l1;
    TCGv ds;

    l1 = gen_new_label();
    ds = tcg_temp_new();
    tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);
    gen_set_label(l1);
    tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
    gen_jump(ctx);
}
346
/* Set the SR.T flag. */
static inline void gen_set_t(void)
{
    tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
}
351
/* Clear the SR.T flag. */
static inline void gen_clr_t(void)
{
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
}
356
/* Set SR.T to the result of comparing (t1 cond t0); note the operand
   order passed to setcond. */
static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
{
    TCGv t;

    t = tcg_temp_new();
    tcg_gen_setcond_i32(cond, t, t1, t0);
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
    tcg_gen_or_i32(cpu_sr, cpu_sr, t);

    tcg_temp_free(t);
}
368
/* Set SR.T to the result of comparing (t0 cond imm). */
static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
{
    TCGv t;

    t = tcg_temp_new();
    tcg_gen_setcondi_i32(cond, t, t0, imm);
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
    tcg_gen_or_i32(cpu_sr, cpu_sr, t);

    tcg_temp_free(t);
}
380
/* Replace cpu_flags with @flags, preserving only the DELAY_SLOT_TRUE
   bit, which carries run-time branch state across the delay slot. */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
386
/* Copy bit @p1 of @t1 into bit @p0 of @t0, leaving the other bits of
   @t0 unchanged.  Bit positions are taken modulo 32. */
static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
{
    TCGv tmp = tcg_temp_new();

    p0 &= 0x1f;
    p1 &= 0x1f;

    /* Isolate the source bit, clear the destination bit... */
    tcg_gen_andi_i32(tmp, t1, (1 << p1));
    tcg_gen_andi_i32(t0, t0, ~(1 << p0));
    /* ...shift the source bit into position, then merge it in. */
    if (p0 < p1)
        tcg_gen_shri_i32(tmp, tmp, p1 - p0);
    else if (p0 > p1)
        tcg_gen_shli_i32(tmp, tmp, p0 - p1);
    tcg_gen_or_i32(t0, t0, tmp);

    tcg_temp_free(tmp);
}
404
/* Assemble the 64-bit value of an even/odd FP register pair into @t:
   fregs[reg] supplies the high 32 bits, fregs[reg + 1] the low. */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
409
/* Split the 64-bit value @t into an even/odd FP register pair: the low
   32 bits go to fregs[reg + 1], the high 32 bits to fregs[reg].
   Clobbers @t (shifts it right by 32). */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);
    tcg_temp_free_i32(tmp);
}
420
/* Bit-field accessors for the 16-bit instruction in ctx->opcode.
   BH_L extracts bits H..L; the "s" variants sign-extend. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, manually sign-extended. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* R0-R7 are banked: REG() yields the bank currently selected by
   SR.MD+SR.RB, ALTREG() the inactive one.  R8-R15 are not banked. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
		(cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
		   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selection honouring the FPSCR.FR bank bit; XHACK maps an
   XD register number onto the flat cpu_fregs[] index. */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
441
/* Abort the current instruction with an illegal-slot exception if it
   is being translated inside a delay slot (where it is not allowed). */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  { \
      gen_helper_raise_slot_illegal_instruction(); \
      ctx->bstate = BS_EXCP; \
      return; \
  }

/* Abort with an illegal-instruction exception (slot variant when in a
   delay slot) if translating user-mode code. */
#define CHECK_PRIVILEGED                                      \
  if (IS_USER(ctx)) {                                         \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_illegal_instruction();        \
      } else {                                                \
          gen_helper_raise_illegal_instruction();             \
      }                                                       \
      ctx->bstate = BS_EXCP;                                  \
      return;                                                 \
  }

/* Abort with an FPU-disable exception (slot variant when in a delay
   slot) if SR.FD is set in the TB flags. */
#define CHECK_FPU_ENABLED                                     \
  if (ctx->flags & SR_FD) {                                   \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable();                \
      } else {                                                \
          gen_helper_raise_fpu_disable();                     \
      }                                                       \
      ctx->bstate = BS_EXCP;                                  \
      return;                                                 \
  }
471
472 static void _decode_opc(DisasContext * ctx)
473 {
474 /* This code tries to make movcal emulation sufficiently
475 accurate for Linux purposes. This instruction writes
476 memory, and prior to that, always allocates a cache line.
477 It is used in two contexts:
478 - in memcpy, where data is copied in blocks, the first write
479 of to a block uses movca.l for performance.
480 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
481 to flush the cache. Here, the data written by movcal.l is never
482 written to memory, and the data written is just bogus.
483
484 To simulate this, we simulate movcal.l, we store the value to memory,
485 but we also remember the previous content. If we see ocbi, we check
486 if movcal.l for that address was done previously. If so, the write should
487 not have hit the memory, so we restore the previous content.
488 When we see an instruction that is neither movca.l
489 nor ocbi, the previous content is discarded.
490
491 To optimize, we only try to flush stores when we're at the start of
492 TB, or if we already saw movca.l in this TB and did not flush stores
493 yet. */
494 if (ctx->has_movcal)
495 {
496 int opcode = ctx->opcode & 0xf0ff;
497 if (opcode != 0x0093 /* ocbi */
498 && opcode != 0x00c3 /* movca.l */)
499 {
500 gen_helper_discard_movcal_backup ();
501 ctx->has_movcal = 0;
502 }
503 }
504
505 #if 0
506 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
507 #endif
508
509 switch (ctx->opcode) {
510 case 0x0019: /* div0u */
511 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
512 return;
513 case 0x000b: /* rts */
514 CHECK_NOT_DELAY_SLOT
515 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
516 ctx->flags |= DELAY_SLOT;
517 ctx->delayed_pc = (uint32_t) - 1;
518 return;
519 case 0x0028: /* clrmac */
520 tcg_gen_movi_i32(cpu_mach, 0);
521 tcg_gen_movi_i32(cpu_macl, 0);
522 return;
523 case 0x0048: /* clrs */
524 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
525 return;
526 case 0x0008: /* clrt */
527 gen_clr_t();
528 return;
529 case 0x0038: /* ldtlb */
530 CHECK_PRIVILEGED
531 gen_helper_ldtlb();
532 return;
533 case 0x002b: /* rte */
534 CHECK_PRIVILEGED
535 CHECK_NOT_DELAY_SLOT
536 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
537 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
538 ctx->flags |= DELAY_SLOT;
539 ctx->delayed_pc = (uint32_t) - 1;
540 return;
541 case 0x0058: /* sets */
542 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
543 return;
544 case 0x0018: /* sett */
545 gen_set_t();
546 return;
547 case 0xfbfd: /* frchg */
548 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
549 ctx->bstate = BS_STOP;
550 return;
551 case 0xf3fd: /* fschg */
552 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
553 ctx->bstate = BS_STOP;
554 return;
555 case 0x0009: /* nop */
556 return;
557 case 0x001b: /* sleep */
558 CHECK_PRIVILEGED
559 gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
560 return;
561 }
562
563 switch (ctx->opcode & 0xf000) {
564 case 0x1000: /* mov.l Rm,@(disp,Rn) */
565 {
566 TCGv addr = tcg_temp_new();
567 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
568 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
569 tcg_temp_free(addr);
570 }
571 return;
572 case 0x5000: /* mov.l @(disp,Rm),Rn */
573 {
574 TCGv addr = tcg_temp_new();
575 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
576 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
577 tcg_temp_free(addr);
578 }
579 return;
580 case 0xe000: /* mov #imm,Rn */
581 tcg_gen_movi_i32(REG(B11_8), B7_0s);
582 return;
583 case 0x9000: /* mov.w @(disp,PC),Rn */
584 {
585 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
586 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
587 tcg_temp_free(addr);
588 }
589 return;
590 case 0xd000: /* mov.l @(disp,PC),Rn */
591 {
592 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
593 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
594 tcg_temp_free(addr);
595 }
596 return;
597 case 0x7000: /* add #imm,Rn */
598 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
599 return;
600 case 0xa000: /* bra disp */
601 CHECK_NOT_DELAY_SLOT
602 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
603 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
604 ctx->flags |= DELAY_SLOT;
605 return;
606 case 0xb000: /* bsr disp */
607 CHECK_NOT_DELAY_SLOT
608 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
609 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
610 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
611 ctx->flags |= DELAY_SLOT;
612 return;
613 }
614
615 switch (ctx->opcode & 0xf00f) {
616 case 0x6003: /* mov Rm,Rn */
617 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
618 return;
619 case 0x2000: /* mov.b Rm,@Rn */
620 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
621 return;
622 case 0x2001: /* mov.w Rm,@Rn */
623 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
624 return;
625 case 0x2002: /* mov.l Rm,@Rn */
626 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
627 return;
628 case 0x6000: /* mov.b @Rm,Rn */
629 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
630 return;
631 case 0x6001: /* mov.w @Rm,Rn */
632 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
633 return;
634 case 0x6002: /* mov.l @Rm,Rn */
635 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
636 return;
637 case 0x2004: /* mov.b Rm,@-Rn */
638 {
639 TCGv addr = tcg_temp_new();
640 tcg_gen_subi_i32(addr, REG(B11_8), 1);
641 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
642 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
643 tcg_temp_free(addr);
644 }
645 return;
646 case 0x2005: /* mov.w Rm,@-Rn */
647 {
648 TCGv addr = tcg_temp_new();
649 tcg_gen_subi_i32(addr, REG(B11_8), 2);
650 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
651 tcg_gen_mov_i32(REG(B11_8), addr);
652 tcg_temp_free(addr);
653 }
654 return;
655 case 0x2006: /* mov.l Rm,@-Rn */
656 {
657 TCGv addr = tcg_temp_new();
658 tcg_gen_subi_i32(addr, REG(B11_8), 4);
659 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
660 tcg_gen_mov_i32(REG(B11_8), addr);
661 }
662 return;
663 case 0x6004: /* mov.b @Rm+,Rn */
664 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
665 if ( B11_8 != B7_4 )
666 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
667 return;
668 case 0x6005: /* mov.w @Rm+,Rn */
669 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
670 if ( B11_8 != B7_4 )
671 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
672 return;
673 case 0x6006: /* mov.l @Rm+,Rn */
674 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
675 if ( B11_8 != B7_4 )
676 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
677 return;
678 case 0x0004: /* mov.b Rm,@(R0,Rn) */
679 {
680 TCGv addr = tcg_temp_new();
681 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
682 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
683 tcg_temp_free(addr);
684 }
685 return;
686 case 0x0005: /* mov.w Rm,@(R0,Rn) */
687 {
688 TCGv addr = tcg_temp_new();
689 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
690 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
691 tcg_temp_free(addr);
692 }
693 return;
694 case 0x0006: /* mov.l Rm,@(R0,Rn) */
695 {
696 TCGv addr = tcg_temp_new();
697 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
698 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
699 tcg_temp_free(addr);
700 }
701 return;
702 case 0x000c: /* mov.b @(R0,Rm),Rn */
703 {
704 TCGv addr = tcg_temp_new();
705 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
706 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
707 tcg_temp_free(addr);
708 }
709 return;
710 case 0x000d: /* mov.w @(R0,Rm),Rn */
711 {
712 TCGv addr = tcg_temp_new();
713 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
714 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
715 tcg_temp_free(addr);
716 }
717 return;
718 case 0x000e: /* mov.l @(R0,Rm),Rn */
719 {
720 TCGv addr = tcg_temp_new();
721 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
722 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
723 tcg_temp_free(addr);
724 }
725 return;
726 case 0x6008: /* swap.b Rm,Rn */
727 {
728 TCGv high, low;
729 high = tcg_temp_new();
730 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
731 low = tcg_temp_new();
732 tcg_gen_ext16u_i32(low, REG(B7_4));
733 tcg_gen_bswap16_i32(low, low);
734 tcg_gen_or_i32(REG(B11_8), high, low);
735 tcg_temp_free(low);
736 tcg_temp_free(high);
737 }
738 return;
739 case 0x6009: /* swap.w Rm,Rn */
740 {
741 TCGv high, low;
742 high = tcg_temp_new();
743 tcg_gen_shli_i32(high, REG(B7_4), 16);
744 low = tcg_temp_new();
745 tcg_gen_shri_i32(low, REG(B7_4), 16);
746 tcg_gen_ext16u_i32(low, low);
747 tcg_gen_or_i32(REG(B11_8), high, low);
748 tcg_temp_free(low);
749 tcg_temp_free(high);
750 }
751 return;
752 case 0x200d: /* xtrct Rm,Rn */
753 {
754 TCGv high, low;
755 high = tcg_temp_new();
756 tcg_gen_shli_i32(high, REG(B7_4), 16);
757 low = tcg_temp_new();
758 tcg_gen_shri_i32(low, REG(B11_8), 16);
759 tcg_gen_ext16u_i32(low, low);
760 tcg_gen_or_i32(REG(B11_8), high, low);
761 tcg_temp_free(low);
762 tcg_temp_free(high);
763 }
764 return;
765 case 0x300c: /* add Rm,Rn */
766 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
767 return;
768 case 0x300e: /* addc Rm,Rn */
769 gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
770 return;
771 case 0x300f: /* addv Rm,Rn */
772 gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
773 return;
774 case 0x2009: /* and Rm,Rn */
775 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
776 return;
777 case 0x3000: /* cmp/eq Rm,Rn */
778 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
779 return;
780 case 0x3003: /* cmp/ge Rm,Rn */
781 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
782 return;
783 case 0x3007: /* cmp/gt Rm,Rn */
784 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
785 return;
786 case 0x3006: /* cmp/hi Rm,Rn */
787 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
788 return;
789 case 0x3002: /* cmp/hs Rm,Rn */
790 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
791 return;
792 case 0x200c: /* cmp/str Rm,Rn */
793 {
794 TCGv cmp1 = tcg_temp_new();
795 TCGv cmp2 = tcg_temp_new();
796 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
797 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
798 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
799 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
800 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
801 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
802 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
803 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
804 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
805 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
806 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
807 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
808 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
809 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
810 tcg_temp_free(cmp2);
811 tcg_temp_free(cmp1);
812 }
813 return;
814 case 0x2007: /* div0s Rm,Rn */
815 {
816 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
817 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
818 TCGv val = tcg_temp_new();
819 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
820 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
821 tcg_temp_free(val);
822 }
823 return;
824 case 0x3004: /* div1 Rm,Rn */
825 gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
826 return;
827 case 0x300d: /* dmuls.l Rm,Rn */
828 {
829 TCGv_i64 tmp1 = tcg_temp_new_i64();
830 TCGv_i64 tmp2 = tcg_temp_new_i64();
831
832 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
833 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
834 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
835 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
836 tcg_gen_shri_i64(tmp1, tmp1, 32);
837 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
838
839 tcg_temp_free_i64(tmp2);
840 tcg_temp_free_i64(tmp1);
841 }
842 return;
843 case 0x3005: /* dmulu.l Rm,Rn */
844 {
845 TCGv_i64 tmp1 = tcg_temp_new_i64();
846 TCGv_i64 tmp2 = tcg_temp_new_i64();
847
848 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
849 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
850 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
851 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
852 tcg_gen_shri_i64(tmp1, tmp1, 32);
853 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
854
855 tcg_temp_free_i64(tmp2);
856 tcg_temp_free_i64(tmp1);
857 }
858 return;
859 case 0x600e: /* exts.b Rm,Rn */
860 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
861 return;
862 case 0x600f: /* exts.w Rm,Rn */
863 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
864 return;
865 case 0x600c: /* extu.b Rm,Rn */
866 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
867 return;
868 case 0x600d: /* extu.w Rm,Rn */
869 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
870 return;
871 case 0x000f: /* mac.l @Rm+,@Rn+ */
872 {
873 TCGv arg0, arg1;
874 arg0 = tcg_temp_new();
875 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
876 arg1 = tcg_temp_new();
877 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
878 gen_helper_macl(arg0, arg1);
879 tcg_temp_free(arg1);
880 tcg_temp_free(arg0);
881 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
882 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
883 }
884 return;
885 case 0x400f: /* mac.w @Rm+,@Rn+ */
886 {
887 TCGv arg0, arg1;
888 arg0 = tcg_temp_new();
889 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
890 arg1 = tcg_temp_new();
891 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
892 gen_helper_macw(arg0, arg1);
893 tcg_temp_free(arg1);
894 tcg_temp_free(arg0);
895 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
896 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
897 }
898 return;
899 case 0x0007: /* mul.l Rm,Rn */
900 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
901 return;
902 case 0x200f: /* muls.w Rm,Rn */
903 {
904 TCGv arg0, arg1;
905 arg0 = tcg_temp_new();
906 tcg_gen_ext16s_i32(arg0, REG(B7_4));
907 arg1 = tcg_temp_new();
908 tcg_gen_ext16s_i32(arg1, REG(B11_8));
909 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
910 tcg_temp_free(arg1);
911 tcg_temp_free(arg0);
912 }
913 return;
914 case 0x200e: /* mulu.w Rm,Rn */
915 {
916 TCGv arg0, arg1;
917 arg0 = tcg_temp_new();
918 tcg_gen_ext16u_i32(arg0, REG(B7_4));
919 arg1 = tcg_temp_new();
920 tcg_gen_ext16u_i32(arg1, REG(B11_8));
921 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
922 tcg_temp_free(arg1);
923 tcg_temp_free(arg0);
924 }
925 return;
926 case 0x600b: /* neg Rm,Rn */
927 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
928 return;
929 case 0x600a: /* negc Rm,Rn */
930 {
931 TCGv t0, t1;
932 t0 = tcg_temp_new();
933 tcg_gen_neg_i32(t0, REG(B7_4));
934 t1 = tcg_temp_new();
935 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
936 tcg_gen_sub_i32(REG(B11_8), t0, t1);
937 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
938 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
939 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
940 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
941 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
942 tcg_temp_free(t0);
943 tcg_temp_free(t1);
944 }
945 return;
946 case 0x6007: /* not Rm,Rn */
947 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
948 return;
949 case 0x200b: /* or Rm,Rn */
950 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
951 return;
952 case 0x400c: /* shad Rm,Rn */
953 {
954 int label1 = gen_new_label();
955 int label2 = gen_new_label();
956 int label3 = gen_new_label();
957 int label4 = gen_new_label();
958 TCGv shift;
959 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
960 /* Rm positive, shift to the left */
961 shift = tcg_temp_new();
962 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
963 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
964 tcg_temp_free(shift);
965 tcg_gen_br(label4);
966 /* Rm negative, shift to the right */
967 gen_set_label(label1);
968 shift = tcg_temp_new();
969 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
970 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
971 tcg_gen_not_i32(shift, REG(B7_4));
972 tcg_gen_andi_i32(shift, shift, 0x1f);
973 tcg_gen_addi_i32(shift, shift, 1);
974 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
975 tcg_temp_free(shift);
976 tcg_gen_br(label4);
977 /* Rm = -32 */
978 gen_set_label(label2);
979 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
980 tcg_gen_movi_i32(REG(B11_8), 0);
981 tcg_gen_br(label4);
982 gen_set_label(label3);
983 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
984 gen_set_label(label4);
985 }
986 return;
987 case 0x400d: /* shld Rm,Rn */
988 {
989 int label1 = gen_new_label();
990 int label2 = gen_new_label();
991 int label3 = gen_new_label();
992 TCGv shift;
993 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
994 /* Rm positive, shift to the left */
995 shift = tcg_temp_new();
996 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
997 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
998 tcg_temp_free(shift);
999 tcg_gen_br(label3);
1000 /* Rm negative, shift to the right */
1001 gen_set_label(label1);
1002 shift = tcg_temp_new();
1003 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1004 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1005 tcg_gen_not_i32(shift, REG(B7_4));
1006 tcg_gen_andi_i32(shift, shift, 0x1f);
1007 tcg_gen_addi_i32(shift, shift, 1);
1008 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1009 tcg_temp_free(shift);
1010 tcg_gen_br(label3);
1011 /* Rm = -32 */
1012 gen_set_label(label2);
1013 tcg_gen_movi_i32(REG(B11_8), 0);
1014 gen_set_label(label3);
1015 }
1016 return;
1017 case 0x3008: /* sub Rm,Rn */
1018 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1019 return;
1020 case 0x300a: /* subc Rm,Rn */
1021 gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
1022 return;
1023 case 0x300b: /* subv Rm,Rn */
1024 gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
1025 return;
1026 case 0x2008: /* tst Rm,Rn */
1027 {
1028 TCGv val = tcg_temp_new();
1029 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1030 gen_cmp_imm(TCG_COND_EQ, val, 0);
1031 tcg_temp_free(val);
1032 }
1033 return;
1034 case 0x200a: /* xor Rm,Rn */
1035 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1036 return;
1037 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1038 CHECK_FPU_ENABLED
1039 if (ctx->fpscr & FPSCR_SZ) {
1040 TCGv_i64 fp = tcg_temp_new_i64();
1041 gen_load_fpr64(fp, XREG(B7_4));
1042 gen_store_fpr64(fp, XREG(B11_8));
1043 tcg_temp_free_i64(fp);
1044 } else {
1045 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1046 }
1047 return;
1048 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1049 CHECK_FPU_ENABLED
1050 if (ctx->fpscr & FPSCR_SZ) {
1051 TCGv addr_hi = tcg_temp_new();
1052 int fr = XREG(B7_4);
1053 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1054 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1055 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1056 tcg_temp_free(addr_hi);
1057 } else {
1058 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1059 }
1060 return;
1061 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1062 CHECK_FPU_ENABLED
1063 if (ctx->fpscr & FPSCR_SZ) {
1064 TCGv addr_hi = tcg_temp_new();
1065 int fr = XREG(B11_8);
1066 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1067 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1068 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1069 tcg_temp_free(addr_hi);
1070 } else {
1071 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1072 }
1073 return;
1074 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1075 CHECK_FPU_ENABLED
1076 if (ctx->fpscr & FPSCR_SZ) {
1077 TCGv addr_hi = tcg_temp_new();
1078 int fr = XREG(B11_8);
1079 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1080 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1081 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1082 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1083 tcg_temp_free(addr_hi);
1084 } else {
1085 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1086 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1087 }
1088 return;
1089 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1090 CHECK_FPU_ENABLED
1091 if (ctx->fpscr & FPSCR_SZ) {
1092 TCGv addr = tcg_temp_new_i32();
1093 int fr = XREG(B7_4);
1094 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1095 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1096 tcg_gen_subi_i32(addr, addr, 4);
1097 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1098 tcg_gen_mov_i32(REG(B11_8), addr);
1099 tcg_temp_free(addr);
1100 } else {
1101 TCGv addr;
1102 addr = tcg_temp_new_i32();
1103 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1104 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1105 tcg_gen_mov_i32(REG(B11_8), addr);
1106 tcg_temp_free(addr);
1107 }
1108 return;
1109 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1110 CHECK_FPU_ENABLED
1111 {
1112 TCGv addr = tcg_temp_new_i32();
1113 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1114 if (ctx->fpscr & FPSCR_SZ) {
1115 int fr = XREG(B11_8);
1116 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1117 tcg_gen_addi_i32(addr, addr, 4);
1118 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1119 } else {
1120 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1121 }
1122 tcg_temp_free(addr);
1123 }
1124 return;
1125 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1126 CHECK_FPU_ENABLED
1127 {
1128 TCGv addr = tcg_temp_new();
1129 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1130 if (ctx->fpscr & FPSCR_SZ) {
1131 int fr = XREG(B7_4);
1132 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1133 tcg_gen_addi_i32(addr, addr, 4);
1134 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1135 } else {
1136 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1137 }
1138 tcg_temp_free(addr);
1139 }
1140 return;
1141 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1142 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1143 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1144 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1145 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1146 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1147 {
1148 CHECK_FPU_ENABLED
1149 if (ctx->fpscr & FPSCR_PR) {
1150 TCGv_i64 fp0, fp1;
1151
1152 if (ctx->opcode & 0x0110)
1153 break; /* illegal instruction */
1154 fp0 = tcg_temp_new_i64();
1155 fp1 = tcg_temp_new_i64();
1156 gen_load_fpr64(fp0, DREG(B11_8));
1157 gen_load_fpr64(fp1, DREG(B7_4));
1158 switch (ctx->opcode & 0xf00f) {
1159 case 0xf000: /* fadd Rm,Rn */
1160 gen_helper_fadd_DT(fp0, fp0, fp1);
1161 break;
1162 case 0xf001: /* fsub Rm,Rn */
1163 gen_helper_fsub_DT(fp0, fp0, fp1);
1164 break;
1165 case 0xf002: /* fmul Rm,Rn */
1166 gen_helper_fmul_DT(fp0, fp0, fp1);
1167 break;
1168 case 0xf003: /* fdiv Rm,Rn */
1169 gen_helper_fdiv_DT(fp0, fp0, fp1);
1170 break;
1171 case 0xf004: /* fcmp/eq Rm,Rn */
1172 gen_helper_fcmp_eq_DT(fp0, fp1);
1173 return;
1174 case 0xf005: /* fcmp/gt Rm,Rn */
1175 gen_helper_fcmp_gt_DT(fp0, fp1);
1176 return;
1177 }
1178 gen_store_fpr64(fp0, DREG(B11_8));
1179 tcg_temp_free_i64(fp0);
1180 tcg_temp_free_i64(fp1);
1181 } else {
1182 switch (ctx->opcode & 0xf00f) {
1183 case 0xf000: /* fadd Rm,Rn */
1184 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1185 break;
1186 case 0xf001: /* fsub Rm,Rn */
1187 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1188 break;
1189 case 0xf002: /* fmul Rm,Rn */
1190 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1191 break;
1192 case 0xf003: /* fdiv Rm,Rn */
1193 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1194 break;
1195 case 0xf004: /* fcmp/eq Rm,Rn */
1196 gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1197 return;
1198 case 0xf005: /* fcmp/gt Rm,Rn */
1199 gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1200 return;
1201 }
1202 }
1203 }
1204 return;
1205 case 0xf00e: /* fmac FR0,RM,Rn */
1206 {
1207 CHECK_FPU_ENABLED
1208 if (ctx->fpscr & FPSCR_PR) {
1209 break; /* illegal instruction */
1210 } else {
1211 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
1212 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
1213 return;
1214 }
1215 }
1216 }
1217
1218 switch (ctx->opcode & 0xff00) {
1219 case 0xc900: /* and #imm,R0 */
1220 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1221 return;
1222 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1223 {
1224 TCGv addr, val;
1225 addr = tcg_temp_new();
1226 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1227 val = tcg_temp_new();
1228 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1229 tcg_gen_andi_i32(val, val, B7_0);
1230 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1231 tcg_temp_free(val);
1232 tcg_temp_free(addr);
1233 }
1234 return;
1235 case 0x8b00: /* bf label */
1236 CHECK_NOT_DELAY_SLOT
1237 gen_conditional_jump(ctx, ctx->pc + 2,
1238 ctx->pc + 4 + B7_0s * 2);
1239 ctx->bstate = BS_BRANCH;
1240 return;
1241 case 0x8f00: /* bf/s label */
1242 CHECK_NOT_DELAY_SLOT
1243 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1244 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1245 return;
1246 case 0x8900: /* bt label */
1247 CHECK_NOT_DELAY_SLOT
1248 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1249 ctx->pc + 2);
1250 ctx->bstate = BS_BRANCH;
1251 return;
1252 case 0x8d00: /* bt/s label */
1253 CHECK_NOT_DELAY_SLOT
1254 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1255 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1256 return;
1257 case 0x8800: /* cmp/eq #imm,R0 */
1258 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1259 return;
1260 case 0xc400: /* mov.b @(disp,GBR),R0 */
1261 {
1262 TCGv addr = tcg_temp_new();
1263 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1264 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1265 tcg_temp_free(addr);
1266 }
1267 return;
1268 case 0xc500: /* mov.w @(disp,GBR),R0 */
1269 {
1270 TCGv addr = tcg_temp_new();
1271 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1272 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1273 tcg_temp_free(addr);
1274 }
1275 return;
1276 case 0xc600: /* mov.l @(disp,GBR),R0 */
1277 {
1278 TCGv addr = tcg_temp_new();
1279 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1280 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1281 tcg_temp_free(addr);
1282 }
1283 return;
1284 case 0xc000: /* mov.b R0,@(disp,GBR) */
1285 {
1286 TCGv addr = tcg_temp_new();
1287 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1288 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1289 tcg_temp_free(addr);
1290 }
1291 return;
1292 case 0xc100: /* mov.w R0,@(disp,GBR) */
1293 {
1294 TCGv addr = tcg_temp_new();
1295 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1296 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1297 tcg_temp_free(addr);
1298 }
1299 return;
1300 case 0xc200: /* mov.l R0,@(disp,GBR) */
1301 {
1302 TCGv addr = tcg_temp_new();
1303 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1304 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1305 tcg_temp_free(addr);
1306 }
1307 return;
1308 case 0x8000: /* mov.b R0,@(disp,Rn) */
1309 {
1310 TCGv addr = tcg_temp_new();
1311 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1312 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1313 tcg_temp_free(addr);
1314 }
1315 return;
1316 case 0x8100: /* mov.w R0,@(disp,Rn) */
1317 {
1318 TCGv addr = tcg_temp_new();
1319 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1320 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1321 tcg_temp_free(addr);
1322 }
1323 return;
1324 case 0x8400: /* mov.b @(disp,Rn),R0 */
1325 {
1326 TCGv addr = tcg_temp_new();
1327 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1328 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1329 tcg_temp_free(addr);
1330 }
1331 return;
1332 case 0x8500: /* mov.w @(disp,Rn),R0 */
1333 {
1334 TCGv addr = tcg_temp_new();
1335 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1336 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1337 tcg_temp_free(addr);
1338 }
1339 return;
1340 case 0xc700: /* mova @(disp,PC),R0 */
1341 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1342 return;
1343 case 0xcb00: /* or #imm,R0 */
1344 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1345 return;
1346 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1347 {
1348 TCGv addr, val;
1349 addr = tcg_temp_new();
1350 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1351 val = tcg_temp_new();
1352 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1353 tcg_gen_ori_i32(val, val, B7_0);
1354 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1355 tcg_temp_free(val);
1356 tcg_temp_free(addr);
1357 }
1358 return;
1359 case 0xc300: /* trapa #imm */
1360 {
1361 TCGv imm;
1362 CHECK_NOT_DELAY_SLOT
1363 imm = tcg_const_i32(B7_0);
1364 gen_helper_trapa(imm);
1365 tcg_temp_free(imm);
1366 ctx->bstate = BS_BRANCH;
1367 }
1368 return;
1369 case 0xc800: /* tst #imm,R0 */
1370 {
1371 TCGv val = tcg_temp_new();
1372 tcg_gen_andi_i32(val, REG(0), B7_0);
1373 gen_cmp_imm(TCG_COND_EQ, val, 0);
1374 tcg_temp_free(val);
1375 }
1376 return;
1377 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1378 {
1379 TCGv val = tcg_temp_new();
1380 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1381 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1382 tcg_gen_andi_i32(val, val, B7_0);
1383 gen_cmp_imm(TCG_COND_EQ, val, 0);
1384 tcg_temp_free(val);
1385 }
1386 return;
1387 case 0xca00: /* xor #imm,R0 */
1388 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1389 return;
1390 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1391 {
1392 TCGv addr, val;
1393 addr = tcg_temp_new();
1394 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1395 val = tcg_temp_new();
1396 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1397 tcg_gen_xori_i32(val, val, B7_0);
1398 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1399 tcg_temp_free(val);
1400 tcg_temp_free(addr);
1401 }
1402 return;
1403 }
1404
1405 switch (ctx->opcode & 0xf08f) {
1406 case 0x408e: /* ldc Rm,Rn_BANK */
1407 CHECK_PRIVILEGED
1408 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1409 return;
1410 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1411 CHECK_PRIVILEGED
1412 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1413 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1414 return;
1415 case 0x0082: /* stc Rm_BANK,Rn */
1416 CHECK_PRIVILEGED
1417 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1418 return;
1419 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1420 CHECK_PRIVILEGED
1421 {
1422 TCGv addr = tcg_temp_new();
1423 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1424 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1425 tcg_gen_mov_i32(REG(B11_8), addr);
1426 tcg_temp_free(addr);
1427 }
1428 return;
1429 }
1430
1431 switch (ctx->opcode & 0xf0ff) {
1432 case 0x0023: /* braf Rn */
1433 CHECK_NOT_DELAY_SLOT
1434 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1435 ctx->flags |= DELAY_SLOT;
1436 ctx->delayed_pc = (uint32_t) - 1;
1437 return;
1438 case 0x0003: /* bsrf Rn */
1439 CHECK_NOT_DELAY_SLOT
1440 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1441 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1442 ctx->flags |= DELAY_SLOT;
1443 ctx->delayed_pc = (uint32_t) - 1;
1444 return;
1445 case 0x4015: /* cmp/pl Rn */
1446 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1447 return;
1448 case 0x4011: /* cmp/pz Rn */
1449 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1450 return;
1451 case 0x4010: /* dt Rn */
1452 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1453 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1454 return;
1455 case 0x402b: /* jmp @Rn */
1456 CHECK_NOT_DELAY_SLOT
1457 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1458 ctx->flags |= DELAY_SLOT;
1459 ctx->delayed_pc = (uint32_t) - 1;
1460 return;
1461 case 0x400b: /* jsr @Rn */
1462 CHECK_NOT_DELAY_SLOT
1463 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1464 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1465 ctx->flags |= DELAY_SLOT;
1466 ctx->delayed_pc = (uint32_t) - 1;
1467 return;
1468 case 0x400e: /* ldc Rm,SR */
1469 CHECK_PRIVILEGED
1470 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1471 ctx->bstate = BS_STOP;
1472 return;
1473 case 0x4007: /* ldc.l @Rm+,SR */
1474 CHECK_PRIVILEGED
1475 {
1476 TCGv val = tcg_temp_new();
1477 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1478 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1479 tcg_temp_free(val);
1480 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1481 ctx->bstate = BS_STOP;
1482 }
1483 return;
1484 case 0x0002: /* stc SR,Rn */
1485 CHECK_PRIVILEGED
1486 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1487 return;
1488 case 0x4003: /* stc SR,@-Rn */
1489 CHECK_PRIVILEGED
1490 {
1491 TCGv addr = tcg_temp_new();
1492 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1493 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1494 tcg_gen_mov_i32(REG(B11_8), addr);
1495 tcg_temp_free(addr);
1496 }
1497 return;
1498 #define LD(reg,ldnum,ldpnum,prechk) \
1499 case ldnum: \
1500 prechk \
1501 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1502 return; \
1503 case ldpnum: \
1504 prechk \
1505 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1506 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1507 return;
1508 #define ST(reg,stnum,stpnum,prechk) \
1509 case stnum: \
1510 prechk \
1511 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1512 return; \
1513 case stpnum: \
1514 prechk \
1515 { \
1516 TCGv addr = tcg_temp_new(); \
1517 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1518 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1519 tcg_gen_mov_i32(REG(B11_8), addr); \
1520 tcg_temp_free(addr); \
1521 } \
1522 return;
1523 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1524 LD(reg,ldnum,ldpnum,prechk) \
1525 ST(reg,stnum,stpnum,prechk)
1526 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1527 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1528 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1529 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1530 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1531 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1532 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1533 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1534 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1535 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1536 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1537 case 0x406a: /* lds Rm,FPSCR */
1538 CHECK_FPU_ENABLED
1539 gen_helper_ld_fpscr(REG(B11_8));
1540 ctx->bstate = BS_STOP;
1541 return;
1542 case 0x4066: /* lds.l @Rm+,FPSCR */
1543 CHECK_FPU_ENABLED
1544 {
1545 TCGv addr = tcg_temp_new();
1546 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1547 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1548 gen_helper_ld_fpscr(addr);
1549 tcg_temp_free(addr);
1550 ctx->bstate = BS_STOP;
1551 }
1552 return;
1553 case 0x006a: /* sts FPSCR,Rn */
1554 CHECK_FPU_ENABLED
1555 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1556 return;
1557 case 0x4062: /* sts FPSCR,@-Rn */
1558 CHECK_FPU_ENABLED
1559 {
1560 TCGv addr, val;
1561 val = tcg_temp_new();
1562 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1563 addr = tcg_temp_new();
1564 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1565 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1566 tcg_gen_mov_i32(REG(B11_8), addr);
1567 tcg_temp_free(addr);
1568 tcg_temp_free(val);
1569 }
1570 return;
1571 case 0x00c3: /* movca.l R0,@Rm */
1572 {
1573 TCGv val = tcg_temp_new();
1574 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1575 gen_helper_movcal (REG(B11_8), val);
1576 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1577 }
1578 ctx->has_movcal = 1;
1579 return;
1580 case 0x40a9:
1581 /* MOVUA.L @Rm,R0 (Rm) -> R0
1582 Load non-boundary-aligned data */
1583 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1584 return;
1585 case 0x40e9:
1586 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1587 Load non-boundary-aligned data */
1588 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1589 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1590 return;
1591 case 0x0029: /* movt Rn */
1592 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1593 return;
1594 case 0x0073:
1595 /* MOVCO.L
1596 LDST -> T
1597 If (T == 1) R0 -> (Rn)
1598 0 -> LDST
1599 */
1600 if (ctx->features & SH_FEATURE_SH4A) {
1601 int label = gen_new_label();
1602 gen_clr_t();
1603 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1604 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1605 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1606 gen_set_label(label);
1607 tcg_gen_movi_i32(cpu_ldst, 0);
1608 return;
1609 } else
1610 break;
1611 case 0x0063:
1612 /* MOVLI.L @Rm,R0
1613 1 -> LDST
1614 (Rm) -> R0
1615 When interrupt/exception
1616 occurred 0 -> LDST
1617 */
1618 if (ctx->features & SH_FEATURE_SH4A) {
1619 tcg_gen_movi_i32(cpu_ldst, 0);
1620 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1621 tcg_gen_movi_i32(cpu_ldst, 1);
1622 return;
1623 } else
1624 break;
1625 case 0x0093: /* ocbi @Rn */
1626 {
1627 gen_helper_ocbi (REG(B11_8));
1628 }
1629 return;
1630 case 0x00a3: /* ocbp @Rn */
1631 case 0x00b3: /* ocbwb @Rn */
1632 /* These instructions are supposed to do nothing in case of
1633 a cache miss. Given that we only partially emulate caches
1634 it is safe to simply ignore them. */
1635 return;
1636 case 0x0083: /* pref @Rn */
1637 return;
1638 case 0x00d3: /* prefi @Rn */
1639 if (ctx->features & SH_FEATURE_SH4A)
1640 return;
1641 else
1642 break;
1643 case 0x00e3: /* icbi @Rn */
1644 if (ctx->features & SH_FEATURE_SH4A)
1645 return;
1646 else
1647 break;
1648 case 0x00ab: /* synco */
1649 if (ctx->features & SH_FEATURE_SH4A)
1650 return;
1651 else
1652 break;
1653 case 0x4024: /* rotcl Rn */
1654 {
1655 TCGv tmp = tcg_temp_new();
1656 tcg_gen_mov_i32(tmp, cpu_sr);
1657 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1658 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1659 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1660 tcg_temp_free(tmp);
1661 }
1662 return;
1663 case 0x4025: /* rotcr Rn */
1664 {
1665 TCGv tmp = tcg_temp_new();
1666 tcg_gen_mov_i32(tmp, cpu_sr);
1667 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1668 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1669 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1670 tcg_temp_free(tmp);
1671 }
1672 return;
1673 case 0x4004: /* rotl Rn */
1674 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1675 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1676 return;
1677 case 0x4005: /* rotr Rn */
1678 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1679 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1680 return;
1681 case 0x4000: /* shll Rn */
1682 case 0x4020: /* shal Rn */
1683 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1684 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1685 return;
1686 case 0x4021: /* shar Rn */
1687 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1688 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1689 return;
1690 case 0x4001: /* shlr Rn */
1691 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1692 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1693 return;
1694 case 0x4008: /* shll2 Rn */
1695 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1696 return;
1697 case 0x4018: /* shll8 Rn */
1698 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1699 return;
1700 case 0x4028: /* shll16 Rn */
1701 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1702 return;
1703 case 0x4009: /* shlr2 Rn */
1704 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1705 return;
1706 case 0x4019: /* shlr8 Rn */
1707 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1708 return;
1709 case 0x4029: /* shlr16 Rn */
1710 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1711 return;
1712 case 0x401b: /* tas.b @Rn */
1713 {
1714 TCGv addr, val;
1715 addr = tcg_temp_local_new();
1716 tcg_gen_mov_i32(addr, REG(B11_8));
1717 val = tcg_temp_local_new();
1718 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1719 gen_cmp_imm(TCG_COND_EQ, val, 0);
1720 tcg_gen_ori_i32(val, val, 0x80);
1721 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1722 tcg_temp_free(val);
1723 tcg_temp_free(addr);
1724 }
1725 return;
1726 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1727 CHECK_FPU_ENABLED
1728 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1729 return;
1730 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1731 CHECK_FPU_ENABLED
1732 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1733 return;
1734 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1735 CHECK_FPU_ENABLED
1736 if (ctx->fpscr & FPSCR_PR) {
1737 TCGv_i64 fp;
1738 if (ctx->opcode & 0x0100)
1739 break; /* illegal instruction */
1740 fp = tcg_temp_new_i64();
1741 gen_helper_float_DT(fp, cpu_fpul);
1742 gen_store_fpr64(fp, DREG(B11_8));
1743 tcg_temp_free_i64(fp);
1744 }
1745 else {
1746 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
1747 }
1748 return;
1749 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1750 CHECK_FPU_ENABLED
1751 if (ctx->fpscr & FPSCR_PR) {
1752 TCGv_i64 fp;
1753 if (ctx->opcode & 0x0100)
1754 break; /* illegal instruction */
1755 fp = tcg_temp_new_i64();
1756 gen_load_fpr64(fp, DREG(B11_8));
1757 gen_helper_ftrc_DT(cpu_fpul, fp);
1758 tcg_temp_free_i64(fp);
1759 }
1760 else {
1761 gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1762 }
1763 return;
1764 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1765 CHECK_FPU_ENABLED
1766 {
1767 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1768 }
1769 return;
1770 case 0xf05d: /* fabs FRn/DRn */
1771 CHECK_FPU_ENABLED
1772 if (ctx->fpscr & FPSCR_PR) {
1773 if (ctx->opcode & 0x0100)
1774 break; /* illegal instruction */
1775 TCGv_i64 fp = tcg_temp_new_i64();
1776 gen_load_fpr64(fp, DREG(B11_8));
1777 gen_helper_fabs_DT(fp, fp);
1778 gen_store_fpr64(fp, DREG(B11_8));
1779 tcg_temp_free_i64(fp);
1780 } else {
1781 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1782 }
1783 return;
1784 case 0xf06d: /* fsqrt FRn */
1785 CHECK_FPU_ENABLED
1786 if (ctx->fpscr & FPSCR_PR) {
1787 if (ctx->opcode & 0x0100)
1788 break; /* illegal instruction */
1789 TCGv_i64 fp = tcg_temp_new_i64();
1790 gen_load_fpr64(fp, DREG(B11_8));
1791 gen_helper_fsqrt_DT(fp, fp);
1792 gen_store_fpr64(fp, DREG(B11_8));
1793 tcg_temp_free_i64(fp);
1794 } else {
1795 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1796 }
1797 return;
1798 case 0xf07d: /* fsrra FRn */
1799 CHECK_FPU_ENABLED
1800 break;
1801 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1802 CHECK_FPU_ENABLED
1803 if (!(ctx->fpscr & FPSCR_PR)) {
1804 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1805 }
1806 return;
1807 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1808 CHECK_FPU_ENABLED
1809 if (!(ctx->fpscr & FPSCR_PR)) {
1810 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1811 }
1812 return;
1813 case 0xf0ad: /* fcnvsd FPUL,DRn */
1814 CHECK_FPU_ENABLED
1815 {
1816 TCGv_i64 fp = tcg_temp_new_i64();
1817 gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
1818 gen_store_fpr64(fp, DREG(B11_8));
1819 tcg_temp_free_i64(fp);
1820 }
1821 return;
1822 case 0xf0bd: /* fcnvds DRn,FPUL */
1823 CHECK_FPU_ENABLED
1824 {
1825 TCGv_i64 fp = tcg_temp_new_i64();
1826 gen_load_fpr64(fp, DREG(B11_8));
1827 gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
1828 tcg_temp_free_i64(fp);
1829 }
1830 return;
1831 case 0xf0ed: /* fipr FVm,FVn */
1832 CHECK_FPU_ENABLED
1833 if ((ctx->fpscr & FPSCR_PR) == 0) {
1834 TCGv m, n;
1835 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1836 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1837 gen_helper_fipr(m, n);
1838 tcg_temp_free(m);
1839 tcg_temp_free(n);
1840 return;
1841 }
1842 break;
1843 case 0xf0fd: /* ftrv XMTRX,FVn */
1844 CHECK_FPU_ENABLED
1845 if ((ctx->opcode & 0x0300) == 0x0100 &&
1846 (ctx->fpscr & FPSCR_PR) == 0) {
1847 TCGv n;
1848 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1849 gen_helper_ftrv(n);
1850 tcg_temp_free(n);
1851 return;
1852 }
1853 break;
1854 }
1855 #if 0
1856 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1857 ctx->opcode, ctx->pc);
1858 fflush(stderr);
1859 #endif
1860 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1861 gen_helper_raise_slot_illegal_instruction();
1862 } else {
1863 gen_helper_raise_illegal_instruction();
1864 }
1865 ctx->bstate = BS_EXCP;
1866 }
1867
1868 static void decode_opc(DisasContext * ctx)
1869 {
1870 uint32_t old_flags = ctx->flags;
1871
1872 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
1873 tcg_gen_debug_insn_start(ctx->pc);
1874 }
1875
1876 _decode_opc(ctx);
1877
1878 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1879 if (ctx->flags & DELAY_SLOT_CLEARME) {
1880 gen_store_flags(0);
1881 } else {
1882 /* go out of the delay slot */
1883 uint32_t new_flags = ctx->flags;
1884 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1885 gen_store_flags(new_flags);
1886 }
1887 ctx->flags = 0;
1888 ctx->bstate = BS_BRANCH;
1889 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1890 gen_delayed_conditional_jump(ctx);
1891 } else if (old_flags & DELAY_SLOT) {
1892 gen_jump(ctx);
1893 }
1894
1895 }
1896
1897 /* go into a delay slot */
1898 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1899 gen_store_flags(ctx->flags);
1900 }
1901
1902 static inline void
1903 gen_intermediate_code_internal(CPUSH4State * env, TranslationBlock * tb,
1904 int search_pc)
1905 {
1906 DisasContext ctx;
1907 target_ulong pc_start;
1908 static uint16_t *gen_opc_end;
1909 CPUBreakpoint *bp;
1910 int i, ii;
1911 int num_insns;
1912 int max_insns;
1913
1914 pc_start = tb->pc;
1915 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1916 ctx.pc = pc_start;
1917 ctx.flags = (uint32_t)tb->flags;
1918 ctx.bstate = BS_NONE;
1919 ctx.sr = env->sr;
1920 ctx.fpscr = env->fpscr;
1921 ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
1922 /* We don't know if the delayed pc came from a dynamic or static branch,
1923 so assume it is a dynamic branch. */
1924 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1925 ctx.tb = tb;
1926 ctx.singlestep_enabled = env->singlestep_enabled;
1927 ctx.features = env->features;
1928 ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
1929
1930 ii = -1;
1931 num_insns = 0;
1932 max_insns = tb->cflags & CF_COUNT_MASK;
1933 if (max_insns == 0)
1934 max_insns = CF_COUNT_MASK;
1935 gen_icount_start();
1936 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1937 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1938 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1939 if (ctx.pc == bp->pc) {
1940 /* We have hit a breakpoint - make sure PC is up-to-date */
1941 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1942 gen_helper_debug();
1943 ctx.bstate = BS_EXCP;
1944 break;
1945 }
1946 }
1947 }
1948 if (search_pc) {
1949 i = gen_opc_ptr - gen_opc_buf;
1950 if (ii < i) {
1951 ii++;
1952 while (ii < i)
1953 gen_opc_instr_start[ii++] = 0;
1954 }
1955 gen_opc_pc[ii] = ctx.pc;
1956 gen_opc_hflags[ii] = ctx.flags;
1957 gen_opc_instr_start[ii] = 1;
1958 gen_opc_icount[ii] = num_insns;
1959 }
1960 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1961 gen_io_start();
1962 #if 0
1963 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1964 fflush(stderr);
1965 #endif
1966 ctx.opcode = lduw_code(ctx.pc);
1967 decode_opc(&ctx);
1968 num_insns++;
1969 ctx.pc += 2;
1970 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1971 break;
1972 if (env->singlestep_enabled)
1973 break;
1974 if (num_insns >= max_insns)
1975 break;
1976 if (singlestep)
1977 break;
1978 }
1979 if (tb->cflags & CF_LAST_IO)
1980 gen_io_end();
1981 if (env->singlestep_enabled) {
1982 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1983 gen_helper_debug();
1984 } else {
1985 switch (ctx.bstate) {
1986 case BS_STOP:
1987 /* gen_op_interrupt_restart(); */
1988 /* fall through */
1989 case BS_NONE:
1990 if (ctx.flags) {
1991 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1992 }
1993 gen_goto_tb(&ctx, 0, ctx.pc);
1994 break;
1995 case BS_EXCP:
1996 /* gen_op_interrupt_restart(); */
1997 tcg_gen_exit_tb(0);
1998 break;
1999 case BS_BRANCH:
2000 default:
2001 break;
2002 }
2003 }
2004
2005 gen_icount_end(tb, num_insns);
2006 *gen_opc_ptr = INDEX_op_end;
2007 if (search_pc) {
2008 i = gen_opc_ptr - gen_opc_buf;
2009 ii++;
2010 while (ii <= i)
2011 gen_opc_instr_start[ii++] = 0;
2012 } else {
2013 tb->size = ctx.pc - pc_start;
2014 tb->icount = num_insns;
2015 }
2016
2017 #ifdef DEBUG_DISAS
2018 #ifdef SH4_DEBUG_DISAS
2019 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2020 #endif
2021 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2022 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2023 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2024 qemu_log("\n");
2025 }
2026 #endif
2027 }
2028
2029 void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
2030 {
2031 gen_intermediate_code_internal(env, tb, 0);
2032 }
2033
2034 void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
2035 {
2036 gen_intermediate_code_internal(env, tb, 1);
2037 }
2038
2039 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
2040 {
2041 env->pc = gen_opc_pc[pc_pos];
2042 env->flags = gen_opc_hflags[pc_pos];
2043 }