]> git.proxmox.com Git - mirror_qemu.git/blob - target-sh4/translate.c
target-sh4: QOM'ify CPU
[mirror_qemu.git] / target-sh4 / translate.c
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #define DEBUG_DISAS
21 #define SH4_DEBUG_DISAS
22 //#define SH4_SINGLE_STEP
23
24 #include "cpu.h"
25 #include "disas.h"
26 #include "tcg-op.h"
27
28 #include "helper.h"
29 #define GEN_HELPER 1
30 #include "helper.h"
31
/* Per-translation-block decoder state, rebuilt for each TB. */
typedef struct DisasContext {
    struct TranslationBlock *tb;  /* TB currently being translated */
    target_ulong pc;              /* guest PC of the insn being decoded */
    uint32_t sr;                  /* SR at TB start (MD/RB select the register bank) */
    uint32_t fpscr;               /* FPSCR at TB start (FR/SZ select FP bank/size) */
    uint16_t opcode;              /* current 16-bit instruction word */
    uint32_t flags;               /* TB flags: delay-slot state, SR_FD, ... */
    int bstate;                   /* one of the BS_* end-of-translation states */
    int memidx;                   /* MMU index passed to tcg_gen_qemu_ld/st */
    uint32_t delayed_pc;          /* static branch target, or (uint32_t)-1 if dynamic */
    int singlestep_enabled;       /* exit after every insn (gdbstub single-step) */
    uint32_t features;            /* CPU feature bits (SH_FEATURE_*) */
    int has_movcal;               /* a movca.l backup may still be pending in this TB */
} DisasContext;
46
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
/* True when translating user-mode code, i.e. SR.MD is clear. */
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif
52
/* Values for DisasContext::bstate describing how translation ends. */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition
                  */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
61
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];   /* R8-R15 plus both banks of R0-R7 */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];   /* both banks of FPR0-FPR15 */

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

/* NOTE(review): presumably the per-opcode copy of env->flags recorded
   during translation for restore; confirm against gen_intermediate_code. */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "gen-icount.h"
76
/* Register the TCG globals that mirror CPUSH4State fields.  Safe to call
   more than once: the work is done only on the first invocation. */
static void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;   /* guards against double registration */
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* General registers, including both banks of R0-R7. */
    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    /* Control and system registers. */
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    /* Internal (non-architectural) state used by the translator. */
    cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
				       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
					    offsetof(CPUSH4State, delayed_pc),
					    "_delayed_pc_");
    cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
				      offsetof(CPUSH4State, ldst), "_ldst_");

    /* Floating-point registers, both banks. */
    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
155
156 void cpu_dump_state(CPUSH4State * env, FILE * f,
157 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
158 int flags)
159 {
160 int i;
161 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
162 env->pc, env->sr, env->pr, env->fpscr);
163 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
164 env->spc, env->ssr, env->gbr, env->vbr);
165 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
166 env->sgr, env->dbr, env->delayed_pc, env->fpul);
167 for (i = 0; i < 24; i += 4) {
168 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
169 i, env->gregs[i], i + 1, env->gregs[i + 1],
170 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
171 }
172 if (env->flags & DELAY_SLOT) {
173 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
174 env->delayed_pc);
175 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
176 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
177 env->delayed_pc);
178 }
179 }
180
/* Reset env to its architectural power-on state.  Only the architectural
   part of the structure (everything before 'breakpoints') is cleared;
   host-side bookkeeping fields are preserved. */
void cpu_state_reset(CPUSH4State *env)
{
    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    memset(env, 0, offsetof(CPUSH4State, breakpoints));
    tlb_flush(env, 1);

    env->pc = 0xA0000000;    /* architectural reset vector */
#if defined(CONFIG_USER_ONLY)
    env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
    set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
#else
    /* System emulation: start privileged, bank 1, interrupts blocked/masked. */
    env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0;
    env->fpscr = FPSCR_DN | FPSCR_RM_ZERO; /* CPU reset value according to SH4 manual */
    set_float_rounding_mode(float_round_to_zero, &env->fp_status);
    set_flush_to_zero(1, &env->fp_status);
#endif
    set_default_nan_mode(1, &env->fp_status);
}
203
/* Static description of one supported CPU model. */
typedef struct {
    const char *name;     /* model name used on the command line */
    int id;               /* SH_CPU_* identifier */
    uint32_t pvr;         /* processor version register reset value */
    uint32_t prr;         /* product register reset value */
    uint32_t cvr;         /* cache version register reset value */
    uint32_t features;    /* SH_FEATURE_* bits for this model */
} sh4_def_t;
212
213 static sh4_def_t sh4_defs[] = {
214 {
215 .name = "SH7750R",
216 .id = SH_CPU_SH7750R,
217 .pvr = 0x00050000,
218 .prr = 0x00000100,
219 .cvr = 0x00110000,
220 .features = SH_FEATURE_BCR3_AND_BCR4,
221 }, {
222 .name = "SH7751R",
223 .id = SH_CPU_SH7751R,
224 .pvr = 0x04050005,
225 .prr = 0x00000113,
226 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
227 .features = SH_FEATURE_BCR3_AND_BCR4,
228 }, {
229 .name = "SH7785",
230 .id = SH_CPU_SH7785,
231 .pvr = 0x10300700,
232 .prr = 0x00000200,
233 .cvr = 0x71440211,
234 .features = SH_FEATURE_SH4A,
235 },
236 };
237
238 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
239 {
240 int i;
241
242 if (strcasecmp(name, "any") == 0)
243 return &sh4_defs[0];
244
245 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
246 if (strcasecmp(name, sh4_defs[i].name) == 0)
247 return &sh4_defs[i];
248
249 return NULL;
250 }
251
252 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
253 {
254 int i;
255
256 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
257 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
258 }
259
260 static void cpu_register(CPUSH4State *env, const sh4_def_t *def)
261 {
262 env->pvr = def->pvr;
263 env->prr = def->prr;
264 env->cvr = def->cvr;
265 env->id = def->id;
266 }
267
/* Create and initialise a CPU of the model named by 'cpu_model'.
   Returns the embedded CPUSH4State, or NULL if the model is unknown. */
CPUSH4State *cpu_sh4_init(const char *cpu_model)
{
    SuperHCPU *cpu;
    CPUSH4State *env;
    const sh4_def_t *def;

    def = cpu_sh4_find_by_name(cpu_model);
    if (!def)
	return NULL;
    cpu = SUPERH_CPU(object_new(TYPE_SUPERH_CPU));
    env = &cpu->env;
    env->features = def->features;
    cpu_exec_init(env);
    env->movcal_backup_tail = &(env->movcal_backup);  /* empty movca.l backup list */
    sh4_translate_init();
    env->cpu_model_str = cpu_model;
    cpu_state_reset(env);
    cpu_register(env, def);   /* install id registers after the reset memset */
    qemu_init_vcpu(env);
    return env;
}
289
/* Emit code to transfer control to 'dest'.  When the target lies on the
   same guest page and single-stepping is off, the TB is chained directly
   (slot 'n'); otherwise we set PC and exit to the main loop, raising a
   debug exception first when single-stepping. */
static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;

    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
	!ctx->singlestep_enabled) {
	/* Use a direct jump if in same page and singlestep not enabled */
	tcg_gen_goto_tb(n);
	tcg_gen_movi_i32(cpu_pc, dest);
	tcg_gen_exit_tb((tcg_target_long)tb + n);   /* TB pointer tagged with jump slot */
    } else {
	tcg_gen_movi_i32(cpu_pc, dest);
	if (ctx->singlestep_enabled)
	    gen_helper_debug();
	tcg_gen_exit_tb(0);
    }
}
308
/* Emit the jump that concludes a delayed branch. */
static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) - 1) {
	/* Target is not statically known, it comes necessarily from a
	   delayed jump as immediate jumps are conditional jumps */
	tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
	if (ctx->singlestep_enabled)
	    gen_helper_debug();
	tcg_gen_exit_tb(0);
    } else {
	/* Static target: try to chain TBs. */
	gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
322
/* Record a conditional delayed branch: store the target in cpu_delayed_pc
   and set DELAY_SLOT_TRUE in cpu_flags when the branch will be taken,
   i.e. when SR.T matches 't' (t != 0 means "taken if T set"). */
static inline void gen_branch_slot(uint32_t delayed_pc, int t)
{
    TCGv sr;
    int label = gen_new_label();
    tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, cpu_sr, SR_T);
    /* Skip the flag update when T does not match the required value. */
    tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    gen_set_label(label);
}
334
/* Immediate conditional jump (bt or bf): branch to 'ift' when SR.T is
   set, to 'ifnott' when it is clear.  Ends the TB either way. */
static void gen_conditional_jump(DisasContext * ctx,
				 target_ulong ift, target_ulong ifnott)
{
    int l1;
    TCGv sr;

    l1 = gen_new_label();
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, cpu_sr, SR_T);
    tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
    gen_goto_tb(ctx, 0, ifnott);   /* T clear */
    gen_set_label(l1);
    gen_goto_tb(ctx, 1, ift);      /* T set */
}
350
/* Delayed conditional jump (bt/s or bf/s): after the delay slot executed,
   jump to the recorded target when DELAY_SLOT_TRUE was set by
   gen_branch_slot, else fall through to the next insn. */
static void gen_delayed_conditional_jump(DisasContext * ctx)
{
    int l1;
    TCGv ds;

    l1 = gen_new_label();
    ds = tcg_temp_new();
    tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
    gen_goto_tb(ctx, 1, ctx->pc + 2);   /* branch not taken */
    gen_set_label(l1);
    /* Branch taken: consume the flag, then jump to delayed_pc. */
    tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
    gen_jump(ctx);
}
366
/* Set the SR.T flag. */
static inline void gen_set_t(void)
{
    tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
}
371
/* Clear the SR.T flag. */
static inline void gen_clr_t(void)
{
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
}
376
/* Set SR.T (bit 0 of SR) to the result of 't1 cond t0'. */
static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
{
    TCGv t;

    t = tcg_temp_new();
    tcg_gen_setcond_i32(cond, t, t1, t0);
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
    tcg_gen_or_i32(cpu_sr, cpu_sr, t);   /* setcond yields 0 or 1 */

    tcg_temp_free(t);
}
388
/* Set SR.T (bit 0 of SR) to the result of 't0 cond imm'. */
static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
{
    TCGv t;

    t = tcg_temp_new();
    tcg_gen_setcondi_i32(cond, t, t0, imm);
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
    tcg_gen_or_i32(cpu_sr, cpu_sr, t);   /* setcond yields 0 or 1 */

    tcg_temp_free(t);
}
400
/* Replace all bits of cpu_flags with 'flags', preserving only
   DELAY_SLOT_TRUE, which tracks the run-time outcome of a conditional
   delayed branch. */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
406
407 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
408 {
409 TCGv tmp = tcg_temp_new();
410
411 p0 &= 0x1f;
412 p1 &= 0x1f;
413
414 tcg_gen_andi_i32(tmp, t1, (1 << p1));
415 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
416 if (p0 < p1)
417 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
418 else if (p0 > p1)
419 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
420 tcg_gen_or_i32(t0, t0, tmp);
421
422 tcg_temp_free(tmp);
423 }
424
/* Assemble a 64-bit value from an even/odd FP register pair:
   fregs[reg] supplies the high word, fregs[reg + 1] the low word. */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
429
/* Split a 64-bit value into an even/odd FP register pair.
   Note: clobbers 't' (it is shifted right by 32 in place). */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);   /* low word */
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);       /* high word */
    tcg_temp_free_i32(tmp);
}
440
/* Instruction field accessors: Bx_y extracts bits y..x of ctx->opcode;
   an 's' suffix means the field is sign-extended. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit displacement, manually sign-extended from bit 11. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access honouring SR.MD/SR.RB banking: R0-R7 come
   from bank 1 when both MD and RB are set, else from bank 0. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
		(cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* Like REG(), but selects the inactive bank of R0-R7. */
#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register numbering honouring the FPSCR.FR bank switch. */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
/* Remap an XD-style register number: bit 0 selects the upper half. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
461
/* Abort translation of the current insn with a slot-illegal exception
   when it appears inside a delay slot, where it is architecturally
   forbidden. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      gen_helper_raise_slot_illegal_instruction();            \
      ctx->bstate = BS_EXCP;                                  \
      return;                                                 \
  }

/* Abort translation with an illegal-instruction exception (slot variant
   inside a delay slot) when a privileged insn executes in user mode. */
#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_illegal_instruction();          \
      } else {                                                  \
          gen_helper_raise_illegal_instruction();               \
      }                                                         \
      ctx->bstate = BS_EXCP;                                    \
      return;                                                   \
  }

/* Abort translation with an FPU-disable exception when SR.FD is set
   (slot variant inside a delay slot). */
#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable();                  \
      } else {                                                  \
          gen_helper_raise_fpu_disable();                       \
      }                                                         \
      ctx->bstate = BS_EXCP;                                    \
      return;                                                   \
  }
491
492 static void _decode_opc(DisasContext * ctx)
493 {
494 /* This code tries to make movcal emulation sufficiently
495 accurate for Linux purposes. This instruction writes
496 memory, and prior to that, always allocates a cache line.
497 It is used in two contexts:
498 - in memcpy, where data is copied in blocks, the first write
499 of to a block uses movca.l for performance.
500 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
501 to flush the cache. Here, the data written by movcal.l is never
502 written to memory, and the data written is just bogus.
503
504 To simulate this, we simulate movcal.l, we store the value to memory,
505 but we also remember the previous content. If we see ocbi, we check
506 if movcal.l for that address was done previously. If so, the write should
507 not have hit the memory, so we restore the previous content.
508 When we see an instruction that is neither movca.l
509 nor ocbi, the previous content is discarded.
510
511 To optimize, we only try to flush stores when we're at the start of
512 TB, or if we already saw movca.l in this TB and did not flush stores
513 yet. */
514 if (ctx->has_movcal)
515 {
516 int opcode = ctx->opcode & 0xf0ff;
517 if (opcode != 0x0093 /* ocbi */
518 && opcode != 0x00c3 /* movca.l */)
519 {
520 gen_helper_discard_movcal_backup ();
521 ctx->has_movcal = 0;
522 }
523 }
524
525 #if 0
526 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
527 #endif
528
529 switch (ctx->opcode) {
530 case 0x0019: /* div0u */
531 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
532 return;
533 case 0x000b: /* rts */
534 CHECK_NOT_DELAY_SLOT
535 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
536 ctx->flags |= DELAY_SLOT;
537 ctx->delayed_pc = (uint32_t) - 1;
538 return;
539 case 0x0028: /* clrmac */
540 tcg_gen_movi_i32(cpu_mach, 0);
541 tcg_gen_movi_i32(cpu_macl, 0);
542 return;
543 case 0x0048: /* clrs */
544 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
545 return;
546 case 0x0008: /* clrt */
547 gen_clr_t();
548 return;
549 case 0x0038: /* ldtlb */
550 CHECK_PRIVILEGED
551 gen_helper_ldtlb();
552 return;
553 case 0x002b: /* rte */
554 CHECK_PRIVILEGED
555 CHECK_NOT_DELAY_SLOT
556 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
557 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
558 ctx->flags |= DELAY_SLOT;
559 ctx->delayed_pc = (uint32_t) - 1;
560 return;
561 case 0x0058: /* sets */
562 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
563 return;
564 case 0x0018: /* sett */
565 gen_set_t();
566 return;
567 case 0xfbfd: /* frchg */
568 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
569 ctx->bstate = BS_STOP;
570 return;
571 case 0xf3fd: /* fschg */
572 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
573 ctx->bstate = BS_STOP;
574 return;
575 case 0x0009: /* nop */
576 return;
577 case 0x001b: /* sleep */
578 CHECK_PRIVILEGED
579 gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
580 return;
581 }
582
583 switch (ctx->opcode & 0xf000) {
584 case 0x1000: /* mov.l Rm,@(disp,Rn) */
585 {
586 TCGv addr = tcg_temp_new();
587 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
588 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
589 tcg_temp_free(addr);
590 }
591 return;
592 case 0x5000: /* mov.l @(disp,Rm),Rn */
593 {
594 TCGv addr = tcg_temp_new();
595 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
596 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
597 tcg_temp_free(addr);
598 }
599 return;
600 case 0xe000: /* mov #imm,Rn */
601 tcg_gen_movi_i32(REG(B11_8), B7_0s);
602 return;
603 case 0x9000: /* mov.w @(disp,PC),Rn */
604 {
605 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
606 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
607 tcg_temp_free(addr);
608 }
609 return;
610 case 0xd000: /* mov.l @(disp,PC),Rn */
611 {
612 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
613 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
614 tcg_temp_free(addr);
615 }
616 return;
617 case 0x7000: /* add #imm,Rn */
618 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
619 return;
620 case 0xa000: /* bra disp */
621 CHECK_NOT_DELAY_SLOT
622 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
623 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
624 ctx->flags |= DELAY_SLOT;
625 return;
626 case 0xb000: /* bsr disp */
627 CHECK_NOT_DELAY_SLOT
628 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
629 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
630 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
631 ctx->flags |= DELAY_SLOT;
632 return;
633 }
634
635 switch (ctx->opcode & 0xf00f) {
636 case 0x6003: /* mov Rm,Rn */
637 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
638 return;
639 case 0x2000: /* mov.b Rm,@Rn */
640 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
641 return;
642 case 0x2001: /* mov.w Rm,@Rn */
643 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
644 return;
645 case 0x2002: /* mov.l Rm,@Rn */
646 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
647 return;
648 case 0x6000: /* mov.b @Rm,Rn */
649 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
650 return;
651 case 0x6001: /* mov.w @Rm,Rn */
652 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
653 return;
654 case 0x6002: /* mov.l @Rm,Rn */
655 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
656 return;
657 case 0x2004: /* mov.b Rm,@-Rn */
658 {
659 TCGv addr = tcg_temp_new();
660 tcg_gen_subi_i32(addr, REG(B11_8), 1);
661 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
662 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
663 tcg_temp_free(addr);
664 }
665 return;
666 case 0x2005: /* mov.w Rm,@-Rn */
667 {
668 TCGv addr = tcg_temp_new();
669 tcg_gen_subi_i32(addr, REG(B11_8), 2);
670 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
671 tcg_gen_mov_i32(REG(B11_8), addr);
672 tcg_temp_free(addr);
673 }
674 return;
675 case 0x2006: /* mov.l Rm,@-Rn */
676 {
677 TCGv addr = tcg_temp_new();
678 tcg_gen_subi_i32(addr, REG(B11_8), 4);
679 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
680 tcg_gen_mov_i32(REG(B11_8), addr);
681 }
682 return;
683 case 0x6004: /* mov.b @Rm+,Rn */
684 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
685 if ( B11_8 != B7_4 )
686 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
687 return;
688 case 0x6005: /* mov.w @Rm+,Rn */
689 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
690 if ( B11_8 != B7_4 )
691 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
692 return;
693 case 0x6006: /* mov.l @Rm+,Rn */
694 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
695 if ( B11_8 != B7_4 )
696 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
697 return;
698 case 0x0004: /* mov.b Rm,@(R0,Rn) */
699 {
700 TCGv addr = tcg_temp_new();
701 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
702 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
703 tcg_temp_free(addr);
704 }
705 return;
706 case 0x0005: /* mov.w Rm,@(R0,Rn) */
707 {
708 TCGv addr = tcg_temp_new();
709 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
710 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
711 tcg_temp_free(addr);
712 }
713 return;
714 case 0x0006: /* mov.l Rm,@(R0,Rn) */
715 {
716 TCGv addr = tcg_temp_new();
717 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
718 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
719 tcg_temp_free(addr);
720 }
721 return;
722 case 0x000c: /* mov.b @(R0,Rm),Rn */
723 {
724 TCGv addr = tcg_temp_new();
725 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
726 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
727 tcg_temp_free(addr);
728 }
729 return;
730 case 0x000d: /* mov.w @(R0,Rm),Rn */
731 {
732 TCGv addr = tcg_temp_new();
733 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
734 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
735 tcg_temp_free(addr);
736 }
737 return;
738 case 0x000e: /* mov.l @(R0,Rm),Rn */
739 {
740 TCGv addr = tcg_temp_new();
741 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
742 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
743 tcg_temp_free(addr);
744 }
745 return;
746 case 0x6008: /* swap.b Rm,Rn */
747 {
748 TCGv high, low;
749 high = tcg_temp_new();
750 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
751 low = tcg_temp_new();
752 tcg_gen_ext16u_i32(low, REG(B7_4));
753 tcg_gen_bswap16_i32(low, low);
754 tcg_gen_or_i32(REG(B11_8), high, low);
755 tcg_temp_free(low);
756 tcg_temp_free(high);
757 }
758 return;
759 case 0x6009: /* swap.w Rm,Rn */
760 {
761 TCGv high, low;
762 high = tcg_temp_new();
763 tcg_gen_shli_i32(high, REG(B7_4), 16);
764 low = tcg_temp_new();
765 tcg_gen_shri_i32(low, REG(B7_4), 16);
766 tcg_gen_ext16u_i32(low, low);
767 tcg_gen_or_i32(REG(B11_8), high, low);
768 tcg_temp_free(low);
769 tcg_temp_free(high);
770 }
771 return;
772 case 0x200d: /* xtrct Rm,Rn */
773 {
774 TCGv high, low;
775 high = tcg_temp_new();
776 tcg_gen_shli_i32(high, REG(B7_4), 16);
777 low = tcg_temp_new();
778 tcg_gen_shri_i32(low, REG(B11_8), 16);
779 tcg_gen_ext16u_i32(low, low);
780 tcg_gen_or_i32(REG(B11_8), high, low);
781 tcg_temp_free(low);
782 tcg_temp_free(high);
783 }
784 return;
785 case 0x300c: /* add Rm,Rn */
786 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
787 return;
788 case 0x300e: /* addc Rm,Rn */
789 gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
790 return;
791 case 0x300f: /* addv Rm,Rn */
792 gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
793 return;
794 case 0x2009: /* and Rm,Rn */
795 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
796 return;
797 case 0x3000: /* cmp/eq Rm,Rn */
798 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
799 return;
800 case 0x3003: /* cmp/ge Rm,Rn */
801 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
802 return;
803 case 0x3007: /* cmp/gt Rm,Rn */
804 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
805 return;
806 case 0x3006: /* cmp/hi Rm,Rn */
807 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
808 return;
809 case 0x3002: /* cmp/hs Rm,Rn */
810 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
811 return;
812 case 0x200c: /* cmp/str Rm,Rn */
813 {
814 TCGv cmp1 = tcg_temp_new();
815 TCGv cmp2 = tcg_temp_new();
816 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
817 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
818 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
819 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
820 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
821 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
822 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
823 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
824 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
825 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
826 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
827 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
828 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
829 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
830 tcg_temp_free(cmp2);
831 tcg_temp_free(cmp1);
832 }
833 return;
834 case 0x2007: /* div0s Rm,Rn */
835 {
836 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
837 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
838 TCGv val = tcg_temp_new();
839 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
840 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
841 tcg_temp_free(val);
842 }
843 return;
844 case 0x3004: /* div1 Rm,Rn */
845 gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
846 return;
847 case 0x300d: /* dmuls.l Rm,Rn */
848 {
849 TCGv_i64 tmp1 = tcg_temp_new_i64();
850 TCGv_i64 tmp2 = tcg_temp_new_i64();
851
852 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
853 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
854 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
855 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
856 tcg_gen_shri_i64(tmp1, tmp1, 32);
857 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
858
859 tcg_temp_free_i64(tmp2);
860 tcg_temp_free_i64(tmp1);
861 }
862 return;
863 case 0x3005: /* dmulu.l Rm,Rn */
864 {
865 TCGv_i64 tmp1 = tcg_temp_new_i64();
866 TCGv_i64 tmp2 = tcg_temp_new_i64();
867
868 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
869 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
870 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
871 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
872 tcg_gen_shri_i64(tmp1, tmp1, 32);
873 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
874
875 tcg_temp_free_i64(tmp2);
876 tcg_temp_free_i64(tmp1);
877 }
878 return;
879 case 0x600e: /* exts.b Rm,Rn */
880 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
881 return;
882 case 0x600f: /* exts.w Rm,Rn */
883 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
884 return;
885 case 0x600c: /* extu.b Rm,Rn */
886 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
887 return;
888 case 0x600d: /* extu.w Rm,Rn */
889 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
890 return;
891 case 0x000f: /* mac.l @Rm+,@Rn+ */
892 {
893 TCGv arg0, arg1;
894 arg0 = tcg_temp_new();
895 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
896 arg1 = tcg_temp_new();
897 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
898 gen_helper_macl(arg0, arg1);
899 tcg_temp_free(arg1);
900 tcg_temp_free(arg0);
901 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
902 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
903 }
904 return;
905 case 0x400f: /* mac.w @Rm+,@Rn+ */
906 {
907 TCGv arg0, arg1;
908 arg0 = tcg_temp_new();
909 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
910 arg1 = tcg_temp_new();
911 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
912 gen_helper_macw(arg0, arg1);
913 tcg_temp_free(arg1);
914 tcg_temp_free(arg0);
915 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
916 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
917 }
918 return;
919 case 0x0007: /* mul.l Rm,Rn */
920 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
921 return;
922 case 0x200f: /* muls.w Rm,Rn */
923 {
924 TCGv arg0, arg1;
925 arg0 = tcg_temp_new();
926 tcg_gen_ext16s_i32(arg0, REG(B7_4));
927 arg1 = tcg_temp_new();
928 tcg_gen_ext16s_i32(arg1, REG(B11_8));
929 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
930 tcg_temp_free(arg1);
931 tcg_temp_free(arg0);
932 }
933 return;
934 case 0x200e: /* mulu.w Rm,Rn */
935 {
936 TCGv arg0, arg1;
937 arg0 = tcg_temp_new();
938 tcg_gen_ext16u_i32(arg0, REG(B7_4));
939 arg1 = tcg_temp_new();
940 tcg_gen_ext16u_i32(arg1, REG(B11_8));
941 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
942 tcg_temp_free(arg1);
943 tcg_temp_free(arg0);
944 }
945 return;
946 case 0x600b: /* neg Rm,Rn */
947 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
948 return;
949 case 0x600a: /* negc Rm,Rn */
950 {
951 TCGv t0, t1;
952 t0 = tcg_temp_new();
953 tcg_gen_neg_i32(t0, REG(B7_4));
954 t1 = tcg_temp_new();
955 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
956 tcg_gen_sub_i32(REG(B11_8), t0, t1);
957 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
958 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
959 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
960 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
961 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
962 tcg_temp_free(t0);
963 tcg_temp_free(t1);
964 }
965 return;
966 case 0x6007: /* not Rm,Rn */
967 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
968 return;
969 case 0x200b: /* or Rm,Rn */
970 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
971 return;
972 case 0x400c: /* shad Rm,Rn */
973 {
974 int label1 = gen_new_label();
975 int label2 = gen_new_label();
976 int label3 = gen_new_label();
977 int label4 = gen_new_label();
978 TCGv shift;
979 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
980 /* Rm positive, shift to the left */
981 shift = tcg_temp_new();
982 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
983 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
984 tcg_temp_free(shift);
985 tcg_gen_br(label4);
986 /* Rm negative, shift to the right */
987 gen_set_label(label1);
988 shift = tcg_temp_new();
989 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
990 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
991 tcg_gen_not_i32(shift, REG(B7_4));
992 tcg_gen_andi_i32(shift, shift, 0x1f);
993 tcg_gen_addi_i32(shift, shift, 1);
994 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
995 tcg_temp_free(shift);
996 tcg_gen_br(label4);
997 /* Rm = -32 */
998 gen_set_label(label2);
999 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
1000 tcg_gen_movi_i32(REG(B11_8), 0);
1001 tcg_gen_br(label4);
1002 gen_set_label(label3);
1003 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
1004 gen_set_label(label4);
1005 }
1006 return;
1007 case 0x400d: /* shld Rm,Rn */
1008 {
1009 int label1 = gen_new_label();
1010 int label2 = gen_new_label();
1011 int label3 = gen_new_label();
1012 TCGv shift;
1013 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
1014 /* Rm positive, shift to the left */
1015 shift = tcg_temp_new();
1016 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1017 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1018 tcg_temp_free(shift);
1019 tcg_gen_br(label3);
1020 /* Rm negative, shift to the right */
1021 gen_set_label(label1);
1022 shift = tcg_temp_new();
1023 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1024 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1025 tcg_gen_not_i32(shift, REG(B7_4));
1026 tcg_gen_andi_i32(shift, shift, 0x1f);
1027 tcg_gen_addi_i32(shift, shift, 1);
1028 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1029 tcg_temp_free(shift);
1030 tcg_gen_br(label3);
1031 /* Rm = -32 */
1032 gen_set_label(label2);
1033 tcg_gen_movi_i32(REG(B11_8), 0);
1034 gen_set_label(label3);
1035 }
1036 return;
1037 case 0x3008: /* sub Rm,Rn */
1038 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1039 return;
1040 case 0x300a: /* subc Rm,Rn */
1041 gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
1042 return;
1043 case 0x300b: /* subv Rm,Rn */
1044 gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
1045 return;
1046 case 0x2008: /* tst Rm,Rn */
1047 {
1048 TCGv val = tcg_temp_new();
1049 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1050 gen_cmp_imm(TCG_COND_EQ, val, 0);
1051 tcg_temp_free(val);
1052 }
1053 return;
1054 case 0x200a: /* xor Rm,Rn */
1055 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1056 return;
1057 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1058 CHECK_FPU_ENABLED
1059 if (ctx->fpscr & FPSCR_SZ) {
1060 TCGv_i64 fp = tcg_temp_new_i64();
1061 gen_load_fpr64(fp, XREG(B7_4));
1062 gen_store_fpr64(fp, XREG(B11_8));
1063 tcg_temp_free_i64(fp);
1064 } else {
1065 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1066 }
1067 return;
1068 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1069 CHECK_FPU_ENABLED
1070 if (ctx->fpscr & FPSCR_SZ) {
1071 TCGv addr_hi = tcg_temp_new();
1072 int fr = XREG(B7_4);
1073 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1074 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1075 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1076 tcg_temp_free(addr_hi);
1077 } else {
1078 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1079 }
1080 return;
1081 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1082 CHECK_FPU_ENABLED
1083 if (ctx->fpscr & FPSCR_SZ) {
1084 TCGv addr_hi = tcg_temp_new();
1085 int fr = XREG(B11_8);
1086 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1087 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1088 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1089 tcg_temp_free(addr_hi);
1090 } else {
1091 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1092 }
1093 return;
1094 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1095 CHECK_FPU_ENABLED
1096 if (ctx->fpscr & FPSCR_SZ) {
1097 TCGv addr_hi = tcg_temp_new();
1098 int fr = XREG(B11_8);
1099 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1100 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1101 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1102 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1103 tcg_temp_free(addr_hi);
1104 } else {
1105 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1106 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1107 }
1108 return;
1109 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1110 CHECK_FPU_ENABLED
1111 if (ctx->fpscr & FPSCR_SZ) {
1112 TCGv addr = tcg_temp_new_i32();
1113 int fr = XREG(B7_4);
1114 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1115 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1116 tcg_gen_subi_i32(addr, addr, 4);
1117 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1118 tcg_gen_mov_i32(REG(B11_8), addr);
1119 tcg_temp_free(addr);
1120 } else {
1121 TCGv addr;
1122 addr = tcg_temp_new_i32();
1123 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1124 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1125 tcg_gen_mov_i32(REG(B11_8), addr);
1126 tcg_temp_free(addr);
1127 }
1128 return;
1129 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1130 CHECK_FPU_ENABLED
1131 {
1132 TCGv addr = tcg_temp_new_i32();
1133 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1134 if (ctx->fpscr & FPSCR_SZ) {
1135 int fr = XREG(B11_8);
1136 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1137 tcg_gen_addi_i32(addr, addr, 4);
1138 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1139 } else {
1140 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1141 }
1142 tcg_temp_free(addr);
1143 }
1144 return;
1145 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1146 CHECK_FPU_ENABLED
1147 {
1148 TCGv addr = tcg_temp_new();
1149 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1150 if (ctx->fpscr & FPSCR_SZ) {
1151 int fr = XREG(B7_4);
1152 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1153 tcg_gen_addi_i32(addr, addr, 4);
1154 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1155 } else {
1156 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1157 }
1158 tcg_temp_free(addr);
1159 }
1160 return;
1161 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1162 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1163 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1164 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1165 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1166 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1167 {
1168 CHECK_FPU_ENABLED
1169 if (ctx->fpscr & FPSCR_PR) {
1170 TCGv_i64 fp0, fp1;
1171
1172 if (ctx->opcode & 0x0110)
1173 break; /* illegal instruction */
1174 fp0 = tcg_temp_new_i64();
1175 fp1 = tcg_temp_new_i64();
1176 gen_load_fpr64(fp0, DREG(B11_8));
1177 gen_load_fpr64(fp1, DREG(B7_4));
1178 switch (ctx->opcode & 0xf00f) {
1179 case 0xf000: /* fadd Rm,Rn */
1180 gen_helper_fadd_DT(fp0, fp0, fp1);
1181 break;
1182 case 0xf001: /* fsub Rm,Rn */
1183 gen_helper_fsub_DT(fp0, fp0, fp1);
1184 break;
1185 case 0xf002: /* fmul Rm,Rn */
1186 gen_helper_fmul_DT(fp0, fp0, fp1);
1187 break;
1188 case 0xf003: /* fdiv Rm,Rn */
1189 gen_helper_fdiv_DT(fp0, fp0, fp1);
1190 break;
1191 case 0xf004: /* fcmp/eq Rm,Rn */
1192 gen_helper_fcmp_eq_DT(fp0, fp1);
1193 return;
1194 case 0xf005: /* fcmp/gt Rm,Rn */
1195 gen_helper_fcmp_gt_DT(fp0, fp1);
1196 return;
1197 }
1198 gen_store_fpr64(fp0, DREG(B11_8));
1199 tcg_temp_free_i64(fp0);
1200 tcg_temp_free_i64(fp1);
1201 } else {
1202 switch (ctx->opcode & 0xf00f) {
1203 case 0xf000: /* fadd Rm,Rn */
1204 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1205 break;
1206 case 0xf001: /* fsub Rm,Rn */
1207 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1208 break;
1209 case 0xf002: /* fmul Rm,Rn */
1210 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1211 break;
1212 case 0xf003: /* fdiv Rm,Rn */
1213 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1214 break;
1215 case 0xf004: /* fcmp/eq Rm,Rn */
1216 gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1217 return;
1218 case 0xf005: /* fcmp/gt Rm,Rn */
1219 gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1220 return;
1221 }
1222 }
1223 }
1224 return;
1225 case 0xf00e: /* fmac FR0,RM,Rn */
1226 {
1227 CHECK_FPU_ENABLED
1228 if (ctx->fpscr & FPSCR_PR) {
1229 break; /* illegal instruction */
1230 } else {
1231 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
1232 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
1233 return;
1234 }
1235 }
1236 }
1237
1238 switch (ctx->opcode & 0xff00) {
1239 case 0xc900: /* and #imm,R0 */
1240 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1241 return;
1242 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1243 {
1244 TCGv addr, val;
1245 addr = tcg_temp_new();
1246 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1247 val = tcg_temp_new();
1248 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1249 tcg_gen_andi_i32(val, val, B7_0);
1250 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1251 tcg_temp_free(val);
1252 tcg_temp_free(addr);
1253 }
1254 return;
1255 case 0x8b00: /* bf label */
1256 CHECK_NOT_DELAY_SLOT
1257 gen_conditional_jump(ctx, ctx->pc + 2,
1258 ctx->pc + 4 + B7_0s * 2);
1259 ctx->bstate = BS_BRANCH;
1260 return;
1261 case 0x8f00: /* bf/s label */
1262 CHECK_NOT_DELAY_SLOT
1263 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1264 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1265 return;
1266 case 0x8900: /* bt label */
1267 CHECK_NOT_DELAY_SLOT
1268 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1269 ctx->pc + 2);
1270 ctx->bstate = BS_BRANCH;
1271 return;
1272 case 0x8d00: /* bt/s label */
1273 CHECK_NOT_DELAY_SLOT
1274 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1275 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1276 return;
1277 case 0x8800: /* cmp/eq #imm,R0 */
1278 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1279 return;
1280 case 0xc400: /* mov.b @(disp,GBR),R0 */
1281 {
1282 TCGv addr = tcg_temp_new();
1283 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1284 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1285 tcg_temp_free(addr);
1286 }
1287 return;
1288 case 0xc500: /* mov.w @(disp,GBR),R0 */
1289 {
1290 TCGv addr = tcg_temp_new();
1291 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1292 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1293 tcg_temp_free(addr);
1294 }
1295 return;
1296 case 0xc600: /* mov.l @(disp,GBR),R0 */
1297 {
1298 TCGv addr = tcg_temp_new();
1299 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1300 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1301 tcg_temp_free(addr);
1302 }
1303 return;
1304 case 0xc000: /* mov.b R0,@(disp,GBR) */
1305 {
1306 TCGv addr = tcg_temp_new();
1307 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1308 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1309 tcg_temp_free(addr);
1310 }
1311 return;
1312 case 0xc100: /* mov.w R0,@(disp,GBR) */
1313 {
1314 TCGv addr = tcg_temp_new();
1315 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1316 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1317 tcg_temp_free(addr);
1318 }
1319 return;
1320 case 0xc200: /* mov.l R0,@(disp,GBR) */
1321 {
1322 TCGv addr = tcg_temp_new();
1323 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1324 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1325 tcg_temp_free(addr);
1326 }
1327 return;
1328 case 0x8000: /* mov.b R0,@(disp,Rn) */
1329 {
1330 TCGv addr = tcg_temp_new();
1331 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1332 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1333 tcg_temp_free(addr);
1334 }
1335 return;
1336 case 0x8100: /* mov.w R0,@(disp,Rn) */
1337 {
1338 TCGv addr = tcg_temp_new();
1339 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1340 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1341 tcg_temp_free(addr);
1342 }
1343 return;
1344 case 0x8400: /* mov.b @(disp,Rn),R0 */
1345 {
1346 TCGv addr = tcg_temp_new();
1347 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1348 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1349 tcg_temp_free(addr);
1350 }
1351 return;
1352 case 0x8500: /* mov.w @(disp,Rn),R0 */
1353 {
1354 TCGv addr = tcg_temp_new();
1355 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1356 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1357 tcg_temp_free(addr);
1358 }
1359 return;
1360 case 0xc700: /* mova @(disp,PC),R0 */
1361 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1362 return;
1363 case 0xcb00: /* or #imm,R0 */
1364 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1365 return;
1366 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1367 {
1368 TCGv addr, val;
1369 addr = tcg_temp_new();
1370 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1371 val = tcg_temp_new();
1372 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1373 tcg_gen_ori_i32(val, val, B7_0);
1374 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1375 tcg_temp_free(val);
1376 tcg_temp_free(addr);
1377 }
1378 return;
1379 case 0xc300: /* trapa #imm */
1380 {
1381 TCGv imm;
1382 CHECK_NOT_DELAY_SLOT
1383 imm = tcg_const_i32(B7_0);
1384 gen_helper_trapa(imm);
1385 tcg_temp_free(imm);
1386 ctx->bstate = BS_BRANCH;
1387 }
1388 return;
1389 case 0xc800: /* tst #imm,R0 */
1390 {
1391 TCGv val = tcg_temp_new();
1392 tcg_gen_andi_i32(val, REG(0), B7_0);
1393 gen_cmp_imm(TCG_COND_EQ, val, 0);
1394 tcg_temp_free(val);
1395 }
1396 return;
1397 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1398 {
1399 TCGv val = tcg_temp_new();
1400 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1401 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1402 tcg_gen_andi_i32(val, val, B7_0);
1403 gen_cmp_imm(TCG_COND_EQ, val, 0);
1404 tcg_temp_free(val);
1405 }
1406 return;
1407 case 0xca00: /* xor #imm,R0 */
1408 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1409 return;
1410 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1411 {
1412 TCGv addr, val;
1413 addr = tcg_temp_new();
1414 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1415 val = tcg_temp_new();
1416 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1417 tcg_gen_xori_i32(val, val, B7_0);
1418 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1419 tcg_temp_free(val);
1420 tcg_temp_free(addr);
1421 }
1422 return;
1423 }
1424
1425 switch (ctx->opcode & 0xf08f) {
1426 case 0x408e: /* ldc Rm,Rn_BANK */
1427 CHECK_PRIVILEGED
1428 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1429 return;
1430 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1431 CHECK_PRIVILEGED
1432 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1433 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1434 return;
1435 case 0x0082: /* stc Rm_BANK,Rn */
1436 CHECK_PRIVILEGED
1437 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1438 return;
1439 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1440 CHECK_PRIVILEGED
1441 {
1442 TCGv addr = tcg_temp_new();
1443 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1444 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1445 tcg_gen_mov_i32(REG(B11_8), addr);
1446 tcg_temp_free(addr);
1447 }
1448 return;
1449 }
1450
1451 switch (ctx->opcode & 0xf0ff) {
1452 case 0x0023: /* braf Rn */
1453 CHECK_NOT_DELAY_SLOT
1454 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1455 ctx->flags |= DELAY_SLOT;
1456 ctx->delayed_pc = (uint32_t) - 1;
1457 return;
1458 case 0x0003: /* bsrf Rn */
1459 CHECK_NOT_DELAY_SLOT
1460 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1461 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1462 ctx->flags |= DELAY_SLOT;
1463 ctx->delayed_pc = (uint32_t) - 1;
1464 return;
1465 case 0x4015: /* cmp/pl Rn */
1466 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1467 return;
1468 case 0x4011: /* cmp/pz Rn */
1469 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1470 return;
1471 case 0x4010: /* dt Rn */
1472 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1473 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1474 return;
1475 case 0x402b: /* jmp @Rn */
1476 CHECK_NOT_DELAY_SLOT
1477 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1478 ctx->flags |= DELAY_SLOT;
1479 ctx->delayed_pc = (uint32_t) - 1;
1480 return;
1481 case 0x400b: /* jsr @Rn */
1482 CHECK_NOT_DELAY_SLOT
1483 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1484 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1485 ctx->flags |= DELAY_SLOT;
1486 ctx->delayed_pc = (uint32_t) - 1;
1487 return;
1488 case 0x400e: /* ldc Rm,SR */
1489 CHECK_PRIVILEGED
1490 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1491 ctx->bstate = BS_STOP;
1492 return;
1493 case 0x4007: /* ldc.l @Rm+,SR */
1494 CHECK_PRIVILEGED
1495 {
1496 TCGv val = tcg_temp_new();
1497 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1498 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1499 tcg_temp_free(val);
1500 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1501 ctx->bstate = BS_STOP;
1502 }
1503 return;
1504 case 0x0002: /* stc SR,Rn */
1505 CHECK_PRIVILEGED
1506 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1507 return;
1508 case 0x4003: /* stc SR,@-Rn */
1509 CHECK_PRIVILEGED
1510 {
1511 TCGv addr = tcg_temp_new();
1512 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1513 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1514 tcg_gen_mov_i32(REG(B11_8), addr);
1515 tcg_temp_free(addr);
1516 }
1517 return;
1518 #define LD(reg,ldnum,ldpnum,prechk) \
1519 case ldnum: \
1520 prechk \
1521 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1522 return; \
1523 case ldpnum: \
1524 prechk \
1525 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1526 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1527 return;
1528 #define ST(reg,stnum,stpnum,prechk) \
1529 case stnum: \
1530 prechk \
1531 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1532 return; \
1533 case stpnum: \
1534 prechk \
1535 { \
1536 TCGv addr = tcg_temp_new(); \
1537 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1538 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1539 tcg_gen_mov_i32(REG(B11_8), addr); \
1540 tcg_temp_free(addr); \
1541 } \
1542 return;
1543 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1544 LD(reg,ldnum,ldpnum,prechk) \
1545 ST(reg,stnum,stpnum,prechk)
1546 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1547 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1548 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1549 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1550 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1551 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1552 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1553 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1554 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1555 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1556 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1557 case 0x406a: /* lds Rm,FPSCR */
1558 CHECK_FPU_ENABLED
1559 gen_helper_ld_fpscr(REG(B11_8));
1560 ctx->bstate = BS_STOP;
1561 return;
1562 case 0x4066: /* lds.l @Rm+,FPSCR */
1563 CHECK_FPU_ENABLED
1564 {
1565 TCGv addr = tcg_temp_new();
1566 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1567 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1568 gen_helper_ld_fpscr(addr);
1569 tcg_temp_free(addr);
1570 ctx->bstate = BS_STOP;
1571 }
1572 return;
1573 case 0x006a: /* sts FPSCR,Rn */
1574 CHECK_FPU_ENABLED
1575 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1576 return;
1577 case 0x4062: /* sts FPSCR,@-Rn */
1578 CHECK_FPU_ENABLED
1579 {
1580 TCGv addr, val;
1581 val = tcg_temp_new();
1582 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1583 addr = tcg_temp_new();
1584 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1585 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1586 tcg_gen_mov_i32(REG(B11_8), addr);
1587 tcg_temp_free(addr);
1588 tcg_temp_free(val);
1589 }
1590 return;
1591 case 0x00c3: /* movca.l R0,@Rm */
1592 {
1593 TCGv val = tcg_temp_new();
1594 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1595 gen_helper_movcal (REG(B11_8), val);
1596 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1597 }
1598 ctx->has_movcal = 1;
1599 return;
1600 case 0x40a9:
1601 /* MOVUA.L @Rm,R0 (Rm) -> R0
1602 Load non-boundary-aligned data */
1603 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1604 return;
1605 case 0x40e9:
1606 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1607 Load non-boundary-aligned data */
1608 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1609 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1610 return;
1611 case 0x0029: /* movt Rn */
1612 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1613 return;
1614 case 0x0073:
1615 /* MOVCO.L
1616 LDST -> T
1617 If (T == 1) R0 -> (Rn)
1618 0 -> LDST
1619 */
1620 if (ctx->features & SH_FEATURE_SH4A) {
1621 int label = gen_new_label();
1622 gen_clr_t();
1623 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1624 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1625 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1626 gen_set_label(label);
1627 tcg_gen_movi_i32(cpu_ldst, 0);
1628 return;
1629 } else
1630 break;
1631 case 0x0063:
1632 /* MOVLI.L @Rm,R0
1633 1 -> LDST
1634 (Rm) -> R0
1635 When interrupt/exception
1636 occurred 0 -> LDST
1637 */
1638 if (ctx->features & SH_FEATURE_SH4A) {
1639 tcg_gen_movi_i32(cpu_ldst, 0);
1640 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1641 tcg_gen_movi_i32(cpu_ldst, 1);
1642 return;
1643 } else
1644 break;
1645 case 0x0093: /* ocbi @Rn */
1646 {
1647 gen_helper_ocbi (REG(B11_8));
1648 }
1649 return;
1650 case 0x00a3: /* ocbp @Rn */
1651 case 0x00b3: /* ocbwb @Rn */
1652 /* These instructions are supposed to do nothing in case of
1653 a cache miss. Given that we only partially emulate caches
1654 it is safe to simply ignore them. */
1655 return;
1656 case 0x0083: /* pref @Rn */
1657 return;
1658 case 0x00d3: /* prefi @Rn */
1659 if (ctx->features & SH_FEATURE_SH4A)
1660 return;
1661 else
1662 break;
1663 case 0x00e3: /* icbi @Rn */
1664 if (ctx->features & SH_FEATURE_SH4A)
1665 return;
1666 else
1667 break;
1668 case 0x00ab: /* synco */
1669 if (ctx->features & SH_FEATURE_SH4A)
1670 return;
1671 else
1672 break;
1673 case 0x4024: /* rotcl Rn */
1674 {
1675 TCGv tmp = tcg_temp_new();
1676 tcg_gen_mov_i32(tmp, cpu_sr);
1677 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1678 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1679 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1680 tcg_temp_free(tmp);
1681 }
1682 return;
1683 case 0x4025: /* rotcr Rn */
1684 {
1685 TCGv tmp = tcg_temp_new();
1686 tcg_gen_mov_i32(tmp, cpu_sr);
1687 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1688 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1689 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1690 tcg_temp_free(tmp);
1691 }
1692 return;
1693 case 0x4004: /* rotl Rn */
1694 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1695 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1696 return;
1697 case 0x4005: /* rotr Rn */
1698 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1699 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1700 return;
1701 case 0x4000: /* shll Rn */
1702 case 0x4020: /* shal Rn */
1703 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1704 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1705 return;
1706 case 0x4021: /* shar Rn */
1707 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1708 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1709 return;
1710 case 0x4001: /* shlr Rn */
1711 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1712 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1713 return;
1714 case 0x4008: /* shll2 Rn */
1715 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1716 return;
1717 case 0x4018: /* shll8 Rn */
1718 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1719 return;
1720 case 0x4028: /* shll16 Rn */
1721 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1722 return;
1723 case 0x4009: /* shlr2 Rn */
1724 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1725 return;
1726 case 0x4019: /* shlr8 Rn */
1727 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1728 return;
1729 case 0x4029: /* shlr16 Rn */
1730 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1731 return;
1732 case 0x401b: /* tas.b @Rn */
1733 {
1734 TCGv addr, val;
1735 addr = tcg_temp_local_new();
1736 tcg_gen_mov_i32(addr, REG(B11_8));
1737 val = tcg_temp_local_new();
1738 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1739 gen_cmp_imm(TCG_COND_EQ, val, 0);
1740 tcg_gen_ori_i32(val, val, 0x80);
1741 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1742 tcg_temp_free(val);
1743 tcg_temp_free(addr);
1744 }
1745 return;
1746 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1747 CHECK_FPU_ENABLED
1748 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1749 return;
1750 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1751 CHECK_FPU_ENABLED
1752 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1753 return;
1754 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1755 CHECK_FPU_ENABLED
1756 if (ctx->fpscr & FPSCR_PR) {
1757 TCGv_i64 fp;
1758 if (ctx->opcode & 0x0100)
1759 break; /* illegal instruction */
1760 fp = tcg_temp_new_i64();
1761 gen_helper_float_DT(fp, cpu_fpul);
1762 gen_store_fpr64(fp, DREG(B11_8));
1763 tcg_temp_free_i64(fp);
1764 }
1765 else {
1766 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
1767 }
1768 return;
1769 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1770 CHECK_FPU_ENABLED
1771 if (ctx->fpscr & FPSCR_PR) {
1772 TCGv_i64 fp;
1773 if (ctx->opcode & 0x0100)
1774 break; /* illegal instruction */
1775 fp = tcg_temp_new_i64();
1776 gen_load_fpr64(fp, DREG(B11_8));
1777 gen_helper_ftrc_DT(cpu_fpul, fp);
1778 tcg_temp_free_i64(fp);
1779 }
1780 else {
1781 gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1782 }
1783 return;
1784 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1785 CHECK_FPU_ENABLED
1786 {
1787 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1788 }
1789 return;
1790 case 0xf05d: /* fabs FRn/DRn */
1791 CHECK_FPU_ENABLED
1792 if (ctx->fpscr & FPSCR_PR) {
1793 if (ctx->opcode & 0x0100)
1794 break; /* illegal instruction */
1795 TCGv_i64 fp = tcg_temp_new_i64();
1796 gen_load_fpr64(fp, DREG(B11_8));
1797 gen_helper_fabs_DT(fp, fp);
1798 gen_store_fpr64(fp, DREG(B11_8));
1799 tcg_temp_free_i64(fp);
1800 } else {
1801 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1802 }
1803 return;
1804 case 0xf06d: /* fsqrt FRn */
1805 CHECK_FPU_ENABLED
1806 if (ctx->fpscr & FPSCR_PR) {
1807 if (ctx->opcode & 0x0100)
1808 break; /* illegal instruction */
1809 TCGv_i64 fp = tcg_temp_new_i64();
1810 gen_load_fpr64(fp, DREG(B11_8));
1811 gen_helper_fsqrt_DT(fp, fp);
1812 gen_store_fpr64(fp, DREG(B11_8));
1813 tcg_temp_free_i64(fp);
1814 } else {
1815 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1816 }
1817 return;
1818 case 0xf07d: /* fsrra FRn */
1819 CHECK_FPU_ENABLED
1820 break;
1821 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1822 CHECK_FPU_ENABLED
1823 if (!(ctx->fpscr & FPSCR_PR)) {
1824 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1825 }
1826 return;
1827 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1828 CHECK_FPU_ENABLED
1829 if (!(ctx->fpscr & FPSCR_PR)) {
1830 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1831 }
1832 return;
1833 case 0xf0ad: /* fcnvsd FPUL,DRn */
1834 CHECK_FPU_ENABLED
1835 {
1836 TCGv_i64 fp = tcg_temp_new_i64();
1837 gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
1838 gen_store_fpr64(fp, DREG(B11_8));
1839 tcg_temp_free_i64(fp);
1840 }
1841 return;
1842 case 0xf0bd: /* fcnvds DRn,FPUL */
1843 CHECK_FPU_ENABLED
1844 {
1845 TCGv_i64 fp = tcg_temp_new_i64();
1846 gen_load_fpr64(fp, DREG(B11_8));
1847 gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
1848 tcg_temp_free_i64(fp);
1849 }
1850 return;
1851 case 0xf0ed: /* fipr FVm,FVn */
1852 CHECK_FPU_ENABLED
1853 if ((ctx->fpscr & FPSCR_PR) == 0) {
1854 TCGv m, n;
1855 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1856 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1857 gen_helper_fipr(m, n);
1858 tcg_temp_free(m);
1859 tcg_temp_free(n);
1860 return;
1861 }
1862 break;
1863 case 0xf0fd: /* ftrv XMTRX,FVn */
1864 CHECK_FPU_ENABLED
1865 if ((ctx->opcode & 0x0300) == 0x0100 &&
1866 (ctx->fpscr & FPSCR_PR) == 0) {
1867 TCGv n;
1868 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1869 gen_helper_ftrv(n);
1870 tcg_temp_free(n);
1871 return;
1872 }
1873 break;
1874 }
1875 #if 0
1876 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1877 ctx->opcode, ctx->pc);
1878 fflush(stderr);
1879 #endif
1880 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1881 gen_helper_raise_slot_illegal_instruction();
1882 } else {
1883 gen_helper_raise_illegal_instruction();
1884 }
1885 ctx->bstate = BS_EXCP;
1886 }
1887
/* Decode and translate one instruction, handling SH4 delay-slot state.
   Snapshots the delay-slot flags before decoding so that, when the
   current instruction IS a delay slot, the branch deferred by the
   previous instruction can be emitted right after it.  */
static void decode_opc(DisasContext * ctx)
{
    /* Flags as they were BEFORE this insn: tells us whether we are
       currently executing inside a delay slot.  */
    uint32_t old_flags = ctx->flags;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
        tcg_gen_debug_insn_start(ctx->pc);
    }

    _decode_opc(ctx);

    /* If this insn sat in a delay slot, the deferred branch must be
       emitted now that the slot insn's ops have been generated.  */
    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        }
        ctx->flags = 0;
        /* A delay slot always ends the TB with a branch.  */
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
        }

    }

    /* go into a delay slot */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
}
1921
1922 static inline void
1923 gen_intermediate_code_internal(CPUSH4State * env, TranslationBlock * tb,
1924 int search_pc)
1925 {
1926 DisasContext ctx;
1927 target_ulong pc_start;
1928 static uint16_t *gen_opc_end;
1929 CPUBreakpoint *bp;
1930 int i, ii;
1931 int num_insns;
1932 int max_insns;
1933
1934 pc_start = tb->pc;
1935 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1936 ctx.pc = pc_start;
1937 ctx.flags = (uint32_t)tb->flags;
1938 ctx.bstate = BS_NONE;
1939 ctx.sr = env->sr;
1940 ctx.fpscr = env->fpscr;
1941 ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
1942 /* We don't know if the delayed pc came from a dynamic or static branch,
1943 so assume it is a dynamic branch. */
1944 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1945 ctx.tb = tb;
1946 ctx.singlestep_enabled = env->singlestep_enabled;
1947 ctx.features = env->features;
1948 ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
1949
1950 ii = -1;
1951 num_insns = 0;
1952 max_insns = tb->cflags & CF_COUNT_MASK;
1953 if (max_insns == 0)
1954 max_insns = CF_COUNT_MASK;
1955 gen_icount_start();
1956 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1957 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1958 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1959 if (ctx.pc == bp->pc) {
1960 /* We have hit a breakpoint - make sure PC is up-to-date */
1961 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1962 gen_helper_debug();
1963 ctx.bstate = BS_EXCP;
1964 break;
1965 }
1966 }
1967 }
1968 if (search_pc) {
1969 i = gen_opc_ptr - gen_opc_buf;
1970 if (ii < i) {
1971 ii++;
1972 while (ii < i)
1973 gen_opc_instr_start[ii++] = 0;
1974 }
1975 gen_opc_pc[ii] = ctx.pc;
1976 gen_opc_hflags[ii] = ctx.flags;
1977 gen_opc_instr_start[ii] = 1;
1978 gen_opc_icount[ii] = num_insns;
1979 }
1980 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1981 gen_io_start();
1982 #if 0
1983 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1984 fflush(stderr);
1985 #endif
1986 ctx.opcode = lduw_code(ctx.pc);
1987 decode_opc(&ctx);
1988 num_insns++;
1989 ctx.pc += 2;
1990 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1991 break;
1992 if (env->singlestep_enabled)
1993 break;
1994 if (num_insns >= max_insns)
1995 break;
1996 if (singlestep)
1997 break;
1998 }
1999 if (tb->cflags & CF_LAST_IO)
2000 gen_io_end();
2001 if (env->singlestep_enabled) {
2002 tcg_gen_movi_i32(cpu_pc, ctx.pc);
2003 gen_helper_debug();
2004 } else {
2005 switch (ctx.bstate) {
2006 case BS_STOP:
2007 /* gen_op_interrupt_restart(); */
2008 /* fall through */
2009 case BS_NONE:
2010 if (ctx.flags) {
2011 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
2012 }
2013 gen_goto_tb(&ctx, 0, ctx.pc);
2014 break;
2015 case BS_EXCP:
2016 /* gen_op_interrupt_restart(); */
2017 tcg_gen_exit_tb(0);
2018 break;
2019 case BS_BRANCH:
2020 default:
2021 break;
2022 }
2023 }
2024
2025 gen_icount_end(tb, num_insns);
2026 *gen_opc_ptr = INDEX_op_end;
2027 if (search_pc) {
2028 i = gen_opc_ptr - gen_opc_buf;
2029 ii++;
2030 while (ii <= i)
2031 gen_opc_instr_start[ii++] = 0;
2032 } else {
2033 tb->size = ctx.pc - pc_start;
2034 tb->icount = num_insns;
2035 }
2036
2037 #ifdef DEBUG_DISAS
2038 #ifdef SH4_DEBUG_DISAS
2039 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2040 #endif
2041 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2042 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2043 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2044 qemu_log("\n");
2045 }
2046 #endif
2047 }
2048
/* Public entry point: translate the TB normally, without recording
   per-instruction PC mapping info (search_pc = 0).  */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}
2053
/* Public entry point: re-translate the TB while filling the gen_opc_*
   side tables (search_pc = 1), used to map a host PC back to guest
   state after a fault inside generated code.  */
void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
2058
2059 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
2060 {
2061 env->pc = gen_opc_pc[pc_pos];
2062 env->flags = gen_opc_hflags[pc_pos];
2063 }