/*
 * target-sh4/translate.c — SH4 instruction translation
 * (snapshot from qemu.git via git.proxmox.com; commit title:
 *  "target-sh4: cleanup DisasContext")
 */
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #define DEBUG_DISAS
21 #define SH4_DEBUG_DISAS
22 //#define SH4_SINGLE_STEP
23
24 #include "cpu.h"
25 #include "disas.h"
26 #include "tcg-op.h"
27
28 #include "helper.h"
29 #define GEN_HELPER 1
30 #include "helper.h"
31
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    struct TranslationBlock *tb;  /* TB currently being translated */
    target_ulong pc;              /* address of the instruction being decoded */
    uint16_t opcode;              /* raw 16-bit opcode (see B*_* extractors) */
    uint32_t flags;               /* SR/FPSCR bits and delay-slot state tested
                                     during decode (SR_MD, FPSCR_SZ, ...) */
    int bstate;                   /* BS_* code describing why/if we stop */
    int memidx;                   /* MMU index passed to qemu_ld/st ops */
    uint32_t delayed_pc;          /* static branch target, or -1 if dynamic */
    int singlestep_enabled;       /* emit debug trap instead of chaining TBs */
    uint32_t features;            /* SH_FEATURE_* bits of the current CPU */
    int has_movcal;               /* a movca.l backup may be live -- see the
                                     comment at the top of _decode_opc() */
} DisasContext;
44
45 #if defined(CONFIG_USER_ONLY)
46 #define IS_USER(ctx) 1
47 #else
48 #define IS_USER(ctx) (!(ctx->flags & SR_MD))
49 #endif
50
51 enum {
52 BS_NONE = 0, /* We go out of the TB without reaching a branch or an
53 * exception condition
54 */
55 BS_STOP = 1, /* We want to stop translation for any reason */
56 BS_BRANCH = 2, /* We reached a branch condition */
57 BS_EXCP = 3, /* We reached an exception condition */
58 };
59
60 /* global register indexes */
61 static TCGv_ptr cpu_env;
62 static TCGv cpu_gregs[24];
63 static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
64 static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
65 static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
66 static TCGv cpu_fregs[32];
67
68 /* internal register indexes */
69 static TCGv cpu_flags, cpu_delayed_pc;
70
71 static uint32_t gen_opc_hflags[OPC_BUF_SIZE];
72
73 #include "gen-icount.h"
74
/* One-time creation of the TCG globals mirroring CPUSH4State fields.
   Safe to call repeatedly: subsequent calls return immediately. */
static void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;   /* guards against repeated registration */
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
         "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
         "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
         "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
         "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
         "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
         "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* 24 general registers: 16 visible plus the 8 banked copies of R0-R7. */
    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    /* Control and system registers. */
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    /* Internal (non-architectural) state used by the translator. */
    cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
				       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
					    offsetof(CPUSH4State, delayed_pc),
					    "_delayed_pc_");
    cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
				      offsetof(CPUSH4State, ldst), "_ldst_");

    /* 32 FP registers: both banks of FPR0-FPR15. */
    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
153
154 void cpu_dump_state(CPUSH4State * env, FILE * f,
155 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
156 int flags)
157 {
158 int i;
159 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
160 env->pc, env->sr, env->pr, env->fpscr);
161 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
162 env->spc, env->ssr, env->gbr, env->vbr);
163 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
164 env->sgr, env->dbr, env->delayed_pc, env->fpul);
165 for (i = 0; i < 24; i += 4) {
166 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
167 i, env->gregs[i], i + 1, env->gregs[i + 1],
168 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
169 }
170 if (env->flags & DELAY_SLOT) {
171 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
172 env->delayed_pc);
173 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
174 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
175 env->delayed_pc);
176 }
177 }
178
/* Description of one supported SH4 CPU model (see sh4_defs below). */
typedef struct {
    const char *name;    /* model name, matched case-insensitively */
    int id;              /* SH_CPU_* identifier, copied to env->id */
    uint32_t pvr;        /* value for env->pvr */
    uint32_t prr;        /* value for env->prr */
    uint32_t cvr;        /* value for env->cvr */
    uint32_t features;   /* SH_FEATURE_* bits, copied to env->features */
} sh4_def_t;
187
188 static sh4_def_t sh4_defs[] = {
189 {
190 .name = "SH7750R",
191 .id = SH_CPU_SH7750R,
192 .pvr = 0x00050000,
193 .prr = 0x00000100,
194 .cvr = 0x00110000,
195 .features = SH_FEATURE_BCR3_AND_BCR4,
196 }, {
197 .name = "SH7751R",
198 .id = SH_CPU_SH7751R,
199 .pvr = 0x04050005,
200 .prr = 0x00000113,
201 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
202 .features = SH_FEATURE_BCR3_AND_BCR4,
203 }, {
204 .name = "SH7785",
205 .id = SH_CPU_SH7785,
206 .pvr = 0x10300700,
207 .prr = 0x00000200,
208 .cvr = 0x71440211,
209 .features = SH_FEATURE_SH4A,
210 },
211 };
212
213 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
214 {
215 int i;
216
217 if (strcasecmp(name, "any") == 0)
218 return &sh4_defs[0];
219
220 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
221 if (strcasecmp(name, sh4_defs[i].name) == 0)
222 return &sh4_defs[i];
223
224 return NULL;
225 }
226
227 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
228 {
229 int i;
230
231 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
232 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
233 }
234
235 static void cpu_register(CPUSH4State *env, const sh4_def_t *def)
236 {
237 env->pvr = def->pvr;
238 env->prr = def->prr;
239 env->cvr = def->cvr;
240 env->id = def->id;
241 }
242
243 SuperHCPU *cpu_sh4_init(const char *cpu_model)
244 {
245 SuperHCPU *cpu;
246 CPUSH4State *env;
247 const sh4_def_t *def;
248
249 def = cpu_sh4_find_by_name(cpu_model);
250 if (!def)
251 return NULL;
252 cpu = SUPERH_CPU(object_new(TYPE_SUPERH_CPU));
253 env = &cpu->env;
254 env->features = def->features;
255 sh4_translate_init();
256 env->cpu_model_str = cpu_model;
257 cpu_reset(CPU(cpu));
258 cpu_register(env, def);
259 qemu_init_vcpu(env);
260 return cpu;
261 }
262
/* Emit code to jump to DEST as exit N of the current TB.  If DEST is in
   the same guest page as the TB and single-stepping is off, emit a
   chainable direct jump; otherwise set PC and return to the main loop
   (raising a debug exception first when single-stepping). */
static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
{
    TranslationBlock *tb;
    tb = ctx->tb;

    if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
	!ctx->singlestep_enabled) {
	/* Use a direct jump if in same page and singlestep not enabled */
	tcg_gen_goto_tb(n);
	tcg_gen_movi_i32(cpu_pc, dest);
	/* Encode the exit slot in the low bits of the TB pointer. */
	tcg_gen_exit_tb((tcg_target_long)tb + n);
    } else {
	tcg_gen_movi_i32(cpu_pc, dest);
	if (ctx->singlestep_enabled)
	    gen_helper_debug(cpu_env);
	tcg_gen_exit_tb(0);
    }
}
281
/* Emit the jump to the pending delayed branch target.  A statically
   known target goes through gen_goto_tb (chainable); an unknown one
   (delayed_pc == -1) is taken from cpu_delayed_pc at run time. */
static void gen_jump(DisasContext * ctx)
{
    if (ctx->delayed_pc == (uint32_t) - 1) {
	/* Target is not statically known, it comes necessarily from a
	   delayed jump as immediate jumps are conditional jumps */
	tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
	if (ctx->singlestep_enabled)
	    gen_helper_debug(cpu_env);
	tcg_gen_exit_tb(0);
    } else {
	gen_goto_tb(ctx, 0, ctx->delayed_pc);
    }
}
295
296 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
297 {
298 TCGv sr;
299 int label = gen_new_label();
300 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
301 sr = tcg_temp_new();
302 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
303 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
304 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
305 gen_set_label(label);
306 }
307
308 /* Immediate conditional jump (bt or bf) */
309 static void gen_conditional_jump(DisasContext * ctx,
310 target_ulong ift, target_ulong ifnott)
311 {
312 int l1;
313 TCGv sr;
314
315 l1 = gen_new_label();
316 sr = tcg_temp_new();
317 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
318 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
319 gen_goto_tb(ctx, 0, ifnott);
320 gen_set_label(l1);
321 gen_goto_tb(ctx, 1, ift);
322 }
323
324 /* Delayed conditional jump (bt or bf) */
325 static void gen_delayed_conditional_jump(DisasContext * ctx)
326 {
327 int l1;
328 TCGv ds;
329
330 l1 = gen_new_label();
331 ds = tcg_temp_new();
332 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
333 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
334 gen_goto_tb(ctx, 1, ctx->pc + 2);
335 gen_set_label(l1);
336 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
337 gen_jump(ctx);
338 }
339
340 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
341 {
342 TCGv t;
343
344 t = tcg_temp_new();
345 tcg_gen_setcond_i32(cond, t, t1, t0);
346 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
347 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
348
349 tcg_temp_free(t);
350 }
351
352 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
353 {
354 TCGv t;
355
356 t = tcg_temp_new();
357 tcg_gen_setcondi_i32(cond, t, t0, imm);
358 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
359 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
360
361 tcg_temp_free(t);
362 }
363
/* Store FLAGS into cpu_flags, preserving only the DELAY_SLOT_TRUE bit
   (which tracks whether a pending conditional branch is taken). */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
369
370 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
371 {
372 TCGv tmp = tcg_temp_new();
373
374 p0 &= 0x1f;
375 p1 &= 0x1f;
376
377 tcg_gen_andi_i32(tmp, t1, (1 << p1));
378 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
379 if (p0 < p1)
380 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
381 else if (p0 > p1)
382 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
383 tcg_gen_or_i32(t0, t0, tmp);
384
385 tcg_temp_free(tmp);
386 }
387
/* Read the FPR pair (REG, REG + 1) into the 64-bit temp T:
   cpu_fregs[reg] supplies the high word, cpu_fregs[reg + 1] the low. */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
392
/* Write the 64-bit value T back to the FPR pair (REG, REG + 1).
   NOTE: T is clobbered (shifted right by 32) in the process. */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);   /* low word */
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);       /* high word */
    tcg_temp_free_i32(tmp);
}
403
/* Opcode field extractors: ctx->opcode is the raw 16-bit instruction;
   Bhi_lo selects the named bit range, the trailing 's' means signed. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, sign-extended by hand. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* R0-R7 are banked: REG() yields the active bank for register x
   (bank 1 when both SR_MD and SR_RB are set), ALTREG() the inactive one. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register selectors: FPSCR_FR flips between the two FP banks;
   XHACK remaps an XD register number onto the cpu_fregs layout. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
424
/* Raise a slot-illegal exception and bail out of the decoder when the
   current instruction sits in a delay slot (where it is not allowed). */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_BRANCH;                                \
      return;                                                 \
  }

/* Raise an illegal-instruction exception (slot variant inside a delay
   slot) and bail out when executing in user mode. */
#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
         gen_helper_raise_slot_illegal_instruction(cpu_env);    \
      } else {                                                  \
         gen_helper_raise_illegal_instruction(cpu_env);         \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }

/* Raise an FPU-disable exception (slot variant inside a delay slot)
   and bail out when SR.FD says the FPU is disabled. */
#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env);           \
      } else {                                                  \
          gen_helper_raise_fpu_disable(cpu_env);                \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
457
458 static void _decode_opc(DisasContext * ctx)
459 {
460 /* This code tries to make movcal emulation sufficiently
461 accurate for Linux purposes. This instruction writes
462 memory, and prior to that, always allocates a cache line.
463 It is used in two contexts:
464 - in memcpy, where data is copied in blocks, the first write
465 of to a block uses movca.l for performance.
466 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
467 to flush the cache. Here, the data written by movcal.l is never
468 written to memory, and the data written is just bogus.
469
470 To simulate this, we simulate movcal.l, we store the value to memory,
471 but we also remember the previous content. If we see ocbi, we check
472 if movcal.l for that address was done previously. If so, the write should
473 not have hit the memory, so we restore the previous content.
474 When we see an instruction that is neither movca.l
475 nor ocbi, the previous content is discarded.
476
477 To optimize, we only try to flush stores when we're at the start of
478 TB, or if we already saw movca.l in this TB and did not flush stores
479 yet. */
480 if (ctx->has_movcal)
481 {
482 int opcode = ctx->opcode & 0xf0ff;
483 if (opcode != 0x0093 /* ocbi */
484 && opcode != 0x00c3 /* movca.l */)
485 {
486 gen_helper_discard_movcal_backup(cpu_env);
487 ctx->has_movcal = 0;
488 }
489 }
490
491 #if 0
492 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
493 #endif
494
495 switch (ctx->opcode) {
496 case 0x0019: /* div0u */
497 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
498 return;
499 case 0x000b: /* rts */
500 CHECK_NOT_DELAY_SLOT
501 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
502 ctx->flags |= DELAY_SLOT;
503 ctx->delayed_pc = (uint32_t) - 1;
504 return;
505 case 0x0028: /* clrmac */
506 tcg_gen_movi_i32(cpu_mach, 0);
507 tcg_gen_movi_i32(cpu_macl, 0);
508 return;
509 case 0x0048: /* clrs */
510 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
511 return;
512 case 0x0008: /* clrt */
513 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
514 return;
515 case 0x0038: /* ldtlb */
516 CHECK_PRIVILEGED
517 gen_helper_ldtlb(cpu_env);
518 return;
519 case 0x002b: /* rte */
520 CHECK_PRIVILEGED
521 CHECK_NOT_DELAY_SLOT
522 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
523 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
524 ctx->flags |= DELAY_SLOT;
525 ctx->delayed_pc = (uint32_t) - 1;
526 return;
527 case 0x0058: /* sets */
528 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
529 return;
530 case 0x0018: /* sett */
531 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
532 return;
533 case 0xfbfd: /* frchg */
534 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
535 ctx->bstate = BS_STOP;
536 return;
537 case 0xf3fd: /* fschg */
538 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
539 ctx->bstate = BS_STOP;
540 return;
541 case 0x0009: /* nop */
542 return;
543 case 0x001b: /* sleep */
544 CHECK_PRIVILEGED
545 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
546 gen_helper_sleep(cpu_env);
547 return;
548 }
549
550 switch (ctx->opcode & 0xf000) {
551 case 0x1000: /* mov.l Rm,@(disp,Rn) */
552 {
553 TCGv addr = tcg_temp_new();
554 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
555 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
556 tcg_temp_free(addr);
557 }
558 return;
559 case 0x5000: /* mov.l @(disp,Rm),Rn */
560 {
561 TCGv addr = tcg_temp_new();
562 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
563 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
564 tcg_temp_free(addr);
565 }
566 return;
567 case 0xe000: /* mov #imm,Rn */
568 tcg_gen_movi_i32(REG(B11_8), B7_0s);
569 return;
570 case 0x9000: /* mov.w @(disp,PC),Rn */
571 {
572 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
573 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
574 tcg_temp_free(addr);
575 }
576 return;
577 case 0xd000: /* mov.l @(disp,PC),Rn */
578 {
579 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
580 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
581 tcg_temp_free(addr);
582 }
583 return;
584 case 0x7000: /* add #imm,Rn */
585 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
586 return;
587 case 0xa000: /* bra disp */
588 CHECK_NOT_DELAY_SLOT
589 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
590 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
591 ctx->flags |= DELAY_SLOT;
592 return;
593 case 0xb000: /* bsr disp */
594 CHECK_NOT_DELAY_SLOT
595 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
596 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
597 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
598 ctx->flags |= DELAY_SLOT;
599 return;
600 }
601
602 switch (ctx->opcode & 0xf00f) {
603 case 0x6003: /* mov Rm,Rn */
604 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
605 return;
606 case 0x2000: /* mov.b Rm,@Rn */
607 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
608 return;
609 case 0x2001: /* mov.w Rm,@Rn */
610 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
611 return;
612 case 0x2002: /* mov.l Rm,@Rn */
613 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
614 return;
615 case 0x6000: /* mov.b @Rm,Rn */
616 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
617 return;
618 case 0x6001: /* mov.w @Rm,Rn */
619 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
620 return;
621 case 0x6002: /* mov.l @Rm,Rn */
622 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
623 return;
624 case 0x2004: /* mov.b Rm,@-Rn */
625 {
626 TCGv addr = tcg_temp_new();
627 tcg_gen_subi_i32(addr, REG(B11_8), 1);
628 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
629 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
630 tcg_temp_free(addr);
631 }
632 return;
633 case 0x2005: /* mov.w Rm,@-Rn */
634 {
635 TCGv addr = tcg_temp_new();
636 tcg_gen_subi_i32(addr, REG(B11_8), 2);
637 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
638 tcg_gen_mov_i32(REG(B11_8), addr);
639 tcg_temp_free(addr);
640 }
641 return;
642 case 0x2006: /* mov.l Rm,@-Rn */
643 {
644 TCGv addr = tcg_temp_new();
645 tcg_gen_subi_i32(addr, REG(B11_8), 4);
646 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
647 tcg_gen_mov_i32(REG(B11_8), addr);
648 }
649 return;
650 case 0x6004: /* mov.b @Rm+,Rn */
651 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
652 if ( B11_8 != B7_4 )
653 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
654 return;
655 case 0x6005: /* mov.w @Rm+,Rn */
656 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
657 if ( B11_8 != B7_4 )
658 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
659 return;
660 case 0x6006: /* mov.l @Rm+,Rn */
661 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
662 if ( B11_8 != B7_4 )
663 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
664 return;
665 case 0x0004: /* mov.b Rm,@(R0,Rn) */
666 {
667 TCGv addr = tcg_temp_new();
668 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
669 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
670 tcg_temp_free(addr);
671 }
672 return;
673 case 0x0005: /* mov.w Rm,@(R0,Rn) */
674 {
675 TCGv addr = tcg_temp_new();
676 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
677 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
678 tcg_temp_free(addr);
679 }
680 return;
681 case 0x0006: /* mov.l Rm,@(R0,Rn) */
682 {
683 TCGv addr = tcg_temp_new();
684 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
685 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
686 tcg_temp_free(addr);
687 }
688 return;
689 case 0x000c: /* mov.b @(R0,Rm),Rn */
690 {
691 TCGv addr = tcg_temp_new();
692 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
693 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
694 tcg_temp_free(addr);
695 }
696 return;
697 case 0x000d: /* mov.w @(R0,Rm),Rn */
698 {
699 TCGv addr = tcg_temp_new();
700 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
701 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
702 tcg_temp_free(addr);
703 }
704 return;
705 case 0x000e: /* mov.l @(R0,Rm),Rn */
706 {
707 TCGv addr = tcg_temp_new();
708 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
709 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
710 tcg_temp_free(addr);
711 }
712 return;
713 case 0x6008: /* swap.b Rm,Rn */
714 {
715 TCGv high, low;
716 high = tcg_temp_new();
717 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
718 low = tcg_temp_new();
719 tcg_gen_ext16u_i32(low, REG(B7_4));
720 tcg_gen_bswap16_i32(low, low);
721 tcg_gen_or_i32(REG(B11_8), high, low);
722 tcg_temp_free(low);
723 tcg_temp_free(high);
724 }
725 return;
726 case 0x6009: /* swap.w Rm,Rn */
727 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
728 return;
729 case 0x200d: /* xtrct Rm,Rn */
730 {
731 TCGv high, low;
732 high = tcg_temp_new();
733 tcg_gen_shli_i32(high, REG(B7_4), 16);
734 low = tcg_temp_new();
735 tcg_gen_shri_i32(low, REG(B11_8), 16);
736 tcg_gen_or_i32(REG(B11_8), high, low);
737 tcg_temp_free(low);
738 tcg_temp_free(high);
739 }
740 return;
741 case 0x300c: /* add Rm,Rn */
742 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
743 return;
744 case 0x300e: /* addc Rm,Rn */
745 {
746 TCGv t0, t1, t2;
747 t0 = tcg_temp_new();
748 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
749 t1 = tcg_temp_new();
750 tcg_gen_add_i32(t1, REG(B7_4), REG(B11_8));
751 tcg_gen_add_i32(t0, t0, t1);
752 t2 = tcg_temp_new();
753 tcg_gen_setcond_i32(TCG_COND_GTU, t2, REG(B11_8), t1);
754 tcg_gen_setcond_i32(TCG_COND_GTU, t1, t1, t0);
755 tcg_gen_or_i32(t1, t1, t2);
756 tcg_temp_free(t2);
757 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
758 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
759 tcg_temp_free(t1);
760 tcg_gen_mov_i32(REG(B11_8), t0);
761 tcg_temp_free(t0);
762 }
763 return;
764 case 0x300f: /* addv Rm,Rn */
765 {
766 TCGv t0, t1, t2;
767 t0 = tcg_temp_new();
768 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
769 t1 = tcg_temp_new();
770 tcg_gen_xor_i32(t1, t0, REG(B11_8));
771 t2 = tcg_temp_new();
772 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
773 tcg_gen_andc_i32(t1, t1, t2);
774 tcg_temp_free(t2);
775 tcg_gen_shri_i32(t1, t1, 31);
776 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
777 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
778 tcg_temp_free(t1);
779 tcg_gen_mov_i32(REG(B7_4), t0);
780 tcg_temp_free(t0);
781 }
782 return;
783 case 0x2009: /* and Rm,Rn */
784 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
785 return;
786 case 0x3000: /* cmp/eq Rm,Rn */
787 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
788 return;
789 case 0x3003: /* cmp/ge Rm,Rn */
790 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
791 return;
792 case 0x3007: /* cmp/gt Rm,Rn */
793 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
794 return;
795 case 0x3006: /* cmp/hi Rm,Rn */
796 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
797 return;
798 case 0x3002: /* cmp/hs Rm,Rn */
799 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
800 return;
801 case 0x200c: /* cmp/str Rm,Rn */
802 {
803 TCGv cmp1 = tcg_temp_new();
804 TCGv cmp2 = tcg_temp_new();
805 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
806 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
807 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
808 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
809 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
810 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
811 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
812 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
813 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
814 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
815 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
816 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
817 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
818 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
819 tcg_temp_free(cmp2);
820 tcg_temp_free(cmp1);
821 }
822 return;
823 case 0x2007: /* div0s Rm,Rn */
824 {
825 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
826 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
827 TCGv val = tcg_temp_new();
828 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
829 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
830 tcg_temp_free(val);
831 }
832 return;
833 case 0x3004: /* div1 Rm,Rn */
834 gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
835 return;
836 case 0x300d: /* dmuls.l Rm,Rn */
837 {
838 TCGv_i64 tmp1 = tcg_temp_new_i64();
839 TCGv_i64 tmp2 = tcg_temp_new_i64();
840
841 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
842 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
843 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
844 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
845 tcg_gen_shri_i64(tmp1, tmp1, 32);
846 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
847
848 tcg_temp_free_i64(tmp2);
849 tcg_temp_free_i64(tmp1);
850 }
851 return;
852 case 0x3005: /* dmulu.l Rm,Rn */
853 {
854 TCGv_i64 tmp1 = tcg_temp_new_i64();
855 TCGv_i64 tmp2 = tcg_temp_new_i64();
856
857 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
858 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
859 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
860 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
861 tcg_gen_shri_i64(tmp1, tmp1, 32);
862 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
863
864 tcg_temp_free_i64(tmp2);
865 tcg_temp_free_i64(tmp1);
866 }
867 return;
868 case 0x600e: /* exts.b Rm,Rn */
869 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
870 return;
871 case 0x600f: /* exts.w Rm,Rn */
872 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
873 return;
874 case 0x600c: /* extu.b Rm,Rn */
875 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
876 return;
877 case 0x600d: /* extu.w Rm,Rn */
878 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
879 return;
880 case 0x000f: /* mac.l @Rm+,@Rn+ */
881 {
882 TCGv arg0, arg1;
883 arg0 = tcg_temp_new();
884 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
885 arg1 = tcg_temp_new();
886 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
887 gen_helper_macl(cpu_env, arg0, arg1);
888 tcg_temp_free(arg1);
889 tcg_temp_free(arg0);
890 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
891 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
892 }
893 return;
894 case 0x400f: /* mac.w @Rm+,@Rn+ */
895 {
896 TCGv arg0, arg1;
897 arg0 = tcg_temp_new();
898 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
899 arg1 = tcg_temp_new();
900 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
901 gen_helper_macw(cpu_env, arg0, arg1);
902 tcg_temp_free(arg1);
903 tcg_temp_free(arg0);
904 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
905 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
906 }
907 return;
908 case 0x0007: /* mul.l Rm,Rn */
909 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
910 return;
911 case 0x200f: /* muls.w Rm,Rn */
912 {
913 TCGv arg0, arg1;
914 arg0 = tcg_temp_new();
915 tcg_gen_ext16s_i32(arg0, REG(B7_4));
916 arg1 = tcg_temp_new();
917 tcg_gen_ext16s_i32(arg1, REG(B11_8));
918 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
919 tcg_temp_free(arg1);
920 tcg_temp_free(arg0);
921 }
922 return;
923 case 0x200e: /* mulu.w Rm,Rn */
924 {
925 TCGv arg0, arg1;
926 arg0 = tcg_temp_new();
927 tcg_gen_ext16u_i32(arg0, REG(B7_4));
928 arg1 = tcg_temp_new();
929 tcg_gen_ext16u_i32(arg1, REG(B11_8));
930 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
931 tcg_temp_free(arg1);
932 tcg_temp_free(arg0);
933 }
934 return;
935 case 0x600b: /* neg Rm,Rn */
936 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
937 return;
938 case 0x600a: /* negc Rm,Rn */
939 {
940 TCGv t0, t1;
941 t0 = tcg_temp_new();
942 tcg_gen_neg_i32(t0, REG(B7_4));
943 t1 = tcg_temp_new();
944 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
945 tcg_gen_sub_i32(REG(B11_8), t0, t1);
946 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
947 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
948 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
949 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
950 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
951 tcg_temp_free(t0);
952 tcg_temp_free(t1);
953 }
954 return;
955 case 0x6007: /* not Rm,Rn */
956 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
957 return;
958 case 0x200b: /* or Rm,Rn */
959 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
960 return;
961 case 0x400c: /* shad Rm,Rn */
962 {
963 int label1 = gen_new_label();
964 int label2 = gen_new_label();
965 int label3 = gen_new_label();
966 int label4 = gen_new_label();
967 TCGv shift;
968 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
969 /* Rm positive, shift to the left */
970 shift = tcg_temp_new();
971 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
972 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
973 tcg_temp_free(shift);
974 tcg_gen_br(label4);
975 /* Rm negative, shift to the right */
976 gen_set_label(label1);
977 shift = tcg_temp_new();
978 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
979 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
980 tcg_gen_not_i32(shift, REG(B7_4));
981 tcg_gen_andi_i32(shift, shift, 0x1f);
982 tcg_gen_addi_i32(shift, shift, 1);
983 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
984 tcg_temp_free(shift);
985 tcg_gen_br(label4);
986 /* Rm = -32 */
987 gen_set_label(label2);
988 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
989 tcg_gen_movi_i32(REG(B11_8), 0);
990 tcg_gen_br(label4);
991 gen_set_label(label3);
992 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
993 gen_set_label(label4);
994 }
995 return;
996 case 0x400d: /* shld Rm,Rn */
997 {
998 int label1 = gen_new_label();
999 int label2 = gen_new_label();
1000 int label3 = gen_new_label();
1001 TCGv shift;
1002 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
1003 /* Rm positive, shift to the left */
1004 shift = tcg_temp_new();
1005 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1006 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1007 tcg_temp_free(shift);
1008 tcg_gen_br(label3);
1009 /* Rm negative, shift to the right */
1010 gen_set_label(label1);
1011 shift = tcg_temp_new();
1012 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1013 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1014 tcg_gen_not_i32(shift, REG(B7_4));
1015 tcg_gen_andi_i32(shift, shift, 0x1f);
1016 tcg_gen_addi_i32(shift, shift, 1);
1017 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1018 tcg_temp_free(shift);
1019 tcg_gen_br(label3);
1020 /* Rm = -32 */
1021 gen_set_label(label2);
1022 tcg_gen_movi_i32(REG(B11_8), 0);
1023 gen_set_label(label3);
1024 }
1025 return;
1026 case 0x3008: /* sub Rm,Rn */
1027 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1028 return;
1029 case 0x300a: /* subc Rm,Rn */
1030 {
1031 TCGv t0, t1, t2;
1032 t0 = tcg_temp_new();
1033 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
1034 t1 = tcg_temp_new();
1035 tcg_gen_sub_i32(t1, REG(B11_8), REG(B7_4));
1036 tcg_gen_sub_i32(t0, t1, t0);
1037 t2 = tcg_temp_new();
1038 tcg_gen_setcond_i32(TCG_COND_LTU, t2, REG(B11_8), t1);
1039 tcg_gen_setcond_i32(TCG_COND_LTU, t1, t1, t0);
1040 tcg_gen_or_i32(t1, t1, t2);
1041 tcg_temp_free(t2);
1042 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1043 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
1044 tcg_temp_free(t1);
1045 tcg_gen_mov_i32(REG(B11_8), t0);
1046 tcg_temp_free(t0);
1047 }
1048 return;
1049 case 0x300b: /* subv Rm,Rn */
1050 {
1051 TCGv t0, t1, t2;
1052 t0 = tcg_temp_new();
1053 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
1054 t1 = tcg_temp_new();
1055 tcg_gen_xor_i32(t1, t0, REG(B7_4));
1056 t2 = tcg_temp_new();
1057 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
1058 tcg_gen_and_i32(t1, t1, t2);
1059 tcg_temp_free(t2);
1060 tcg_gen_shri_i32(t1, t1, 31);
1061 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1062 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
1063 tcg_temp_free(t1);
1064 tcg_gen_mov_i32(REG(B11_8), t0);
1065 tcg_temp_free(t0);
1066 }
1067 return;
1068 case 0x2008: /* tst Rm,Rn */
1069 {
1070 TCGv val = tcg_temp_new();
1071 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1072 gen_cmp_imm(TCG_COND_EQ, val, 0);
1073 tcg_temp_free(val);
1074 }
1075 return;
1076 case 0x200a: /* xor Rm,Rn */
1077 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1078 return;
1079 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1080 CHECK_FPU_ENABLED
1081 if (ctx->flags & FPSCR_SZ) {
1082 TCGv_i64 fp = tcg_temp_new_i64();
1083 gen_load_fpr64(fp, XREG(B7_4));
1084 gen_store_fpr64(fp, XREG(B11_8));
1085 tcg_temp_free_i64(fp);
1086 } else {
1087 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1088 }
1089 return;
1090 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1091 CHECK_FPU_ENABLED
1092 if (ctx->flags & FPSCR_SZ) {
1093 TCGv addr_hi = tcg_temp_new();
1094 int fr = XREG(B7_4);
1095 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1096 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1097 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1098 tcg_temp_free(addr_hi);
1099 } else {
1100 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1101 }
1102 return;
1103 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1104 CHECK_FPU_ENABLED
1105 if (ctx->flags & FPSCR_SZ) {
1106 TCGv addr_hi = tcg_temp_new();
1107 int fr = XREG(B11_8);
1108 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1109 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1110 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1111 tcg_temp_free(addr_hi);
1112 } else {
1113 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1114 }
1115 return;
1116 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1117 CHECK_FPU_ENABLED
1118 if (ctx->flags & FPSCR_SZ) {
1119 TCGv addr_hi = tcg_temp_new();
1120 int fr = XREG(B11_8);
1121 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1122 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1123 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1124 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1125 tcg_temp_free(addr_hi);
1126 } else {
1127 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1128 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1129 }
1130 return;
1131 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1132 CHECK_FPU_ENABLED
1133 if (ctx->flags & FPSCR_SZ) {
1134 TCGv addr = tcg_temp_new_i32();
1135 int fr = XREG(B7_4);
1136 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1137 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1138 tcg_gen_subi_i32(addr, addr, 4);
1139 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1140 tcg_gen_mov_i32(REG(B11_8), addr);
1141 tcg_temp_free(addr);
1142 } else {
1143 TCGv addr;
1144 addr = tcg_temp_new_i32();
1145 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1146 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1147 tcg_gen_mov_i32(REG(B11_8), addr);
1148 tcg_temp_free(addr);
1149 }
1150 return;
1151 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1152 CHECK_FPU_ENABLED
1153 {
1154 TCGv addr = tcg_temp_new_i32();
1155 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1156 if (ctx->flags & FPSCR_SZ) {
1157 int fr = XREG(B11_8);
1158 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1159 tcg_gen_addi_i32(addr, addr, 4);
1160 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1161 } else {
1162 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1163 }
1164 tcg_temp_free(addr);
1165 }
1166 return;
1167 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1168 CHECK_FPU_ENABLED
1169 {
1170 TCGv addr = tcg_temp_new();
1171 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1172 if (ctx->flags & FPSCR_SZ) {
1173 int fr = XREG(B7_4);
1174 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1175 tcg_gen_addi_i32(addr, addr, 4);
1176 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1177 } else {
1178 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1179 }
1180 tcg_temp_free(addr);
1181 }
1182 return;
1183 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1184 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1185 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1186 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1187 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1188 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1189 {
1190 CHECK_FPU_ENABLED
1191 if (ctx->flags & FPSCR_PR) {
1192 TCGv_i64 fp0, fp1;
1193
1194 if (ctx->opcode & 0x0110)
1195 break; /* illegal instruction */
1196 fp0 = tcg_temp_new_i64();
1197 fp1 = tcg_temp_new_i64();
1198 gen_load_fpr64(fp0, DREG(B11_8));
1199 gen_load_fpr64(fp1, DREG(B7_4));
1200 switch (ctx->opcode & 0xf00f) {
1201 case 0xf000: /* fadd Rm,Rn */
1202 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1203 break;
1204 case 0xf001: /* fsub Rm,Rn */
1205 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1206 break;
1207 case 0xf002: /* fmul Rm,Rn */
1208 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1209 break;
1210 case 0xf003: /* fdiv Rm,Rn */
1211 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1212 break;
1213 case 0xf004: /* fcmp/eq Rm,Rn */
1214 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1215 return;
1216 case 0xf005: /* fcmp/gt Rm,Rn */
1217 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1218 return;
1219 }
1220 gen_store_fpr64(fp0, DREG(B11_8));
1221 tcg_temp_free_i64(fp0);
1222 tcg_temp_free_i64(fp1);
1223 } else {
1224 switch (ctx->opcode & 0xf00f) {
1225 case 0xf000: /* fadd Rm,Rn */
1226 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1227 cpu_fregs[FREG(B11_8)],
1228 cpu_fregs[FREG(B7_4)]);
1229 break;
1230 case 0xf001: /* fsub Rm,Rn */
1231 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1232 cpu_fregs[FREG(B11_8)],
1233 cpu_fregs[FREG(B7_4)]);
1234 break;
1235 case 0xf002: /* fmul Rm,Rn */
1236 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1237 cpu_fregs[FREG(B11_8)],
1238 cpu_fregs[FREG(B7_4)]);
1239 break;
1240 case 0xf003: /* fdiv Rm,Rn */
1241 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1242 cpu_fregs[FREG(B11_8)],
1243 cpu_fregs[FREG(B7_4)]);
1244 break;
1245 case 0xf004: /* fcmp/eq Rm,Rn */
1246 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1247 cpu_fregs[FREG(B7_4)]);
1248 return;
1249 case 0xf005: /* fcmp/gt Rm,Rn */
1250 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1251 cpu_fregs[FREG(B7_4)]);
1252 return;
1253 }
1254 }
1255 }
1256 return;
1257 case 0xf00e: /* fmac FR0,RM,Rn */
1258 {
1259 CHECK_FPU_ENABLED
1260 if (ctx->flags & FPSCR_PR) {
1261 break; /* illegal instruction */
1262 } else {
1263 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1264 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1265 cpu_fregs[FREG(B11_8)]);
1266 return;
1267 }
1268 }
1269 }
1270
1271 switch (ctx->opcode & 0xff00) {
1272 case 0xc900: /* and #imm,R0 */
1273 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1274 return;
1275 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1276 {
1277 TCGv addr, val;
1278 addr = tcg_temp_new();
1279 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1280 val = tcg_temp_new();
1281 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1282 tcg_gen_andi_i32(val, val, B7_0);
1283 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1284 tcg_temp_free(val);
1285 tcg_temp_free(addr);
1286 }
1287 return;
1288 case 0x8b00: /* bf label */
1289 CHECK_NOT_DELAY_SLOT
1290 gen_conditional_jump(ctx, ctx->pc + 2,
1291 ctx->pc + 4 + B7_0s * 2);
1292 ctx->bstate = BS_BRANCH;
1293 return;
1294 case 0x8f00: /* bf/s label */
1295 CHECK_NOT_DELAY_SLOT
1296 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1297 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1298 return;
1299 case 0x8900: /* bt label */
1300 CHECK_NOT_DELAY_SLOT
1301 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1302 ctx->pc + 2);
1303 ctx->bstate = BS_BRANCH;
1304 return;
1305 case 0x8d00: /* bt/s label */
1306 CHECK_NOT_DELAY_SLOT
1307 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1308 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1309 return;
1310 case 0x8800: /* cmp/eq #imm,R0 */
1311 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1312 return;
1313 case 0xc400: /* mov.b @(disp,GBR),R0 */
1314 {
1315 TCGv addr = tcg_temp_new();
1316 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1317 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1318 tcg_temp_free(addr);
1319 }
1320 return;
1321 case 0xc500: /* mov.w @(disp,GBR),R0 */
1322 {
1323 TCGv addr = tcg_temp_new();
1324 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1325 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1326 tcg_temp_free(addr);
1327 }
1328 return;
1329 case 0xc600: /* mov.l @(disp,GBR),R0 */
1330 {
1331 TCGv addr = tcg_temp_new();
1332 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1333 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1334 tcg_temp_free(addr);
1335 }
1336 return;
1337 case 0xc000: /* mov.b R0,@(disp,GBR) */
1338 {
1339 TCGv addr = tcg_temp_new();
1340 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1341 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1342 tcg_temp_free(addr);
1343 }
1344 return;
1345 case 0xc100: /* mov.w R0,@(disp,GBR) */
1346 {
1347 TCGv addr = tcg_temp_new();
1348 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1349 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1350 tcg_temp_free(addr);
1351 }
1352 return;
1353 case 0xc200: /* mov.l R0,@(disp,GBR) */
1354 {
1355 TCGv addr = tcg_temp_new();
1356 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1357 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1358 tcg_temp_free(addr);
1359 }
1360 return;
1361 case 0x8000: /* mov.b R0,@(disp,Rn) */
1362 {
1363 TCGv addr = tcg_temp_new();
1364 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1365 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1366 tcg_temp_free(addr);
1367 }
1368 return;
1369 case 0x8100: /* mov.w R0,@(disp,Rn) */
1370 {
1371 TCGv addr = tcg_temp_new();
1372 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1373 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1374 tcg_temp_free(addr);
1375 }
1376 return;
1377 case 0x8400: /* mov.b @(disp,Rn),R0 */
1378 {
1379 TCGv addr = tcg_temp_new();
1380 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1381 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1382 tcg_temp_free(addr);
1383 }
1384 return;
1385 case 0x8500: /* mov.w @(disp,Rn),R0 */
1386 {
1387 TCGv addr = tcg_temp_new();
1388 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1389 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1390 tcg_temp_free(addr);
1391 }
1392 return;
1393 case 0xc700: /* mova @(disp,PC),R0 */
1394 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1395 return;
1396 case 0xcb00: /* or #imm,R0 */
1397 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1398 return;
1399 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1400 {
1401 TCGv addr, val;
1402 addr = tcg_temp_new();
1403 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1404 val = tcg_temp_new();
1405 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1406 tcg_gen_ori_i32(val, val, B7_0);
1407 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1408 tcg_temp_free(val);
1409 tcg_temp_free(addr);
1410 }
1411 return;
1412 case 0xc300: /* trapa #imm */
1413 {
1414 TCGv imm;
1415 CHECK_NOT_DELAY_SLOT
1416 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1417 imm = tcg_const_i32(B7_0);
1418 gen_helper_trapa(cpu_env, imm);
1419 tcg_temp_free(imm);
1420 ctx->bstate = BS_BRANCH;
1421 }
1422 return;
1423 case 0xc800: /* tst #imm,R0 */
1424 {
1425 TCGv val = tcg_temp_new();
1426 tcg_gen_andi_i32(val, REG(0), B7_0);
1427 gen_cmp_imm(TCG_COND_EQ, val, 0);
1428 tcg_temp_free(val);
1429 }
1430 return;
1431 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1432 {
1433 TCGv val = tcg_temp_new();
1434 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1435 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1436 tcg_gen_andi_i32(val, val, B7_0);
1437 gen_cmp_imm(TCG_COND_EQ, val, 0);
1438 tcg_temp_free(val);
1439 }
1440 return;
1441 case 0xca00: /* xor #imm,R0 */
1442 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1443 return;
1444 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1445 {
1446 TCGv addr, val;
1447 addr = tcg_temp_new();
1448 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1449 val = tcg_temp_new();
1450 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1451 tcg_gen_xori_i32(val, val, B7_0);
1452 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1453 tcg_temp_free(val);
1454 tcg_temp_free(addr);
1455 }
1456 return;
1457 }
1458
1459 switch (ctx->opcode & 0xf08f) {
1460 case 0x408e: /* ldc Rm,Rn_BANK */
1461 CHECK_PRIVILEGED
1462 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1463 return;
1464 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1465 CHECK_PRIVILEGED
1466 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1467 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1468 return;
1469 case 0x0082: /* stc Rm_BANK,Rn */
1470 CHECK_PRIVILEGED
1471 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1472 return;
1473 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1474 CHECK_PRIVILEGED
1475 {
1476 TCGv addr = tcg_temp_new();
1477 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1478 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1479 tcg_gen_mov_i32(REG(B11_8), addr);
1480 tcg_temp_free(addr);
1481 }
1482 return;
1483 }
1484
1485 switch (ctx->opcode & 0xf0ff) {
1486 case 0x0023: /* braf Rn */
1487 CHECK_NOT_DELAY_SLOT
1488 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1489 ctx->flags |= DELAY_SLOT;
1490 ctx->delayed_pc = (uint32_t) - 1;
1491 return;
1492 case 0x0003: /* bsrf Rn */
1493 CHECK_NOT_DELAY_SLOT
1494 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1495 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1496 ctx->flags |= DELAY_SLOT;
1497 ctx->delayed_pc = (uint32_t) - 1;
1498 return;
1499 case 0x4015: /* cmp/pl Rn */
1500 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1501 return;
1502 case 0x4011: /* cmp/pz Rn */
1503 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1504 return;
1505 case 0x4010: /* dt Rn */
1506 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1507 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1508 return;
1509 case 0x402b: /* jmp @Rn */
1510 CHECK_NOT_DELAY_SLOT
1511 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1512 ctx->flags |= DELAY_SLOT;
1513 ctx->delayed_pc = (uint32_t) - 1;
1514 return;
1515 case 0x400b: /* jsr @Rn */
1516 CHECK_NOT_DELAY_SLOT
1517 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1518 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1519 ctx->flags |= DELAY_SLOT;
1520 ctx->delayed_pc = (uint32_t) - 1;
1521 return;
1522 case 0x400e: /* ldc Rm,SR */
1523 CHECK_PRIVILEGED
1524 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1525 ctx->bstate = BS_STOP;
1526 return;
1527 case 0x4007: /* ldc.l @Rm+,SR */
1528 CHECK_PRIVILEGED
1529 {
1530 TCGv val = tcg_temp_new();
1531 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1532 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1533 tcg_temp_free(val);
1534 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1535 ctx->bstate = BS_STOP;
1536 }
1537 return;
1538 case 0x0002: /* stc SR,Rn */
1539 CHECK_PRIVILEGED
1540 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1541 return;
1542 case 0x4003: /* stc SR,@-Rn */
1543 CHECK_PRIVILEGED
1544 {
1545 TCGv addr = tcg_temp_new();
1546 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1547 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1548 tcg_gen_mov_i32(REG(B11_8), addr);
1549 tcg_temp_free(addr);
1550 }
1551 return;
1552 #define LD(reg,ldnum,ldpnum,prechk) \
1553 case ldnum: \
1554 prechk \
1555 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1556 return; \
1557 case ldpnum: \
1558 prechk \
1559 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1560 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1561 return;
1562 #define ST(reg,stnum,stpnum,prechk) \
1563 case stnum: \
1564 prechk \
1565 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1566 return; \
1567 case stpnum: \
1568 prechk \
1569 { \
1570 TCGv addr = tcg_temp_new(); \
1571 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1572 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1573 tcg_gen_mov_i32(REG(B11_8), addr); \
1574 tcg_temp_free(addr); \
1575 } \
1576 return;
1577 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1578 LD(reg,ldnum,ldpnum,prechk) \
1579 ST(reg,stnum,stpnum,prechk)
1580 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1581 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1582 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1583 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1584 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1585 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1586 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1587 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1588 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1589 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1590 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1591 case 0x406a: /* lds Rm,FPSCR */
1592 CHECK_FPU_ENABLED
1593 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1594 ctx->bstate = BS_STOP;
1595 return;
1596 case 0x4066: /* lds.l @Rm+,FPSCR */
1597 CHECK_FPU_ENABLED
1598 {
1599 TCGv addr = tcg_temp_new();
1600 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1601 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1602 gen_helper_ld_fpscr(cpu_env, addr);
1603 tcg_temp_free(addr);
1604 ctx->bstate = BS_STOP;
1605 }
1606 return;
1607 case 0x006a: /* sts FPSCR,Rn */
1608 CHECK_FPU_ENABLED
1609 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1610 return;
1611 case 0x4062: /* sts FPSCR,@-Rn */
1612 CHECK_FPU_ENABLED
1613 {
1614 TCGv addr, val;
1615 val = tcg_temp_new();
1616 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1617 addr = tcg_temp_new();
1618 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1619 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1620 tcg_gen_mov_i32(REG(B11_8), addr);
1621 tcg_temp_free(addr);
1622 tcg_temp_free(val);
1623 }
1624 return;
1625 case 0x00c3: /* movca.l R0,@Rm */
1626 {
1627 TCGv val = tcg_temp_new();
1628 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1629 gen_helper_movcal(cpu_env, REG(B11_8), val);
1630 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1631 }
1632 ctx->has_movcal = 1;
1633 return;
1634 case 0x40a9:
1635 /* MOVUA.L @Rm,R0 (Rm) -> R0
1636 Load non-boundary-aligned data */
1637 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1638 return;
1639 case 0x40e9:
1640 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1641 Load non-boundary-aligned data */
1642 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1643 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1644 return;
1645 case 0x0029: /* movt Rn */
1646 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1647 return;
1648 case 0x0073:
1649 /* MOVCO.L
1650 LDST -> T
1651 If (T == 1) R0 -> (Rn)
1652 0 -> LDST
1653 */
1654 if (ctx->features & SH_FEATURE_SH4A) {
1655 int label = gen_new_label();
1656 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1657 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1658 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1659 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1660 gen_set_label(label);
1661 tcg_gen_movi_i32(cpu_ldst, 0);
1662 return;
1663 } else
1664 break;
1665 case 0x0063:
1666 /* MOVLI.L @Rm,R0
1667 1 -> LDST
1668 (Rm) -> R0
1669 When interrupt/exception
1670 occurred 0 -> LDST
1671 */
1672 if (ctx->features & SH_FEATURE_SH4A) {
1673 tcg_gen_movi_i32(cpu_ldst, 0);
1674 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1675 tcg_gen_movi_i32(cpu_ldst, 1);
1676 return;
1677 } else
1678 break;
1679 case 0x0093: /* ocbi @Rn */
1680 {
1681 gen_helper_ocbi(cpu_env, REG(B11_8));
1682 }
1683 return;
1684 case 0x00a3: /* ocbp @Rn */
1685 case 0x00b3: /* ocbwb @Rn */
1686 /* These instructions are supposed to do nothing in case of
1687 a cache miss. Given that we only partially emulate caches
1688 it is safe to simply ignore them. */
1689 return;
1690 case 0x0083: /* pref @Rn */
1691 return;
1692 case 0x00d3: /* prefi @Rn */
1693 if (ctx->features & SH_FEATURE_SH4A)
1694 return;
1695 else
1696 break;
1697 case 0x00e3: /* icbi @Rn */
1698 if (ctx->features & SH_FEATURE_SH4A)
1699 return;
1700 else
1701 break;
1702 case 0x00ab: /* synco */
1703 if (ctx->features & SH_FEATURE_SH4A)
1704 return;
1705 else
1706 break;
1707 case 0x4024: /* rotcl Rn */
1708 {
1709 TCGv tmp = tcg_temp_new();
1710 tcg_gen_mov_i32(tmp, cpu_sr);
1711 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1712 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1713 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1714 tcg_temp_free(tmp);
1715 }
1716 return;
1717 case 0x4025: /* rotcr Rn */
1718 {
1719 TCGv tmp = tcg_temp_new();
1720 tcg_gen_mov_i32(tmp, cpu_sr);
1721 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1722 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1723 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1724 tcg_temp_free(tmp);
1725 }
1726 return;
1727 case 0x4004: /* rotl Rn */
1728 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1729 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1730 return;
1731 case 0x4005: /* rotr Rn */
1732 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1733 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1734 return;
1735 case 0x4000: /* shll Rn */
1736 case 0x4020: /* shal Rn */
1737 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1738 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1739 return;
1740 case 0x4021: /* shar Rn */
1741 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1742 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1743 return;
1744 case 0x4001: /* shlr Rn */
1745 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1746 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1747 return;
1748 case 0x4008: /* shll2 Rn */
1749 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1750 return;
1751 case 0x4018: /* shll8 Rn */
1752 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1753 return;
1754 case 0x4028: /* shll16 Rn */
1755 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1756 return;
1757 case 0x4009: /* shlr2 Rn */
1758 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1759 return;
1760 case 0x4019: /* shlr8 Rn */
1761 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1762 return;
1763 case 0x4029: /* shlr16 Rn */
1764 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1765 return;
1766 case 0x401b: /* tas.b @Rn */
1767 {
1768 TCGv addr, val;
1769 addr = tcg_temp_local_new();
1770 tcg_gen_mov_i32(addr, REG(B11_8));
1771 val = tcg_temp_local_new();
1772 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1773 gen_cmp_imm(TCG_COND_EQ, val, 0);
1774 tcg_gen_ori_i32(val, val, 0x80);
1775 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1776 tcg_temp_free(val);
1777 tcg_temp_free(addr);
1778 }
1779 return;
1780 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1781 CHECK_FPU_ENABLED
1782 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1783 return;
1784 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1785 CHECK_FPU_ENABLED
1786 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1787 return;
1788 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1789 CHECK_FPU_ENABLED
1790 if (ctx->flags & FPSCR_PR) {
1791 TCGv_i64 fp;
1792 if (ctx->opcode & 0x0100)
1793 break; /* illegal instruction */
1794 fp = tcg_temp_new_i64();
1795 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1796 gen_store_fpr64(fp, DREG(B11_8));
1797 tcg_temp_free_i64(fp);
1798 }
1799 else {
1800 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1801 }
1802 return;
1803 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1804 CHECK_FPU_ENABLED
1805 if (ctx->flags & FPSCR_PR) {
1806 TCGv_i64 fp;
1807 if (ctx->opcode & 0x0100)
1808 break; /* illegal instruction */
1809 fp = tcg_temp_new_i64();
1810 gen_load_fpr64(fp, DREG(B11_8));
1811 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1812 tcg_temp_free_i64(fp);
1813 }
1814 else {
1815 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1816 }
1817 return;
1818 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1819 CHECK_FPU_ENABLED
1820 {
1821 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1822 }
1823 return;
1824 case 0xf05d: /* fabs FRn/DRn */
1825 CHECK_FPU_ENABLED
1826 if (ctx->flags & FPSCR_PR) {
1827 if (ctx->opcode & 0x0100)
1828 break; /* illegal instruction */
1829 TCGv_i64 fp = tcg_temp_new_i64();
1830 gen_load_fpr64(fp, DREG(B11_8));
1831 gen_helper_fabs_DT(fp, fp);
1832 gen_store_fpr64(fp, DREG(B11_8));
1833 tcg_temp_free_i64(fp);
1834 } else {
1835 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1836 }
1837 return;
1838 case 0xf06d: /* fsqrt FRn */
1839 CHECK_FPU_ENABLED
1840 if (ctx->flags & FPSCR_PR) {
1841 if (ctx->opcode & 0x0100)
1842 break; /* illegal instruction */
1843 TCGv_i64 fp = tcg_temp_new_i64();
1844 gen_load_fpr64(fp, DREG(B11_8));
1845 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1846 gen_store_fpr64(fp, DREG(B11_8));
1847 tcg_temp_free_i64(fp);
1848 } else {
1849 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1850 cpu_fregs[FREG(B11_8)]);
1851 }
1852 return;
1853 case 0xf07d: /* fsrra FRn */
1854 CHECK_FPU_ENABLED
1855 break;
1856 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1857 CHECK_FPU_ENABLED
1858 if (!(ctx->flags & FPSCR_PR)) {
1859 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1860 }
1861 return;
1862 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1863 CHECK_FPU_ENABLED
1864 if (!(ctx->flags & FPSCR_PR)) {
1865 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1866 }
1867 return;
1868 case 0xf0ad: /* fcnvsd FPUL,DRn */
1869 CHECK_FPU_ENABLED
1870 {
1871 TCGv_i64 fp = tcg_temp_new_i64();
1872 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1873 gen_store_fpr64(fp, DREG(B11_8));
1874 tcg_temp_free_i64(fp);
1875 }
1876 return;
1877 case 0xf0bd: /* fcnvds DRn,FPUL */
1878 CHECK_FPU_ENABLED
1879 {
1880 TCGv_i64 fp = tcg_temp_new_i64();
1881 gen_load_fpr64(fp, DREG(B11_8));
1882 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1883 tcg_temp_free_i64(fp);
1884 }
1885 return;
1886 case 0xf0ed: /* fipr FVm,FVn */
1887 CHECK_FPU_ENABLED
1888 if ((ctx->flags & FPSCR_PR) == 0) {
1889 TCGv m, n;
1890 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1891 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1892 gen_helper_fipr(cpu_env, m, n);
1893 tcg_temp_free(m);
1894 tcg_temp_free(n);
1895 return;
1896 }
1897 break;
1898 case 0xf0fd: /* ftrv XMTRX,FVn */
1899 CHECK_FPU_ENABLED
1900 if ((ctx->opcode & 0x0300) == 0x0100 &&
1901 (ctx->flags & FPSCR_PR) == 0) {
1902 TCGv n;
1903 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1904 gen_helper_ftrv(cpu_env, n);
1905 tcg_temp_free(n);
1906 return;
1907 }
1908 break;
1909 }
1910 #if 0
1911 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1912 ctx->opcode, ctx->pc);
1913 fflush(stderr);
1914 #endif
1915 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1916 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1917 gen_helper_raise_slot_illegal_instruction(cpu_env);
1918 } else {
1919 gen_helper_raise_illegal_instruction(cpu_env);
1920 }
1921 ctx->bstate = BS_BRANCH;
1922 }
1923
/* Translate one guest instruction, handling SH4 delay-slot state.
   ctx->flags carries DELAY_SLOT / DELAY_SLOT_CONDITIONAL markers that
   _decode_opc() sets when it translates a delayed branch (bra/bsr-style,
   jmp, jsr, braf, bsrf, bt/s, bf/s); the instruction translated on the
   *next* call to this function is then the delay-slot instruction. */
static void decode_opc(DisasContext * ctx)
{
    /* Snapshot taken before translating: tells us afterwards whether the
       instruction we just translated was sitting in a delay slot. */
    uint32_t old_flags = ctx->flags;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
        tcg_gen_debug_insn_start(ctx->pc);
    }

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        /* The instruction just translated occupied a delay slot: store the
           post-slot flags back to the CPU state, then emit the delayed
           branch itself.  The flags must be stored *before* the jump is
           generated, since the jump ends the TB. */
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        }
        ctx->flags = 0;
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            /* bt/s or bf/s: branch taken/not-taken was latched before
               the slot executed. */
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            /* Unconditional delayed branch (jmp, jsr, braf, bsrf, ...). */
            gen_jump(ctx);
        }

    }

    /* go into a delay slot */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
}
1957
1958 static inline void
1959 gen_intermediate_code_internal(CPUSH4State * env, TranslationBlock * tb,
1960 int search_pc)
1961 {
1962 DisasContext ctx;
1963 target_ulong pc_start;
1964 static uint16_t *gen_opc_end;
1965 CPUBreakpoint *bp;
1966 int i, ii;
1967 int num_insns;
1968 int max_insns;
1969
1970 pc_start = tb->pc;
1971 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1972 ctx.pc = pc_start;
1973 ctx.flags = (uint32_t)tb->flags;
1974 ctx.bstate = BS_NONE;
1975 ctx.memidx = (ctx.flags & SR_MD) == 0 ? 1 : 0;
1976 /* We don't know if the delayed pc came from a dynamic or static branch,
1977 so assume it is a dynamic branch. */
1978 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1979 ctx.tb = tb;
1980 ctx.singlestep_enabled = env->singlestep_enabled;
1981 ctx.features = env->features;
1982 ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
1983
1984 ii = -1;
1985 num_insns = 0;
1986 max_insns = tb->cflags & CF_COUNT_MASK;
1987 if (max_insns == 0)
1988 max_insns = CF_COUNT_MASK;
1989 gen_icount_start();
1990 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1991 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1992 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1993 if (ctx.pc == bp->pc) {
1994 /* We have hit a breakpoint - make sure PC is up-to-date */
1995 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1996 gen_helper_debug(cpu_env);
1997 ctx.bstate = BS_BRANCH;
1998 break;
1999 }
2000 }
2001 }
2002 if (search_pc) {
2003 i = gen_opc_ptr - gen_opc_buf;
2004 if (ii < i) {
2005 ii++;
2006 while (ii < i)
2007 gen_opc_instr_start[ii++] = 0;
2008 }
2009 gen_opc_pc[ii] = ctx.pc;
2010 gen_opc_hflags[ii] = ctx.flags;
2011 gen_opc_instr_start[ii] = 1;
2012 gen_opc_icount[ii] = num_insns;
2013 }
2014 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
2015 gen_io_start();
2016 #if 0
2017 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
2018 fflush(stderr);
2019 #endif
2020 ctx.opcode = cpu_lduw_code(env, ctx.pc);
2021 decode_opc(&ctx);
2022 num_insns++;
2023 ctx.pc += 2;
2024 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
2025 break;
2026 if (env->singlestep_enabled)
2027 break;
2028 if (num_insns >= max_insns)
2029 break;
2030 if (singlestep)
2031 break;
2032 }
2033 if (tb->cflags & CF_LAST_IO)
2034 gen_io_end();
2035 if (env->singlestep_enabled) {
2036 tcg_gen_movi_i32(cpu_pc, ctx.pc);
2037 gen_helper_debug(cpu_env);
2038 } else {
2039 switch (ctx.bstate) {
2040 case BS_STOP:
2041 /* gen_op_interrupt_restart(); */
2042 /* fall through */
2043 case BS_NONE:
2044 if (ctx.flags) {
2045 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
2046 }
2047 gen_goto_tb(&ctx, 0, ctx.pc);
2048 break;
2049 case BS_EXCP:
2050 /* gen_op_interrupt_restart(); */
2051 tcg_gen_exit_tb(0);
2052 break;
2053 case BS_BRANCH:
2054 default:
2055 break;
2056 }
2057 }
2058
2059 gen_icount_end(tb, num_insns);
2060 *gen_opc_ptr = INDEX_op_end;
2061 if (search_pc) {
2062 i = gen_opc_ptr - gen_opc_buf;
2063 ii++;
2064 while (ii <= i)
2065 gen_opc_instr_start[ii++] = 0;
2066 } else {
2067 tb->size = ctx.pc - pc_start;
2068 tb->icount = num_insns;
2069 }
2070
2071 #ifdef DEBUG_DISAS
2072 #ifdef SH4_DEBUG_DISAS
2073 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2074 #endif
2075 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2076 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2077 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2078 qemu_log("\n");
2079 }
2080 #endif
2081 }
2082
2083 void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
2084 {
2085 gen_intermediate_code_internal(env, tb, 0);
2086 }
2087
2088 void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
2089 {
2090 gen_intermediate_code_internal(env, tb, 1);
2091 }
2092
2093 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
2094 {
2095 env->pc = gen_opc_pc[pc_pos];
2096 env->flags = gen_opc_hflags[pc_pos];
2097 }