]> git.proxmox.com Git - qemu.git/blob - target-sh4/translate.c
d0568e296427b81b554601a3d9faba61fee76edf
[qemu.git] / target-sh4 / translate.c
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #define DEBUG_DISAS
21 #define SH4_DEBUG_DISAS
22 //#define SH4_SINGLE_STEP
23
24 #include "cpu.h"
25 #include "disas.h"
26 #include "tcg-op.h"
27
28 #include "helper.h"
29 #define GEN_HELPER 1
30 #include "helper.h"
31
/* Per-translation-block decoder state.  */
typedef struct DisasContext {
    struct TranslationBlock *tb;  /* TB being translated */
    target_ulong pc;              /* guest PC of the current instruction */
    uint32_t sr;                  /* SR at translation time (MD/RB select the
                                     active register bank, see REG()) */
    uint32_t fpscr;               /* FPSCR at translation time (FR/SZ bits) */
    uint16_t opcode;              /* current 16-bit instruction word */
    uint32_t flags;               /* TB flags: delay-slot state, SR_FD, ... */
    int bstate;                   /* BS_* exit state of the translation loop */
    int memidx;                   /* MMU index used for guest memory accesses */
    uint32_t delayed_pc;          /* static branch target, or (uint32_t)-1
                                     when the target is only known at runtime */
    int singlestep_enabled;       /* gdbstub single-stepping active */
    uint32_t features;            /* SH_FEATURE_* bits of the CPU model */
    int has_movcal;               /* movca.l backup pending, not yet flushed
                                     (see the comment in _decode_opc) */
} DisasContext;
46
/* True when translating user-mode code (SR.MD clear).  User-only builds
   never run privileged code.  */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif
52
/* Values for DisasContext::bstate, describing why/how the TB ends.  */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition
                  */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
61
/* global register indexes (TCG globals backed by CPUSH4State fields) */
static TCGv_ptr cpu_env;
/* R0-R7 of bank 0, shared R8-R15, R0-R7 of bank 1 -- see gregnames[] */
static TCGv cpu_gregs[24];
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
/* two banks of 16 single-precision FP registers -- see fregnames[] */
static TCGv cpu_fregs[32];

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

/* per-insn copy of the TB flags, for PC/state restore on exception */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "gen-icount.h"
76
/* Create the TCG globals that mirror the CPUSH4State fields and register
   the helper functions.  Called once per created CPU; the done_init guard
   makes every call after the first a no-op.  */
static void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;
    /* R0-R7 of bank 0, shared R8-R15, then R0-R7 of bank 1; SR.MD/SR.RB
       select which bank R0-R7 name at runtime.  */
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    /* Two banks of 16 single-precision FP registers; FPSCR.FR selects the
       active bank.  */
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    /* register helpers: re-expands helper.h with GEN_HELPER == 2 so each
       DEF_HELPER becomes a registration call instead of a prototype */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
155
156 void cpu_dump_state(CPUSH4State * env, FILE * f,
157 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
158 int flags)
159 {
160 int i;
161 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
162 env->pc, env->sr, env->pr, env->fpscr);
163 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
164 env->spc, env->ssr, env->gbr, env->vbr);
165 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
166 env->sgr, env->dbr, env->delayed_pc, env->fpul);
167 for (i = 0; i < 24; i += 4) {
168 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
169 i, env->gregs[i], i + 1, env->gregs[i + 1],
170 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
171 }
172 if (env->flags & DELAY_SLOT) {
173 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
174 env->delayed_pc);
175 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
176 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
177 env->delayed_pc);
178 }
179 }
180
/* Legacy entry point: reset the CPU through its QOM object.  */
void cpu_state_reset(CPUSH4State *env)
{
    cpu_reset(ENV_GET_CPU(env));
}
185
/* Static description of one supported SH4 CPU model.  */
typedef struct {
    const char *name;   /* model name as accepted by -cpu */
    int id;             /* SH_CPU_* identifier */
    uint32_t pvr;       /* processor version register value */
    uint32_t prr;       /* product revision register value */
    uint32_t cvr;       /* cache version register value */
    uint32_t features;  /* SH_FEATURE_* capability bits */
} sh4_def_t;
194
195 static sh4_def_t sh4_defs[] = {
196 {
197 .name = "SH7750R",
198 .id = SH_CPU_SH7750R,
199 .pvr = 0x00050000,
200 .prr = 0x00000100,
201 .cvr = 0x00110000,
202 .features = SH_FEATURE_BCR3_AND_BCR4,
203 }, {
204 .name = "SH7751R",
205 .id = SH_CPU_SH7751R,
206 .pvr = 0x04050005,
207 .prr = 0x00000113,
208 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
209 .features = SH_FEATURE_BCR3_AND_BCR4,
210 }, {
211 .name = "SH7785",
212 .id = SH_CPU_SH7785,
213 .pvr = 0x10300700,
214 .prr = 0x00000200,
215 .cvr = 0x71440211,
216 .features = SH_FEATURE_SH4A,
217 },
218 };
219
220 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
221 {
222 int i;
223
224 if (strcasecmp(name, "any") == 0)
225 return &sh4_defs[0];
226
227 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
228 if (strcasecmp(name, sh4_defs[i].name) == 0)
229 return &sh4_defs[i];
230
231 return NULL;
232 }
233
234 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
235 {
236 int i;
237
238 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
239 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
240 }
241
242 static void cpu_register(CPUSH4State *env, const sh4_def_t *def)
243 {
244 env->pvr = def->pvr;
245 env->prr = def->prr;
246 env->cvr = def->cvr;
247 env->id = def->id;
248 }
249
/* Create and initialize a CPU of the model named by cpu_model.
   Returns the embedded CPUSH4State, or NULL if the model name is
   unknown.  The ordering below matters: features must be set before
   cpu_exec_init/reset, and cpu_register must run after cpu_reset so
   the ID registers are not wiped.  */
CPUSH4State *cpu_sh4_init(const char *cpu_model)
{
    SuperHCPU *cpu;
    CPUSH4State *env;
    const sh4_def_t *def;

    def = cpu_sh4_find_by_name(cpu_model);
    if (!def)
	return NULL;
    cpu = SUPERH_CPU(object_new(TYPE_SUPERH_CPU));
    env = &cpu->env;
    env->features = def->features;
    cpu_exec_init(env);
    /* empty movca.l backup list (see the movcal notes in _decode_opc) */
    env->movcal_backup_tail = &(env->movcal_backup);
    sh4_translate_init();
    env->cpu_model_str = cpu_model;
    cpu_reset(CPU(cpu));
    cpu_register(env, def);
    qemu_init_vcpu(env);
    return env;
}
271
272 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
273 {
274 TranslationBlock *tb;
275 tb = ctx->tb;
276
277 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
278 !ctx->singlestep_enabled) {
279 /* Use a direct jump if in same page and singlestep not enabled */
280 tcg_gen_goto_tb(n);
281 tcg_gen_movi_i32(cpu_pc, dest);
282 tcg_gen_exit_tb((tcg_target_long)tb + n);
283 } else {
284 tcg_gen_movi_i32(cpu_pc, dest);
285 if (ctx->singlestep_enabled)
286 gen_helper_debug();
287 tcg_gen_exit_tb(0);
288 }
289 }
290
291 static void gen_jump(DisasContext * ctx)
292 {
293 if (ctx->delayed_pc == (uint32_t) - 1) {
294 /* Target is not statically known, it comes necessarily from a
295 delayed jump as immediate jump are conditinal jumps */
296 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
297 if (ctx->singlestep_enabled)
298 gen_helper_debug();
299 tcg_gen_exit_tb(0);
300 } else {
301 gen_goto_tb(ctx, 0, ctx->delayed_pc);
302 }
303 }
304
305 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
306 {
307 TCGv sr;
308 int label = gen_new_label();
309 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
310 sr = tcg_temp_new();
311 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
312 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
313 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
314 gen_set_label(label);
315 }
316
317 /* Immediate conditional jump (bt or bf) */
318 static void gen_conditional_jump(DisasContext * ctx,
319 target_ulong ift, target_ulong ifnott)
320 {
321 int l1;
322 TCGv sr;
323
324 l1 = gen_new_label();
325 sr = tcg_temp_new();
326 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
327 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
328 gen_goto_tb(ctx, 0, ifnott);
329 gen_set_label(l1);
330 gen_goto_tb(ctx, 1, ift);
331 }
332
333 /* Delayed conditional jump (bt or bf) */
334 static void gen_delayed_conditional_jump(DisasContext * ctx)
335 {
336 int l1;
337 TCGv ds;
338
339 l1 = gen_new_label();
340 ds = tcg_temp_new();
341 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
342 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
343 gen_goto_tb(ctx, 1, ctx->pc + 2);
344 gen_set_label(l1);
345 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
346 gen_jump(ctx);
347 }
348
/* Set the SR.T flag.  */
static inline void gen_set_t(void)
{
    tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
}
353
/* Clear the SR.T flag.  */
static inline void gen_clr_t(void)
{
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
}
358
359 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
360 {
361 TCGv t;
362
363 t = tcg_temp_new();
364 tcg_gen_setcond_i32(cond, t, t1, t0);
365 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
366 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
367
368 tcg_temp_free(t);
369 }
370
371 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
372 {
373 TCGv t;
374
375 t = tcg_temp_new();
376 tcg_gen_setcondi_i32(cond, t, t0, imm);
377 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
378 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
379
380 tcg_temp_free(t);
381 }
382
/* Replace the runtime TB flags with the given value, preserving only
   the DELAY_SLOT_TRUE bit, which is owned by the conditional-branch
   machinery (gen_branch_slot / gen_delayed_conditional_jump).  */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
388
389 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
390 {
391 TCGv tmp = tcg_temp_new();
392
393 p0 &= 0x1f;
394 p1 &= 0x1f;
395
396 tcg_gen_andi_i32(tmp, t1, (1 << p1));
397 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
398 if (p0 < p1)
399 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
400 else if (p0 > p1)
401 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
402 tcg_gen_or_i32(t0, t0, tmp);
403
404 tcg_temp_free(tmp);
405 }
406
/* Load the FP register pair reg/reg+1 into the 64-bit temp t:
   cpu_fregs[reg] supplies the high half, cpu_fregs[reg + 1] the low.  */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
411
/* Store the 64-bit temp t into the FP register pair reg/reg+1
   (high half -> cpu_fregs[reg], low half -> cpu_fregs[reg + 1]).
   NOTE: clobbers t, which is shifted right by 32 in place.  */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, t);       /* low 32 bits */
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_trunc_i64_i32(tmp, t);       /* high 32 bits */
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);
    tcg_temp_free_i32(tmp);
}
422
/* Instruction field accessors: B<H>_<L> extracts bits H..L of the
   16-bit instruction word in ctx->opcode; an 's' suffix means the
   field is sign-extended.  */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, manually sign-extended to 32 bits.  */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access: R0-R7 come from bank 1 when both SR.MD and
   SR.RB are set at translation time, from bank 0 otherwise; R8-R15 are
   unbanked.  */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
		(cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* Access to R0-R7 of the bank NOT currently selected (ldc/stc Rn_BANK). */
#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register access; FPSCR.FR flips between the two banks of 16.  */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
/* Remap an XD register number: the odd/even bit selects the bank half.  */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
/* Abort decoding with a slot-illegal-instruction exception when the
   current instruction is not allowed inside a delay slot.  */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) \
  { \
      gen_helper_raise_slot_illegal_instruction(); \
      ctx->bstate = BS_EXCP; \
      return; \
  }

/* Abort decoding with an illegal-instruction exception (slot variant
   inside a delay slot) when a privileged insn runs in user mode.  */
#define CHECK_PRIVILEGED \
  if (IS_USER(ctx)) { \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
         gen_helper_raise_slot_illegal_instruction(); \
      } else { \
         gen_helper_raise_illegal_instruction(); \
      } \
      ctx->bstate = BS_EXCP; \
      return; \
  }

/* Abort decoding with an FPU-disable exception (slot variant inside a
   delay slot) when SR.FD is set.  */
#define CHECK_FPU_ENABLED \
  if (ctx->flags & SR_FD) { \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(); \
      } else { \
          gen_helper_raise_fpu_disable(); \
      } \
      ctx->bstate = BS_EXCP; \
      return; \
  }
473
474 static void _decode_opc(DisasContext * ctx)
475 {
476 /* This code tries to make movcal emulation sufficiently
477 accurate for Linux purposes. This instruction writes
478 memory, and prior to that, always allocates a cache line.
479 It is used in two contexts:
480 - in memcpy, where data is copied in blocks, the first write
481 of to a block uses movca.l for performance.
482 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
483 to flush the cache. Here, the data written by movcal.l is never
484 written to memory, and the data written is just bogus.
485
486 To simulate this, we simulate movcal.l, we store the value to memory,
487 but we also remember the previous content. If we see ocbi, we check
488 if movcal.l for that address was done previously. If so, the write should
489 not have hit the memory, so we restore the previous content.
490 When we see an instruction that is neither movca.l
491 nor ocbi, the previous content is discarded.
492
493 To optimize, we only try to flush stores when we're at the start of
494 TB, or if we already saw movca.l in this TB and did not flush stores
495 yet. */
496 if (ctx->has_movcal)
497 {
498 int opcode = ctx->opcode & 0xf0ff;
499 if (opcode != 0x0093 /* ocbi */
500 && opcode != 0x00c3 /* movca.l */)
501 {
502 gen_helper_discard_movcal_backup ();
503 ctx->has_movcal = 0;
504 }
505 }
506
507 #if 0
508 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
509 #endif
510
511 switch (ctx->opcode) {
512 case 0x0019: /* div0u */
513 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
514 return;
515 case 0x000b: /* rts */
516 CHECK_NOT_DELAY_SLOT
517 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
518 ctx->flags |= DELAY_SLOT;
519 ctx->delayed_pc = (uint32_t) - 1;
520 return;
521 case 0x0028: /* clrmac */
522 tcg_gen_movi_i32(cpu_mach, 0);
523 tcg_gen_movi_i32(cpu_macl, 0);
524 return;
525 case 0x0048: /* clrs */
526 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
527 return;
528 case 0x0008: /* clrt */
529 gen_clr_t();
530 return;
531 case 0x0038: /* ldtlb */
532 CHECK_PRIVILEGED
533 gen_helper_ldtlb();
534 return;
535 case 0x002b: /* rte */
536 CHECK_PRIVILEGED
537 CHECK_NOT_DELAY_SLOT
538 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
539 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
540 ctx->flags |= DELAY_SLOT;
541 ctx->delayed_pc = (uint32_t) - 1;
542 return;
543 case 0x0058: /* sets */
544 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
545 return;
546 case 0x0018: /* sett */
547 gen_set_t();
548 return;
549 case 0xfbfd: /* frchg */
550 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
551 ctx->bstate = BS_STOP;
552 return;
553 case 0xf3fd: /* fschg */
554 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
555 ctx->bstate = BS_STOP;
556 return;
557 case 0x0009: /* nop */
558 return;
559 case 0x001b: /* sleep */
560 CHECK_PRIVILEGED
561 gen_helper_sleep(tcg_const_i32(ctx->pc + 2));
562 return;
563 }
564
565 switch (ctx->opcode & 0xf000) {
566 case 0x1000: /* mov.l Rm,@(disp,Rn) */
567 {
568 TCGv addr = tcg_temp_new();
569 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
570 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
571 tcg_temp_free(addr);
572 }
573 return;
574 case 0x5000: /* mov.l @(disp,Rm),Rn */
575 {
576 TCGv addr = tcg_temp_new();
577 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
578 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
579 tcg_temp_free(addr);
580 }
581 return;
582 case 0xe000: /* mov #imm,Rn */
583 tcg_gen_movi_i32(REG(B11_8), B7_0s);
584 return;
585 case 0x9000: /* mov.w @(disp,PC),Rn */
586 {
587 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
588 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
589 tcg_temp_free(addr);
590 }
591 return;
592 case 0xd000: /* mov.l @(disp,PC),Rn */
593 {
594 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
595 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
596 tcg_temp_free(addr);
597 }
598 return;
599 case 0x7000: /* add #imm,Rn */
600 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
601 return;
602 case 0xa000: /* bra disp */
603 CHECK_NOT_DELAY_SLOT
604 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
605 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
606 ctx->flags |= DELAY_SLOT;
607 return;
608 case 0xb000: /* bsr disp */
609 CHECK_NOT_DELAY_SLOT
610 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
611 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
612 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
613 ctx->flags |= DELAY_SLOT;
614 return;
615 }
616
617 switch (ctx->opcode & 0xf00f) {
618 case 0x6003: /* mov Rm,Rn */
619 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
620 return;
621 case 0x2000: /* mov.b Rm,@Rn */
622 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
623 return;
624 case 0x2001: /* mov.w Rm,@Rn */
625 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
626 return;
627 case 0x2002: /* mov.l Rm,@Rn */
628 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
629 return;
630 case 0x6000: /* mov.b @Rm,Rn */
631 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
632 return;
633 case 0x6001: /* mov.w @Rm,Rn */
634 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
635 return;
636 case 0x6002: /* mov.l @Rm,Rn */
637 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
638 return;
639 case 0x2004: /* mov.b Rm,@-Rn */
640 {
641 TCGv addr = tcg_temp_new();
642 tcg_gen_subi_i32(addr, REG(B11_8), 1);
643 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
644 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
645 tcg_temp_free(addr);
646 }
647 return;
648 case 0x2005: /* mov.w Rm,@-Rn */
649 {
650 TCGv addr = tcg_temp_new();
651 tcg_gen_subi_i32(addr, REG(B11_8), 2);
652 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
653 tcg_gen_mov_i32(REG(B11_8), addr);
654 tcg_temp_free(addr);
655 }
656 return;
657 case 0x2006: /* mov.l Rm,@-Rn */
658 {
659 TCGv addr = tcg_temp_new();
660 tcg_gen_subi_i32(addr, REG(B11_8), 4);
661 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
662 tcg_gen_mov_i32(REG(B11_8), addr);
663 }
664 return;
665 case 0x6004: /* mov.b @Rm+,Rn */
666 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
667 if ( B11_8 != B7_4 )
668 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
669 return;
670 case 0x6005: /* mov.w @Rm+,Rn */
671 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
672 if ( B11_8 != B7_4 )
673 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
674 return;
675 case 0x6006: /* mov.l @Rm+,Rn */
676 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
677 if ( B11_8 != B7_4 )
678 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
679 return;
680 case 0x0004: /* mov.b Rm,@(R0,Rn) */
681 {
682 TCGv addr = tcg_temp_new();
683 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
684 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
685 tcg_temp_free(addr);
686 }
687 return;
688 case 0x0005: /* mov.w Rm,@(R0,Rn) */
689 {
690 TCGv addr = tcg_temp_new();
691 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
692 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
693 tcg_temp_free(addr);
694 }
695 return;
696 case 0x0006: /* mov.l Rm,@(R0,Rn) */
697 {
698 TCGv addr = tcg_temp_new();
699 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
700 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
701 tcg_temp_free(addr);
702 }
703 return;
704 case 0x000c: /* mov.b @(R0,Rm),Rn */
705 {
706 TCGv addr = tcg_temp_new();
707 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
708 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
709 tcg_temp_free(addr);
710 }
711 return;
712 case 0x000d: /* mov.w @(R0,Rm),Rn */
713 {
714 TCGv addr = tcg_temp_new();
715 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
716 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
717 tcg_temp_free(addr);
718 }
719 return;
720 case 0x000e: /* mov.l @(R0,Rm),Rn */
721 {
722 TCGv addr = tcg_temp_new();
723 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
724 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
725 tcg_temp_free(addr);
726 }
727 return;
728 case 0x6008: /* swap.b Rm,Rn */
729 {
730 TCGv high, low;
731 high = tcg_temp_new();
732 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
733 low = tcg_temp_new();
734 tcg_gen_ext16u_i32(low, REG(B7_4));
735 tcg_gen_bswap16_i32(low, low);
736 tcg_gen_or_i32(REG(B11_8), high, low);
737 tcg_temp_free(low);
738 tcg_temp_free(high);
739 }
740 return;
741 case 0x6009: /* swap.w Rm,Rn */
742 {
743 TCGv high, low;
744 high = tcg_temp_new();
745 tcg_gen_shli_i32(high, REG(B7_4), 16);
746 low = tcg_temp_new();
747 tcg_gen_shri_i32(low, REG(B7_4), 16);
748 tcg_gen_ext16u_i32(low, low);
749 tcg_gen_or_i32(REG(B11_8), high, low);
750 tcg_temp_free(low);
751 tcg_temp_free(high);
752 }
753 return;
754 case 0x200d: /* xtrct Rm,Rn */
755 {
756 TCGv high, low;
757 high = tcg_temp_new();
758 tcg_gen_shli_i32(high, REG(B7_4), 16);
759 low = tcg_temp_new();
760 tcg_gen_shri_i32(low, REG(B11_8), 16);
761 tcg_gen_ext16u_i32(low, low);
762 tcg_gen_or_i32(REG(B11_8), high, low);
763 tcg_temp_free(low);
764 tcg_temp_free(high);
765 }
766 return;
767 case 0x300c: /* add Rm,Rn */
768 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
769 return;
770 case 0x300e: /* addc Rm,Rn */
771 gen_helper_addc(REG(B11_8), REG(B7_4), REG(B11_8));
772 return;
773 case 0x300f: /* addv Rm,Rn */
774 gen_helper_addv(REG(B11_8), REG(B7_4), REG(B11_8));
775 return;
776 case 0x2009: /* and Rm,Rn */
777 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
778 return;
779 case 0x3000: /* cmp/eq Rm,Rn */
780 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
781 return;
782 case 0x3003: /* cmp/ge Rm,Rn */
783 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
784 return;
785 case 0x3007: /* cmp/gt Rm,Rn */
786 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
787 return;
788 case 0x3006: /* cmp/hi Rm,Rn */
789 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
790 return;
791 case 0x3002: /* cmp/hs Rm,Rn */
792 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
793 return;
794 case 0x200c: /* cmp/str Rm,Rn */
795 {
796 TCGv cmp1 = tcg_temp_new();
797 TCGv cmp2 = tcg_temp_new();
798 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
799 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
800 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
801 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
802 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
803 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
804 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
805 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
806 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
807 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
808 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
809 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
810 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
811 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
812 tcg_temp_free(cmp2);
813 tcg_temp_free(cmp1);
814 }
815 return;
816 case 0x2007: /* div0s Rm,Rn */
817 {
818 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
819 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
820 TCGv val = tcg_temp_new();
821 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
822 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
823 tcg_temp_free(val);
824 }
825 return;
826 case 0x3004: /* div1 Rm,Rn */
827 gen_helper_div1(REG(B11_8), REG(B7_4), REG(B11_8));
828 return;
829 case 0x300d: /* dmuls.l Rm,Rn */
830 {
831 TCGv_i64 tmp1 = tcg_temp_new_i64();
832 TCGv_i64 tmp2 = tcg_temp_new_i64();
833
834 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
835 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
836 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
837 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
838 tcg_gen_shri_i64(tmp1, tmp1, 32);
839 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
840
841 tcg_temp_free_i64(tmp2);
842 tcg_temp_free_i64(tmp1);
843 }
844 return;
845 case 0x3005: /* dmulu.l Rm,Rn */
846 {
847 TCGv_i64 tmp1 = tcg_temp_new_i64();
848 TCGv_i64 tmp2 = tcg_temp_new_i64();
849
850 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
851 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
852 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
853 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
854 tcg_gen_shri_i64(tmp1, tmp1, 32);
855 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
856
857 tcg_temp_free_i64(tmp2);
858 tcg_temp_free_i64(tmp1);
859 }
860 return;
861 case 0x600e: /* exts.b Rm,Rn */
862 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
863 return;
864 case 0x600f: /* exts.w Rm,Rn */
865 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
866 return;
867 case 0x600c: /* extu.b Rm,Rn */
868 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
869 return;
870 case 0x600d: /* extu.w Rm,Rn */
871 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
872 return;
873 case 0x000f: /* mac.l @Rm+,@Rn+ */
874 {
875 TCGv arg0, arg1;
876 arg0 = tcg_temp_new();
877 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
878 arg1 = tcg_temp_new();
879 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
880 gen_helper_macl(arg0, arg1);
881 tcg_temp_free(arg1);
882 tcg_temp_free(arg0);
883 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
884 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
885 }
886 return;
887 case 0x400f: /* mac.w @Rm+,@Rn+ */
888 {
889 TCGv arg0, arg1;
890 arg0 = tcg_temp_new();
891 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
892 arg1 = tcg_temp_new();
893 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
894 gen_helper_macw(arg0, arg1);
895 tcg_temp_free(arg1);
896 tcg_temp_free(arg0);
897 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
898 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
899 }
900 return;
901 case 0x0007: /* mul.l Rm,Rn */
902 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
903 return;
904 case 0x200f: /* muls.w Rm,Rn */
905 {
906 TCGv arg0, arg1;
907 arg0 = tcg_temp_new();
908 tcg_gen_ext16s_i32(arg0, REG(B7_4));
909 arg1 = tcg_temp_new();
910 tcg_gen_ext16s_i32(arg1, REG(B11_8));
911 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
912 tcg_temp_free(arg1);
913 tcg_temp_free(arg0);
914 }
915 return;
916 case 0x200e: /* mulu.w Rm,Rn */
917 {
918 TCGv arg0, arg1;
919 arg0 = tcg_temp_new();
920 tcg_gen_ext16u_i32(arg0, REG(B7_4));
921 arg1 = tcg_temp_new();
922 tcg_gen_ext16u_i32(arg1, REG(B11_8));
923 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
924 tcg_temp_free(arg1);
925 tcg_temp_free(arg0);
926 }
927 return;
928 case 0x600b: /* neg Rm,Rn */
929 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
930 return;
931 case 0x600a: /* negc Rm,Rn */
932 {
933 TCGv t0, t1;
934 t0 = tcg_temp_new();
935 tcg_gen_neg_i32(t0, REG(B7_4));
936 t1 = tcg_temp_new();
937 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
938 tcg_gen_sub_i32(REG(B11_8), t0, t1);
939 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
940 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
941 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
942 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
943 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
944 tcg_temp_free(t0);
945 tcg_temp_free(t1);
946 }
947 return;
948 case 0x6007: /* not Rm,Rn */
949 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
950 return;
951 case 0x200b: /* or Rm,Rn */
952 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
953 return;
954 case 0x400c: /* shad Rm,Rn */
955 {
956 int label1 = gen_new_label();
957 int label2 = gen_new_label();
958 int label3 = gen_new_label();
959 int label4 = gen_new_label();
960 TCGv shift;
961 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
962 /* Rm positive, shift to the left */
963 shift = tcg_temp_new();
964 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
965 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
966 tcg_temp_free(shift);
967 tcg_gen_br(label4);
968 /* Rm negative, shift to the right */
969 gen_set_label(label1);
970 shift = tcg_temp_new();
971 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
972 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
973 tcg_gen_not_i32(shift, REG(B7_4));
974 tcg_gen_andi_i32(shift, shift, 0x1f);
975 tcg_gen_addi_i32(shift, shift, 1);
976 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
977 tcg_temp_free(shift);
978 tcg_gen_br(label4);
979 /* Rm = -32 */
980 gen_set_label(label2);
981 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
982 tcg_gen_movi_i32(REG(B11_8), 0);
983 tcg_gen_br(label4);
984 gen_set_label(label3);
985 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
986 gen_set_label(label4);
987 }
988 return;
989 case 0x400d: /* shld Rm,Rn */
990 {
991 int label1 = gen_new_label();
992 int label2 = gen_new_label();
993 int label3 = gen_new_label();
994 TCGv shift;
995 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
996 /* Rm positive, shift to the left */
997 shift = tcg_temp_new();
998 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
999 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
1000 tcg_temp_free(shift);
1001 tcg_gen_br(label3);
1002 /* Rm negative, shift to the right */
1003 gen_set_label(label1);
1004 shift = tcg_temp_new();
1005 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
1006 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1007 tcg_gen_not_i32(shift, REG(B7_4));
1008 tcg_gen_andi_i32(shift, shift, 0x1f);
1009 tcg_gen_addi_i32(shift, shift, 1);
1010 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1011 tcg_temp_free(shift);
1012 tcg_gen_br(label3);
1013 /* Rm = -32 */
1014 gen_set_label(label2);
1015 tcg_gen_movi_i32(REG(B11_8), 0);
1016 gen_set_label(label3);
1017 }
1018 return;
1019 case 0x3008: /* sub Rm,Rn */
1020 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1021 return;
1022 case 0x300a: /* subc Rm,Rn */
1023 gen_helper_subc(REG(B11_8), REG(B7_4), REG(B11_8));
1024 return;
1025 case 0x300b: /* subv Rm,Rn */
1026 gen_helper_subv(REG(B11_8), REG(B7_4), REG(B11_8));
1027 return;
1028 case 0x2008: /* tst Rm,Rn */
1029 {
1030 TCGv val = tcg_temp_new();
1031 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1032 gen_cmp_imm(TCG_COND_EQ, val, 0);
1033 tcg_temp_free(val);
1034 }
1035 return;
1036 case 0x200a: /* xor Rm,Rn */
1037 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1038 return;
1039 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1040 CHECK_FPU_ENABLED
1041 if (ctx->fpscr & FPSCR_SZ) {
1042 TCGv_i64 fp = tcg_temp_new_i64();
1043 gen_load_fpr64(fp, XREG(B7_4));
1044 gen_store_fpr64(fp, XREG(B11_8));
1045 tcg_temp_free_i64(fp);
1046 } else {
1047 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1048 }
1049 return;
1050 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1051 CHECK_FPU_ENABLED
1052 if (ctx->fpscr & FPSCR_SZ) {
1053 TCGv addr_hi = tcg_temp_new();
1054 int fr = XREG(B7_4);
1055 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1056 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1057 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1058 tcg_temp_free(addr_hi);
1059 } else {
1060 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1061 }
1062 return;
1063 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1064 CHECK_FPU_ENABLED
1065 if (ctx->fpscr & FPSCR_SZ) {
1066 TCGv addr_hi = tcg_temp_new();
1067 int fr = XREG(B11_8);
1068 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1069 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1070 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1071 tcg_temp_free(addr_hi);
1072 } else {
1073 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1074 }
1075 return;
1076 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1077 CHECK_FPU_ENABLED
1078 if (ctx->fpscr & FPSCR_SZ) {
1079 TCGv addr_hi = tcg_temp_new();
1080 int fr = XREG(B11_8);
1081 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1082 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1083 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1084 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1085 tcg_temp_free(addr_hi);
1086 } else {
1087 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1088 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1089 }
1090 return;
1091 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1092 CHECK_FPU_ENABLED
1093 if (ctx->fpscr & FPSCR_SZ) {
1094 TCGv addr = tcg_temp_new_i32();
1095 int fr = XREG(B7_4);
1096 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1097 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1098 tcg_gen_subi_i32(addr, addr, 4);
1099 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1100 tcg_gen_mov_i32(REG(B11_8), addr);
1101 tcg_temp_free(addr);
1102 } else {
1103 TCGv addr;
1104 addr = tcg_temp_new_i32();
1105 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1106 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1107 tcg_gen_mov_i32(REG(B11_8), addr);
1108 tcg_temp_free(addr);
1109 }
1110 return;
1111 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1112 CHECK_FPU_ENABLED
1113 {
1114 TCGv addr = tcg_temp_new_i32();
1115 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1116 if (ctx->fpscr & FPSCR_SZ) {
1117 int fr = XREG(B11_8);
1118 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1119 tcg_gen_addi_i32(addr, addr, 4);
1120 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1121 } else {
1122 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1123 }
1124 tcg_temp_free(addr);
1125 }
1126 return;
1127 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1128 CHECK_FPU_ENABLED
1129 {
1130 TCGv addr = tcg_temp_new();
1131 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1132 if (ctx->fpscr & FPSCR_SZ) {
1133 int fr = XREG(B7_4);
1134 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1135 tcg_gen_addi_i32(addr, addr, 4);
1136 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1137 } else {
1138 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1139 }
1140 tcg_temp_free(addr);
1141 }
1142 return;
1143 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1144 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1145 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1146 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1147 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1148 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1149 {
1150 CHECK_FPU_ENABLED
1151 if (ctx->fpscr & FPSCR_PR) {
1152 TCGv_i64 fp0, fp1;
1153
1154 if (ctx->opcode & 0x0110)
1155 break; /* illegal instruction */
1156 fp0 = tcg_temp_new_i64();
1157 fp1 = tcg_temp_new_i64();
1158 gen_load_fpr64(fp0, DREG(B11_8));
1159 gen_load_fpr64(fp1, DREG(B7_4));
1160 switch (ctx->opcode & 0xf00f) {
1161 case 0xf000: /* fadd Rm,Rn */
1162 gen_helper_fadd_DT(fp0, fp0, fp1);
1163 break;
1164 case 0xf001: /* fsub Rm,Rn */
1165 gen_helper_fsub_DT(fp0, fp0, fp1);
1166 break;
1167 case 0xf002: /* fmul Rm,Rn */
1168 gen_helper_fmul_DT(fp0, fp0, fp1);
1169 break;
1170 case 0xf003: /* fdiv Rm,Rn */
1171 gen_helper_fdiv_DT(fp0, fp0, fp1);
1172 break;
1173 case 0xf004: /* fcmp/eq Rm,Rn */
1174 gen_helper_fcmp_eq_DT(fp0, fp1);
1175 return;
1176 case 0xf005: /* fcmp/gt Rm,Rn */
1177 gen_helper_fcmp_gt_DT(fp0, fp1);
1178 return;
1179 }
1180 gen_store_fpr64(fp0, DREG(B11_8));
1181 tcg_temp_free_i64(fp0);
1182 tcg_temp_free_i64(fp1);
1183 } else {
1184 switch (ctx->opcode & 0xf00f) {
1185 case 0xf000: /* fadd Rm,Rn */
1186 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1187 break;
1188 case 0xf001: /* fsub Rm,Rn */
1189 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1190 break;
1191 case 0xf002: /* fmul Rm,Rn */
1192 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1193 break;
1194 case 0xf003: /* fdiv Rm,Rn */
1195 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1196 break;
1197 case 0xf004: /* fcmp/eq Rm,Rn */
1198 gen_helper_fcmp_eq_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1199 return;
1200 case 0xf005: /* fcmp/gt Rm,Rn */
1201 gen_helper_fcmp_gt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1202 return;
1203 }
1204 }
1205 }
1206 return;
1207 case 0xf00e: /* fmac FR0,RM,Rn */
1208 {
1209 CHECK_FPU_ENABLED
1210 if (ctx->fpscr & FPSCR_PR) {
1211 break; /* illegal instruction */
1212 } else {
1213 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)],
1214 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)], cpu_fregs[FREG(B11_8)]);
1215 return;
1216 }
1217 }
1218 }
1219
1220 switch (ctx->opcode & 0xff00) {
1221 case 0xc900: /* and #imm,R0 */
1222 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1223 return;
1224 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1225 {
1226 TCGv addr, val;
1227 addr = tcg_temp_new();
1228 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1229 val = tcg_temp_new();
1230 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1231 tcg_gen_andi_i32(val, val, B7_0);
1232 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1233 tcg_temp_free(val);
1234 tcg_temp_free(addr);
1235 }
1236 return;
1237 case 0x8b00: /* bf label */
1238 CHECK_NOT_DELAY_SLOT
1239 gen_conditional_jump(ctx, ctx->pc + 2,
1240 ctx->pc + 4 + B7_0s * 2);
1241 ctx->bstate = BS_BRANCH;
1242 return;
1243 case 0x8f00: /* bf/s label */
1244 CHECK_NOT_DELAY_SLOT
1245 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1246 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1247 return;
1248 case 0x8900: /* bt label */
1249 CHECK_NOT_DELAY_SLOT
1250 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1251 ctx->pc + 2);
1252 ctx->bstate = BS_BRANCH;
1253 return;
1254 case 0x8d00: /* bt/s label */
1255 CHECK_NOT_DELAY_SLOT
1256 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1257 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1258 return;
1259 case 0x8800: /* cmp/eq #imm,R0 */
1260 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1261 return;
1262 case 0xc400: /* mov.b @(disp,GBR),R0 */
1263 {
1264 TCGv addr = tcg_temp_new();
1265 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1266 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1267 tcg_temp_free(addr);
1268 }
1269 return;
1270 case 0xc500: /* mov.w @(disp,GBR),R0 */
1271 {
1272 TCGv addr = tcg_temp_new();
1273 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1274 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1275 tcg_temp_free(addr);
1276 }
1277 return;
1278 case 0xc600: /* mov.l @(disp,GBR),R0 */
1279 {
1280 TCGv addr = tcg_temp_new();
1281 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1282 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1283 tcg_temp_free(addr);
1284 }
1285 return;
1286 case 0xc000: /* mov.b R0,@(disp,GBR) */
1287 {
1288 TCGv addr = tcg_temp_new();
1289 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1290 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1291 tcg_temp_free(addr);
1292 }
1293 return;
1294 case 0xc100: /* mov.w R0,@(disp,GBR) */
1295 {
1296 TCGv addr = tcg_temp_new();
1297 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1298 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1299 tcg_temp_free(addr);
1300 }
1301 return;
1302 case 0xc200: /* mov.l R0,@(disp,GBR) */
1303 {
1304 TCGv addr = tcg_temp_new();
1305 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1306 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1307 tcg_temp_free(addr);
1308 }
1309 return;
1310 case 0x8000: /* mov.b R0,@(disp,Rn) */
1311 {
1312 TCGv addr = tcg_temp_new();
1313 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1314 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1315 tcg_temp_free(addr);
1316 }
1317 return;
1318 case 0x8100: /* mov.w R0,@(disp,Rn) */
1319 {
1320 TCGv addr = tcg_temp_new();
1321 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1322 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1323 tcg_temp_free(addr);
1324 }
1325 return;
1326 case 0x8400: /* mov.b @(disp,Rn),R0 */
1327 {
1328 TCGv addr = tcg_temp_new();
1329 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1330 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1331 tcg_temp_free(addr);
1332 }
1333 return;
1334 case 0x8500: /* mov.w @(disp,Rn),R0 */
1335 {
1336 TCGv addr = tcg_temp_new();
1337 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1338 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1339 tcg_temp_free(addr);
1340 }
1341 return;
1342 case 0xc700: /* mova @(disp,PC),R0 */
1343 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1344 return;
1345 case 0xcb00: /* or #imm,R0 */
1346 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1347 return;
1348 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1349 {
1350 TCGv addr, val;
1351 addr = tcg_temp_new();
1352 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1353 val = tcg_temp_new();
1354 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1355 tcg_gen_ori_i32(val, val, B7_0);
1356 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1357 tcg_temp_free(val);
1358 tcg_temp_free(addr);
1359 }
1360 return;
1361 case 0xc300: /* trapa #imm */
1362 {
1363 TCGv imm;
1364 CHECK_NOT_DELAY_SLOT
1365 imm = tcg_const_i32(B7_0);
1366 gen_helper_trapa(imm);
1367 tcg_temp_free(imm);
1368 ctx->bstate = BS_BRANCH;
1369 }
1370 return;
1371 case 0xc800: /* tst #imm,R0 */
1372 {
1373 TCGv val = tcg_temp_new();
1374 tcg_gen_andi_i32(val, REG(0), B7_0);
1375 gen_cmp_imm(TCG_COND_EQ, val, 0);
1376 tcg_temp_free(val);
1377 }
1378 return;
1379 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1380 {
1381 TCGv val = tcg_temp_new();
1382 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1383 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1384 tcg_gen_andi_i32(val, val, B7_0);
1385 gen_cmp_imm(TCG_COND_EQ, val, 0);
1386 tcg_temp_free(val);
1387 }
1388 return;
1389 case 0xca00: /* xor #imm,R0 */
1390 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1391 return;
1392 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1393 {
1394 TCGv addr, val;
1395 addr = tcg_temp_new();
1396 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1397 val = tcg_temp_new();
1398 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1399 tcg_gen_xori_i32(val, val, B7_0);
1400 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1401 tcg_temp_free(val);
1402 tcg_temp_free(addr);
1403 }
1404 return;
1405 }
1406
1407 switch (ctx->opcode & 0xf08f) {
1408 case 0x408e: /* ldc Rm,Rn_BANK */
1409 CHECK_PRIVILEGED
1410 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1411 return;
1412 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1413 CHECK_PRIVILEGED
1414 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1415 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1416 return;
1417 case 0x0082: /* stc Rm_BANK,Rn */
1418 CHECK_PRIVILEGED
1419 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1420 return;
1421 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1422 CHECK_PRIVILEGED
1423 {
1424 TCGv addr = tcg_temp_new();
1425 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1426 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1427 tcg_gen_mov_i32(REG(B11_8), addr);
1428 tcg_temp_free(addr);
1429 }
1430 return;
1431 }
1432
1433 switch (ctx->opcode & 0xf0ff) {
1434 case 0x0023: /* braf Rn */
1435 CHECK_NOT_DELAY_SLOT
1436 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1437 ctx->flags |= DELAY_SLOT;
1438 ctx->delayed_pc = (uint32_t) - 1;
1439 return;
1440 case 0x0003: /* bsrf Rn */
1441 CHECK_NOT_DELAY_SLOT
1442 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1443 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1444 ctx->flags |= DELAY_SLOT;
1445 ctx->delayed_pc = (uint32_t) - 1;
1446 return;
1447 case 0x4015: /* cmp/pl Rn */
1448 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1449 return;
1450 case 0x4011: /* cmp/pz Rn */
1451 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1452 return;
1453 case 0x4010: /* dt Rn */
1454 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1455 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1456 return;
1457 case 0x402b: /* jmp @Rn */
1458 CHECK_NOT_DELAY_SLOT
1459 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1460 ctx->flags |= DELAY_SLOT;
1461 ctx->delayed_pc = (uint32_t) - 1;
1462 return;
1463 case 0x400b: /* jsr @Rn */
1464 CHECK_NOT_DELAY_SLOT
1465 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1466 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1467 ctx->flags |= DELAY_SLOT;
1468 ctx->delayed_pc = (uint32_t) - 1;
1469 return;
1470 case 0x400e: /* ldc Rm,SR */
1471 CHECK_PRIVILEGED
1472 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1473 ctx->bstate = BS_STOP;
1474 return;
1475 case 0x4007: /* ldc.l @Rm+,SR */
1476 CHECK_PRIVILEGED
1477 {
1478 TCGv val = tcg_temp_new();
1479 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1480 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1481 tcg_temp_free(val);
1482 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1483 ctx->bstate = BS_STOP;
1484 }
1485 return;
1486 case 0x0002: /* stc SR,Rn */
1487 CHECK_PRIVILEGED
1488 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1489 return;
1490 case 0x4003: /* stc SR,@-Rn */
1491 CHECK_PRIVILEGED
1492 {
1493 TCGv addr = tcg_temp_new();
1494 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1495 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1496 tcg_gen_mov_i32(REG(B11_8), addr);
1497 tcg_temp_free(addr);
1498 }
1499 return;
1500 #define LD(reg,ldnum,ldpnum,prechk) \
1501 case ldnum: \
1502 prechk \
1503 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1504 return; \
1505 case ldpnum: \
1506 prechk \
1507 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1508 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1509 return;
1510 #define ST(reg,stnum,stpnum,prechk) \
1511 case stnum: \
1512 prechk \
1513 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1514 return; \
1515 case stpnum: \
1516 prechk \
1517 { \
1518 TCGv addr = tcg_temp_new(); \
1519 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1520 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1521 tcg_gen_mov_i32(REG(B11_8), addr); \
1522 tcg_temp_free(addr); \
1523 } \
1524 return;
1525 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1526 LD(reg,ldnum,ldpnum,prechk) \
1527 ST(reg,stnum,stpnum,prechk)
1528 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1529 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1530 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1531 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1532 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1533 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1534 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1535 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1536 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1537 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1538 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1539 case 0x406a: /* lds Rm,FPSCR */
1540 CHECK_FPU_ENABLED
1541 gen_helper_ld_fpscr(REG(B11_8));
1542 ctx->bstate = BS_STOP;
1543 return;
1544 case 0x4066: /* lds.l @Rm+,FPSCR */
1545 CHECK_FPU_ENABLED
1546 {
1547 TCGv addr = tcg_temp_new();
1548 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1549 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1550 gen_helper_ld_fpscr(addr);
1551 tcg_temp_free(addr);
1552 ctx->bstate = BS_STOP;
1553 }
1554 return;
1555 case 0x006a: /* sts FPSCR,Rn */
1556 CHECK_FPU_ENABLED
1557 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1558 return;
1559 case 0x4062: /* sts FPSCR,@-Rn */
1560 CHECK_FPU_ENABLED
1561 {
1562 TCGv addr, val;
1563 val = tcg_temp_new();
1564 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1565 addr = tcg_temp_new();
1566 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1567 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1568 tcg_gen_mov_i32(REG(B11_8), addr);
1569 tcg_temp_free(addr);
1570 tcg_temp_free(val);
1571 }
1572 return;
1573 case 0x00c3: /* movca.l R0,@Rm */
1574 {
1575 TCGv val = tcg_temp_new();
1576 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1577 gen_helper_movcal (REG(B11_8), val);
1578 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1579 }
1580 ctx->has_movcal = 1;
1581 return;
1582 case 0x40a9:
1583 /* MOVUA.L @Rm,R0 (Rm) -> R0
1584 Load non-boundary-aligned data */
1585 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1586 return;
1587 case 0x40e9:
1588 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1589 Load non-boundary-aligned data */
1590 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1591 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1592 return;
1593 case 0x0029: /* movt Rn */
1594 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1595 return;
1596 case 0x0073:
1597 /* MOVCO.L
1598 LDST -> T
1599 If (T == 1) R0 -> (Rn)
1600 0 -> LDST
1601 */
1602 if (ctx->features & SH_FEATURE_SH4A) {
1603 int label = gen_new_label();
1604 gen_clr_t();
1605 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1606 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1607 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1608 gen_set_label(label);
1609 tcg_gen_movi_i32(cpu_ldst, 0);
1610 return;
1611 } else
1612 break;
1613 case 0x0063:
1614 /* MOVLI.L @Rm,R0
1615 1 -> LDST
1616 (Rm) -> R0
1617 When interrupt/exception
1618 occurred 0 -> LDST
1619 */
1620 if (ctx->features & SH_FEATURE_SH4A) {
1621 tcg_gen_movi_i32(cpu_ldst, 0);
1622 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1623 tcg_gen_movi_i32(cpu_ldst, 1);
1624 return;
1625 } else
1626 break;
1627 case 0x0093: /* ocbi @Rn */
1628 {
1629 gen_helper_ocbi (REG(B11_8));
1630 }
1631 return;
1632 case 0x00a3: /* ocbp @Rn */
1633 case 0x00b3: /* ocbwb @Rn */
1634 /* These instructions are supposed to do nothing in case of
1635 a cache miss. Given that we only partially emulate caches
1636 it is safe to simply ignore them. */
1637 return;
1638 case 0x0083: /* pref @Rn */
1639 return;
1640 case 0x00d3: /* prefi @Rn */
1641 if (ctx->features & SH_FEATURE_SH4A)
1642 return;
1643 else
1644 break;
1645 case 0x00e3: /* icbi @Rn */
1646 if (ctx->features & SH_FEATURE_SH4A)
1647 return;
1648 else
1649 break;
1650 case 0x00ab: /* synco */
1651 if (ctx->features & SH_FEATURE_SH4A)
1652 return;
1653 else
1654 break;
1655 case 0x4024: /* rotcl Rn */
1656 {
1657 TCGv tmp = tcg_temp_new();
1658 tcg_gen_mov_i32(tmp, cpu_sr);
1659 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1660 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1661 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1662 tcg_temp_free(tmp);
1663 }
1664 return;
1665 case 0x4025: /* rotcr Rn */
1666 {
1667 TCGv tmp = tcg_temp_new();
1668 tcg_gen_mov_i32(tmp, cpu_sr);
1669 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1670 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1671 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1672 tcg_temp_free(tmp);
1673 }
1674 return;
1675 case 0x4004: /* rotl Rn */
1676 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1677 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1678 return;
1679 case 0x4005: /* rotr Rn */
1680 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1681 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1682 return;
1683 case 0x4000: /* shll Rn */
1684 case 0x4020: /* shal Rn */
1685 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1686 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1687 return;
1688 case 0x4021: /* shar Rn */
1689 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1690 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1691 return;
1692 case 0x4001: /* shlr Rn */
1693 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1694 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1695 return;
1696 case 0x4008: /* shll2 Rn */
1697 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1698 return;
1699 case 0x4018: /* shll8 Rn */
1700 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1701 return;
1702 case 0x4028: /* shll16 Rn */
1703 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1704 return;
1705 case 0x4009: /* shlr2 Rn */
1706 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1707 return;
1708 case 0x4019: /* shlr8 Rn */
1709 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1710 return;
1711 case 0x4029: /* shlr16 Rn */
1712 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1713 return;
1714 case 0x401b: /* tas.b @Rn */
1715 {
1716 TCGv addr, val;
1717 addr = tcg_temp_local_new();
1718 tcg_gen_mov_i32(addr, REG(B11_8));
1719 val = tcg_temp_local_new();
1720 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1721 gen_cmp_imm(TCG_COND_EQ, val, 0);
1722 tcg_gen_ori_i32(val, val, 0x80);
1723 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1724 tcg_temp_free(val);
1725 tcg_temp_free(addr);
1726 }
1727 return;
1728 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1729 CHECK_FPU_ENABLED
1730 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1731 return;
1732 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1733 CHECK_FPU_ENABLED
1734 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1735 return;
1736 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1737 CHECK_FPU_ENABLED
1738 if (ctx->fpscr & FPSCR_PR) {
1739 TCGv_i64 fp;
1740 if (ctx->opcode & 0x0100)
1741 break; /* illegal instruction */
1742 fp = tcg_temp_new_i64();
1743 gen_helper_float_DT(fp, cpu_fpul);
1744 gen_store_fpr64(fp, DREG(B11_8));
1745 tcg_temp_free_i64(fp);
1746 }
1747 else {
1748 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_fpul);
1749 }
1750 return;
1751 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1752 CHECK_FPU_ENABLED
1753 if (ctx->fpscr & FPSCR_PR) {
1754 TCGv_i64 fp;
1755 if (ctx->opcode & 0x0100)
1756 break; /* illegal instruction */
1757 fp = tcg_temp_new_i64();
1758 gen_load_fpr64(fp, DREG(B11_8));
1759 gen_helper_ftrc_DT(cpu_fpul, fp);
1760 tcg_temp_free_i64(fp);
1761 }
1762 else {
1763 gen_helper_ftrc_FT(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1764 }
1765 return;
1766 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1767 CHECK_FPU_ENABLED
1768 {
1769 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1770 }
1771 return;
1772 case 0xf05d: /* fabs FRn/DRn */
1773 CHECK_FPU_ENABLED
1774 if (ctx->fpscr & FPSCR_PR) {
1775 if (ctx->opcode & 0x0100)
1776 break; /* illegal instruction */
1777 TCGv_i64 fp = tcg_temp_new_i64();
1778 gen_load_fpr64(fp, DREG(B11_8));
1779 gen_helper_fabs_DT(fp, fp);
1780 gen_store_fpr64(fp, DREG(B11_8));
1781 tcg_temp_free_i64(fp);
1782 } else {
1783 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1784 }
1785 return;
1786 case 0xf06d: /* fsqrt FRn */
1787 CHECK_FPU_ENABLED
1788 if (ctx->fpscr & FPSCR_PR) {
1789 if (ctx->opcode & 0x0100)
1790 break; /* illegal instruction */
1791 TCGv_i64 fp = tcg_temp_new_i64();
1792 gen_load_fpr64(fp, DREG(B11_8));
1793 gen_helper_fsqrt_DT(fp, fp);
1794 gen_store_fpr64(fp, DREG(B11_8));
1795 tcg_temp_free_i64(fp);
1796 } else {
1797 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1798 }
1799 return;
1800 case 0xf07d: /* fsrra FRn */
1801 CHECK_FPU_ENABLED
1802 break;
1803 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1804 CHECK_FPU_ENABLED
1805 if (!(ctx->fpscr & FPSCR_PR)) {
1806 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1807 }
1808 return;
1809 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1810 CHECK_FPU_ENABLED
1811 if (!(ctx->fpscr & FPSCR_PR)) {
1812 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1813 }
1814 return;
1815 case 0xf0ad: /* fcnvsd FPUL,DRn */
1816 CHECK_FPU_ENABLED
1817 {
1818 TCGv_i64 fp = tcg_temp_new_i64();
1819 gen_helper_fcnvsd_FT_DT(fp, cpu_fpul);
1820 gen_store_fpr64(fp, DREG(B11_8));
1821 tcg_temp_free_i64(fp);
1822 }
1823 return;
1824 case 0xf0bd: /* fcnvds DRn,FPUL */
1825 CHECK_FPU_ENABLED
1826 {
1827 TCGv_i64 fp = tcg_temp_new_i64();
1828 gen_load_fpr64(fp, DREG(B11_8));
1829 gen_helper_fcnvds_DT_FT(cpu_fpul, fp);
1830 tcg_temp_free_i64(fp);
1831 }
1832 return;
1833 case 0xf0ed: /* fipr FVm,FVn */
1834 CHECK_FPU_ENABLED
1835 if ((ctx->fpscr & FPSCR_PR) == 0) {
1836 TCGv m, n;
1837 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1838 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1839 gen_helper_fipr(m, n);
1840 tcg_temp_free(m);
1841 tcg_temp_free(n);
1842 return;
1843 }
1844 break;
1845 case 0xf0fd: /* ftrv XMTRX,FVn */
1846 CHECK_FPU_ENABLED
1847 if ((ctx->opcode & 0x0300) == 0x0100 &&
1848 (ctx->fpscr & FPSCR_PR) == 0) {
1849 TCGv n;
1850 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1851 gen_helper_ftrv(n);
1852 tcg_temp_free(n);
1853 return;
1854 }
1855 break;
1856 }
1857 #if 0
1858 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1859 ctx->opcode, ctx->pc);
1860 fflush(stderr);
1861 #endif
1862 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1863 gen_helper_raise_slot_illegal_instruction();
1864 } else {
1865 gen_helper_raise_illegal_instruction();
1866 }
1867 ctx->bstate = BS_EXCP;
1868 }
1869
1870 static void decode_opc(DisasContext * ctx)
1871 {
1872 uint32_t old_flags = ctx->flags;
1873
1874 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
1875 tcg_gen_debug_insn_start(ctx->pc);
1876 }
1877
1878 _decode_opc(ctx);
1879
1880 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1881 if (ctx->flags & DELAY_SLOT_CLEARME) {
1882 gen_store_flags(0);
1883 } else {
1884 /* go out of the delay slot */
1885 uint32_t new_flags = ctx->flags;
1886 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1887 gen_store_flags(new_flags);
1888 }
1889 ctx->flags = 0;
1890 ctx->bstate = BS_BRANCH;
1891 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1892 gen_delayed_conditional_jump(ctx);
1893 } else if (old_flags & DELAY_SLOT) {
1894 gen_jump(ctx);
1895 }
1896
1897 }
1898
1899 /* go into a delay slot */
1900 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1901 gen_store_flags(ctx->flags);
1902 }
1903
1904 static inline void
1905 gen_intermediate_code_internal(CPUSH4State * env, TranslationBlock * tb,
1906 int search_pc)
1907 {
1908 DisasContext ctx;
1909 target_ulong pc_start;
1910 static uint16_t *gen_opc_end;
1911 CPUBreakpoint *bp;
1912 int i, ii;
1913 int num_insns;
1914 int max_insns;
1915
1916 pc_start = tb->pc;
1917 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1918 ctx.pc = pc_start;
1919 ctx.flags = (uint32_t)tb->flags;
1920 ctx.bstate = BS_NONE;
1921 ctx.sr = env->sr;
1922 ctx.fpscr = env->fpscr;
1923 ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
1924 /* We don't know if the delayed pc came from a dynamic or static branch,
1925 so assume it is a dynamic branch. */
1926 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1927 ctx.tb = tb;
1928 ctx.singlestep_enabled = env->singlestep_enabled;
1929 ctx.features = env->features;
1930 ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
1931
1932 ii = -1;
1933 num_insns = 0;
1934 max_insns = tb->cflags & CF_COUNT_MASK;
1935 if (max_insns == 0)
1936 max_insns = CF_COUNT_MASK;
1937 gen_icount_start();
1938 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1939 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1940 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1941 if (ctx.pc == bp->pc) {
1942 /* We have hit a breakpoint - make sure PC is up-to-date */
1943 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1944 gen_helper_debug();
1945 ctx.bstate = BS_EXCP;
1946 break;
1947 }
1948 }
1949 }
1950 if (search_pc) {
1951 i = gen_opc_ptr - gen_opc_buf;
1952 if (ii < i) {
1953 ii++;
1954 while (ii < i)
1955 gen_opc_instr_start[ii++] = 0;
1956 }
1957 gen_opc_pc[ii] = ctx.pc;
1958 gen_opc_hflags[ii] = ctx.flags;
1959 gen_opc_instr_start[ii] = 1;
1960 gen_opc_icount[ii] = num_insns;
1961 }
1962 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1963 gen_io_start();
1964 #if 0
1965 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1966 fflush(stderr);
1967 #endif
1968 ctx.opcode = lduw_code(ctx.pc);
1969 decode_opc(&ctx);
1970 num_insns++;
1971 ctx.pc += 2;
1972 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1973 break;
1974 if (env->singlestep_enabled)
1975 break;
1976 if (num_insns >= max_insns)
1977 break;
1978 if (singlestep)
1979 break;
1980 }
1981 if (tb->cflags & CF_LAST_IO)
1982 gen_io_end();
1983 if (env->singlestep_enabled) {
1984 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1985 gen_helper_debug();
1986 } else {
1987 switch (ctx.bstate) {
1988 case BS_STOP:
1989 /* gen_op_interrupt_restart(); */
1990 /* fall through */
1991 case BS_NONE:
1992 if (ctx.flags) {
1993 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1994 }
1995 gen_goto_tb(&ctx, 0, ctx.pc);
1996 break;
1997 case BS_EXCP:
1998 /* gen_op_interrupt_restart(); */
1999 tcg_gen_exit_tb(0);
2000 break;
2001 case BS_BRANCH:
2002 default:
2003 break;
2004 }
2005 }
2006
2007 gen_icount_end(tb, num_insns);
2008 *gen_opc_ptr = INDEX_op_end;
2009 if (search_pc) {
2010 i = gen_opc_ptr - gen_opc_buf;
2011 ii++;
2012 while (ii <= i)
2013 gen_opc_instr_start[ii++] = 0;
2014 } else {
2015 tb->size = ctx.pc - pc_start;
2016 tb->icount = num_insns;
2017 }
2018
2019 #ifdef DEBUG_DISAS
2020 #ifdef SH4_DEBUG_DISAS
2021 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2022 #endif
2023 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2024 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2025 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2026 qemu_log("\n");
2027 }
2028 #endif
2029 }
2030
2031 void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
2032 {
2033 gen_intermediate_code_internal(env, tb, 0);
2034 }
2035
2036 void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
2037 {
2038 gen_intermediate_code_internal(env, tb, 1);
2039 }
2040
2041 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
2042 {
2043 env->pc = gen_opc_pc[pc_pos];
2044 env->flags = gen_opc_hflags[pc_pos];
2045 }