]> git.proxmox.com Git - qemu.git/blob - target-sh4/translate.c
Merge remote-tracking branch 'kwolf/for-anthony' into staging
[qemu.git] / target-sh4 / translate.c
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #define DEBUG_DISAS
21 #define SH4_DEBUG_DISAS
22 //#define SH4_SINGLE_STEP
23
24 #include "cpu.h"
25 #include "disas.h"
26 #include "tcg-op.h"
27
28 #include "helper.h"
29 #define GEN_HELPER 1
30 #include "helper.h"
31
/* Per-translation-block state of the SH4 instruction decoder. */
typedef struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    target_ulong pc;               /* address of the insn being decoded */
    uint32_t sr;                   /* SR snapshot (MD/RB register banking) */
    uint32_t fpscr;                /* FPSCR snapshot (FR/SZ bank/size bits) */
    uint16_t opcode;               /* current 16-bit opcode */
    uint32_t flags;                /* TB flags (delay-slot state etc.) */
    int bstate;                    /* BS_* translation-stop state */
    int memidx;                    /* MMU index for qemu_ld/st ops */
    uint32_t delayed_pc;           /* static branch target, or -1 if dynamic */
    int singlestep_enabled;        /* gdbstub single-stepping requested */
    uint32_t features;             /* CPU feature bits (SH_FEATURE_*) */
    int has_movcal;                /* movca.l seen; backup data may be live */
} DisasContext;
46
/* In user-mode emulation everything runs unprivileged; otherwise the
   privilege level comes from SR.MD of the translated state. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->sr & SR_MD))
#endif
52
/* Values for DisasContext::bstate. */
enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition
                  */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};
61
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];   /* R0..R7 bank0, R8..R15, R0..R7 bank1 */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];   /* FPR0..15 bank0, FPR0..15 bank1 */

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

/* per-opcode flags recorded during translation for state restore */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "gen-icount.h"
76
/* Create the TCG global variables that mirror CPUSH4State fields.
   Idempotent: after the first successful call it returns immediately. */
static void sh4_translate_init(void)
{
    int i;
    static int done_init = 0;   /* one-shot guard */
    static const char * const gregnames[24] = {
        "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
        "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
        "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
        "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
    };
    static const char * const fregnames[32] = {
        "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
        "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
        "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
        "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
        "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
        "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
        "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
        "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
    };

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* 24 general registers: both banks of R0..R7 plus R8..R15. */
    for (i = 0; i < 24; i++)
        cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, gregs[i]),
                                              gregnames[i]);

    /* Control and system registers. */
    cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pc), "PC");
    cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, sr), "SR");
    cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, ssr), "SSR");
    cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, spc), "SPC");
    cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, gbr), "GBR");
    cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, vbr), "VBR");
    cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, sgr), "SGR");
    cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
                                     offsetof(CPUSH4State, dbr), "DBR");
    cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, mach), "MACH");
    cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, macl), "MACL");
    cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
                                    offsetof(CPUSH4State, pr), "PR");
    cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, fpscr), "FPSCR");
    cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, fpul), "FPUL");

    /* Translator-internal state. */
    cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUSH4State, flags), "_flags_");
    cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
                                            offsetof(CPUSH4State, delayed_pc),
                                            "_delayed_pc_");
    cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
                                      offsetof(CPUSH4State, ldst), "_ldst_");

    /* 32 single-precision FP registers: both banks of FPR0..FPR15. */
    for (i = 0; i < 32; i++)
        cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
                                              offsetof(CPUSH4State, fregs[i]),
                                              fregnames[i]);

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
155
156 void cpu_dump_state(CPUSH4State * env, FILE * f,
157 int (*cpu_fprintf) (FILE * f, const char *fmt, ...),
158 int flags)
159 {
160 int i;
161 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
162 env->pc, env->sr, env->pr, env->fpscr);
163 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
164 env->spc, env->ssr, env->gbr, env->vbr);
165 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
166 env->sgr, env->dbr, env->delayed_pc, env->fpul);
167 for (i = 0; i < 24; i += 4) {
168 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
169 i, env->gregs[i], i + 1, env->gregs[i + 1],
170 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
171 }
172 if (env->flags & DELAY_SLOT) {
173 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
174 env->delayed_pc);
175 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
176 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
177 env->delayed_pc);
178 }
179 }
180
/* Static description of one supported SH4 CPU model. */
typedef struct {
    const char *name;    /* model name for -cpu lookup */
    int id;              /* SH_CPU_* identifier */
    uint32_t pvr;        /* processor version register */
    uint32_t prr;        /* product register */
    uint32_t cvr;        /* cache version register */
    uint32_t features;   /* SH_FEATURE_* bits */
} sh4_def_t;
189
190 static sh4_def_t sh4_defs[] = {
191 {
192 .name = "SH7750R",
193 .id = SH_CPU_SH7750R,
194 .pvr = 0x00050000,
195 .prr = 0x00000100,
196 .cvr = 0x00110000,
197 .features = SH_FEATURE_BCR3_AND_BCR4,
198 }, {
199 .name = "SH7751R",
200 .id = SH_CPU_SH7751R,
201 .pvr = 0x04050005,
202 .prr = 0x00000113,
203 .cvr = 0x00110000, /* Neutered caches, should be 0x20480000 */
204 .features = SH_FEATURE_BCR3_AND_BCR4,
205 }, {
206 .name = "SH7785",
207 .id = SH_CPU_SH7785,
208 .pvr = 0x10300700,
209 .prr = 0x00000200,
210 .cvr = 0x71440211,
211 .features = SH_FEATURE_SH4A,
212 },
213 };
214
215 static const sh4_def_t *cpu_sh4_find_by_name(const char *name)
216 {
217 int i;
218
219 if (strcasecmp(name, "any") == 0)
220 return &sh4_defs[0];
221
222 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
223 if (strcasecmp(name, sh4_defs[i].name) == 0)
224 return &sh4_defs[i];
225
226 return NULL;
227 }
228
229 void sh4_cpu_list(FILE *f, fprintf_function cpu_fprintf)
230 {
231 int i;
232
233 for (i = 0; i < ARRAY_SIZE(sh4_defs); i++)
234 (*cpu_fprintf)(f, "%s\n", sh4_defs[i].name);
235 }
236
237 static void cpu_register(CPUSH4State *env, const sh4_def_t *def)
238 {
239 env->pvr = def->pvr;
240 env->prr = def->prr;
241 env->cvr = def->cvr;
242 env->id = def->id;
243 }
244
245 SuperHCPU *cpu_sh4_init(const char *cpu_model)
246 {
247 SuperHCPU *cpu;
248 CPUSH4State *env;
249 const sh4_def_t *def;
250
251 def = cpu_sh4_find_by_name(cpu_model);
252 if (!def)
253 return NULL;
254 cpu = SUPERH_CPU(object_new(TYPE_SUPERH_CPU));
255 env = &cpu->env;
256 env->features = def->features;
257 sh4_translate_init();
258 env->cpu_model_str = cpu_model;
259 cpu_reset(CPU(cpu));
260 cpu_register(env, def);
261 qemu_init_vcpu(env);
262 return cpu;
263 }
264
265 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
266 {
267 TranslationBlock *tb;
268 tb = ctx->tb;
269
270 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
271 !ctx->singlestep_enabled) {
272 /* Use a direct jump if in same page and singlestep not enabled */
273 tcg_gen_goto_tb(n);
274 tcg_gen_movi_i32(cpu_pc, dest);
275 tcg_gen_exit_tb((tcg_target_long)tb + n);
276 } else {
277 tcg_gen_movi_i32(cpu_pc, dest);
278 if (ctx->singlestep_enabled)
279 gen_helper_debug(cpu_env);
280 tcg_gen_exit_tb(0);
281 }
282 }
283
284 static void gen_jump(DisasContext * ctx)
285 {
286 if (ctx->delayed_pc == (uint32_t) - 1) {
287 /* Target is not statically known, it comes necessarily from a
288 delayed jump as immediate jump are conditinal jumps */
289 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
290 if (ctx->singlestep_enabled)
291 gen_helper_debug(cpu_env);
292 tcg_gen_exit_tb(0);
293 } else {
294 gen_goto_tb(ctx, 0, ctx->delayed_pc);
295 }
296 }
297
298 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
299 {
300 TCGv sr;
301 int label = gen_new_label();
302 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
303 sr = tcg_temp_new();
304 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
305 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
306 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
307 gen_set_label(label);
308 }
309
310 /* Immediate conditional jump (bt or bf) */
311 static void gen_conditional_jump(DisasContext * ctx,
312 target_ulong ift, target_ulong ifnott)
313 {
314 int l1;
315 TCGv sr;
316
317 l1 = gen_new_label();
318 sr = tcg_temp_new();
319 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
320 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
321 gen_goto_tb(ctx, 0, ifnott);
322 gen_set_label(l1);
323 gen_goto_tb(ctx, 1, ift);
324 }
325
326 /* Delayed conditional jump (bt or bf) */
327 static void gen_delayed_conditional_jump(DisasContext * ctx)
328 {
329 int l1;
330 TCGv ds;
331
332 l1 = gen_new_label();
333 ds = tcg_temp_new();
334 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
335 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
336 gen_goto_tb(ctx, 1, ctx->pc + 2);
337 gen_set_label(l1);
338 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
339 gen_jump(ctx);
340 }
341
/* Set the T flag in SR. */
static inline void gen_set_t(void)
{
    tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
}
346
/* Clear the T flag in SR. */
static inline void gen_clr_t(void)
{
    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
}
351
352 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
353 {
354 TCGv t;
355
356 t = tcg_temp_new();
357 tcg_gen_setcond_i32(cond, t, t1, t0);
358 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
359 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
360
361 tcg_temp_free(t);
362 }
363
364 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
365 {
366 TCGv t;
367
368 t = tcg_temp_new();
369 tcg_gen_setcondi_i32(cond, t, t0, imm);
370 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
371 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
372
373 tcg_temp_free(t);
374 }
375
/* Replace cpu_flags with FLAGS, preserving only the DELAY_SLOT_TRUE bit. */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
381
382 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
383 {
384 TCGv tmp = tcg_temp_new();
385
386 p0 &= 0x1f;
387 p1 &= 0x1f;
388
389 tcg_gen_andi_i32(tmp, t1, (1 << p1));
390 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
391 if (p0 < p1)
392 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
393 else if (p0 > p1)
394 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
395 tcg_gen_or_i32(t0, t0, tmp);
396
397 tcg_temp_free(tmp);
398 }
399
/* Read the 64-bit FP pair REG/REG+1 into T.  fregs[reg] supplies the
   high 32 bits and fregs[reg + 1] the low 32 bits. */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
404
/* Write the 64-bit value T to the FP pair REG/REG+1 (low word to
   fregs[reg + 1], high word to fregs[reg]).
   NOTE: clobbers T — it is shifted right in place. */
static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);   /* low 32 bits */
    tcg_gen_shri_i64(t, t, 32);
    tcg_gen_trunc_i64_i32(tmp, t);
    tcg_gen_mov_i32(cpu_fregs[reg], tmp);       /* high 32 bits */
    tcg_temp_free_i32(tmp);
}
415
/* Opcode bit-field accessors; ctx->opcode holds the 16-bit instruction. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
/* 8-bit immediate, sign-extended. */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit branch displacement, sign-extended. */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General-register access honouring SR.MD/SR.RB banking: R0..R7 come
   from bank 1 when privileged with RB set, else from bank 0. */
#define REG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB) ? \
                (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* The opposite bank of REG(), for the Rn_BANK forms of ldc/stc. */
#define ALTREG(x) ((x) < 8 && (ctx->sr & (SR_MD | SR_RB)) != (SR_MD | SR_RB) \
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register access honouring FPSCR.FR bank selection. */
#define FREG(x) (ctx->fpscr & FPSCR_FR ? (x) ^ 0x10 : (x))
/* Map an XDn/extended encoding (low bit selects the bank) to a flat index. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->fpscr & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */

/* Raise slot-illegal and stop translation if we are in a delay slot. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_EXCP;                                  \
      return;                                                 \
  }

/* Raise (slot-)illegal and stop translation if not privileged. */
#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_illegal_instruction(cpu_env);   \
      } else {                                                  \
          gen_helper_raise_illegal_instruction(cpu_env);        \
      }                                                         \
      ctx->bstate = BS_EXCP;                                    \
      return;                                                   \
  }

/* Raise (slot-)FPU-disable and stop translation if SR.FD is set. */
#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env);           \
      } else {                                                  \
          gen_helper_raise_fpu_disable(cpu_env);                \
      }                                                         \
      ctx->bstate = BS_EXCP;                                    \
      return;                                                   \
  }
466
467 static void _decode_opc(DisasContext * ctx)
468 {
469 /* This code tries to make movcal emulation sufficiently
470 accurate for Linux purposes. This instruction writes
471 memory, and prior to that, always allocates a cache line.
472 It is used in two contexts:
473 - in memcpy, where data is copied in blocks, the first write
474 of to a block uses movca.l for performance.
475 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
476 to flush the cache. Here, the data written by movcal.l is never
477 written to memory, and the data written is just bogus.
478
479 To simulate this, we simulate movcal.l, we store the value to memory,
480 but we also remember the previous content. If we see ocbi, we check
481 if movcal.l for that address was done previously. If so, the write should
482 not have hit the memory, so we restore the previous content.
483 When we see an instruction that is neither movca.l
484 nor ocbi, the previous content is discarded.
485
486 To optimize, we only try to flush stores when we're at the start of
487 TB, or if we already saw movca.l in this TB and did not flush stores
488 yet. */
489 if (ctx->has_movcal)
490 {
491 int opcode = ctx->opcode & 0xf0ff;
492 if (opcode != 0x0093 /* ocbi */
493 && opcode != 0x00c3 /* movca.l */)
494 {
495 gen_helper_discard_movcal_backup(cpu_env);
496 ctx->has_movcal = 0;
497 }
498 }
499
500 #if 0
501 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
502 #endif
503
504 switch (ctx->opcode) {
505 case 0x0019: /* div0u */
506 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
507 return;
508 case 0x000b: /* rts */
509 CHECK_NOT_DELAY_SLOT
510 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
511 ctx->flags |= DELAY_SLOT;
512 ctx->delayed_pc = (uint32_t) - 1;
513 return;
514 case 0x0028: /* clrmac */
515 tcg_gen_movi_i32(cpu_mach, 0);
516 tcg_gen_movi_i32(cpu_macl, 0);
517 return;
518 case 0x0048: /* clrs */
519 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
520 return;
521 case 0x0008: /* clrt */
522 gen_clr_t();
523 return;
524 case 0x0038: /* ldtlb */
525 CHECK_PRIVILEGED
526 gen_helper_ldtlb(cpu_env);
527 return;
528 case 0x002b: /* rte */
529 CHECK_PRIVILEGED
530 CHECK_NOT_DELAY_SLOT
531 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
532 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
533 ctx->flags |= DELAY_SLOT;
534 ctx->delayed_pc = (uint32_t) - 1;
535 return;
536 case 0x0058: /* sets */
537 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
538 return;
539 case 0x0018: /* sett */
540 gen_set_t();
541 return;
542 case 0xfbfd: /* frchg */
543 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
544 ctx->bstate = BS_STOP;
545 return;
546 case 0xf3fd: /* fschg */
547 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
548 ctx->bstate = BS_STOP;
549 return;
550 case 0x0009: /* nop */
551 return;
552 case 0x001b: /* sleep */
553 CHECK_PRIVILEGED
554 gen_helper_sleep(cpu_env, tcg_const_i32(ctx->pc + 2));
555 return;
556 }
557
558 switch (ctx->opcode & 0xf000) {
559 case 0x1000: /* mov.l Rm,@(disp,Rn) */
560 {
561 TCGv addr = tcg_temp_new();
562 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
563 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
564 tcg_temp_free(addr);
565 }
566 return;
567 case 0x5000: /* mov.l @(disp,Rm),Rn */
568 {
569 TCGv addr = tcg_temp_new();
570 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
571 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
572 tcg_temp_free(addr);
573 }
574 return;
575 case 0xe000: /* mov #imm,Rn */
576 tcg_gen_movi_i32(REG(B11_8), B7_0s);
577 return;
578 case 0x9000: /* mov.w @(disp,PC),Rn */
579 {
580 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
581 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
582 tcg_temp_free(addr);
583 }
584 return;
585 case 0xd000: /* mov.l @(disp,PC),Rn */
586 {
587 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
588 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
589 tcg_temp_free(addr);
590 }
591 return;
592 case 0x7000: /* add #imm,Rn */
593 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
594 return;
595 case 0xa000: /* bra disp */
596 CHECK_NOT_DELAY_SLOT
597 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
598 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
599 ctx->flags |= DELAY_SLOT;
600 return;
601 case 0xb000: /* bsr disp */
602 CHECK_NOT_DELAY_SLOT
603 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
604 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
605 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
606 ctx->flags |= DELAY_SLOT;
607 return;
608 }
609
610 switch (ctx->opcode & 0xf00f) {
611 case 0x6003: /* mov Rm,Rn */
612 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
613 return;
614 case 0x2000: /* mov.b Rm,@Rn */
615 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
616 return;
617 case 0x2001: /* mov.w Rm,@Rn */
618 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
619 return;
620 case 0x2002: /* mov.l Rm,@Rn */
621 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
622 return;
623 case 0x6000: /* mov.b @Rm,Rn */
624 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
625 return;
626 case 0x6001: /* mov.w @Rm,Rn */
627 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
628 return;
629 case 0x6002: /* mov.l @Rm,Rn */
630 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
631 return;
632 case 0x2004: /* mov.b Rm,@-Rn */
633 {
634 TCGv addr = tcg_temp_new();
635 tcg_gen_subi_i32(addr, REG(B11_8), 1);
636 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
637 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
638 tcg_temp_free(addr);
639 }
640 return;
641 case 0x2005: /* mov.w Rm,@-Rn */
642 {
643 TCGv addr = tcg_temp_new();
644 tcg_gen_subi_i32(addr, REG(B11_8), 2);
645 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
646 tcg_gen_mov_i32(REG(B11_8), addr);
647 tcg_temp_free(addr);
648 }
649 return;
650 case 0x2006: /* mov.l Rm,@-Rn */
651 {
652 TCGv addr = tcg_temp_new();
653 tcg_gen_subi_i32(addr, REG(B11_8), 4);
654 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
655 tcg_gen_mov_i32(REG(B11_8), addr);
656 }
657 return;
658 case 0x6004: /* mov.b @Rm+,Rn */
659 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
660 if ( B11_8 != B7_4 )
661 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
662 return;
663 case 0x6005: /* mov.w @Rm+,Rn */
664 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
665 if ( B11_8 != B7_4 )
666 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
667 return;
668 case 0x6006: /* mov.l @Rm+,Rn */
669 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
670 if ( B11_8 != B7_4 )
671 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
672 return;
673 case 0x0004: /* mov.b Rm,@(R0,Rn) */
674 {
675 TCGv addr = tcg_temp_new();
676 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
677 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
678 tcg_temp_free(addr);
679 }
680 return;
681 case 0x0005: /* mov.w Rm,@(R0,Rn) */
682 {
683 TCGv addr = tcg_temp_new();
684 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
685 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
686 tcg_temp_free(addr);
687 }
688 return;
689 case 0x0006: /* mov.l Rm,@(R0,Rn) */
690 {
691 TCGv addr = tcg_temp_new();
692 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
693 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
694 tcg_temp_free(addr);
695 }
696 return;
697 case 0x000c: /* mov.b @(R0,Rm),Rn */
698 {
699 TCGv addr = tcg_temp_new();
700 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
701 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
702 tcg_temp_free(addr);
703 }
704 return;
705 case 0x000d: /* mov.w @(R0,Rm),Rn */
706 {
707 TCGv addr = tcg_temp_new();
708 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
709 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
710 tcg_temp_free(addr);
711 }
712 return;
713 case 0x000e: /* mov.l @(R0,Rm),Rn */
714 {
715 TCGv addr = tcg_temp_new();
716 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
717 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
718 tcg_temp_free(addr);
719 }
720 return;
721 case 0x6008: /* swap.b Rm,Rn */
722 {
723 TCGv high, low;
724 high = tcg_temp_new();
725 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
726 low = tcg_temp_new();
727 tcg_gen_ext16u_i32(low, REG(B7_4));
728 tcg_gen_bswap16_i32(low, low);
729 tcg_gen_or_i32(REG(B11_8), high, low);
730 tcg_temp_free(low);
731 tcg_temp_free(high);
732 }
733 return;
734 case 0x6009: /* swap.w Rm,Rn */
735 {
736 TCGv high, low;
737 high = tcg_temp_new();
738 tcg_gen_shli_i32(high, REG(B7_4), 16);
739 low = tcg_temp_new();
740 tcg_gen_shri_i32(low, REG(B7_4), 16);
741 tcg_gen_ext16u_i32(low, low);
742 tcg_gen_or_i32(REG(B11_8), high, low);
743 tcg_temp_free(low);
744 tcg_temp_free(high);
745 }
746 return;
747 case 0x200d: /* xtrct Rm,Rn */
748 {
749 TCGv high, low;
750 high = tcg_temp_new();
751 tcg_gen_shli_i32(high, REG(B7_4), 16);
752 low = tcg_temp_new();
753 tcg_gen_shri_i32(low, REG(B11_8), 16);
754 tcg_gen_ext16u_i32(low, low);
755 tcg_gen_or_i32(REG(B11_8), high, low);
756 tcg_temp_free(low);
757 tcg_temp_free(high);
758 }
759 return;
760 case 0x300c: /* add Rm,Rn */
761 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
762 return;
763 case 0x300e: /* addc Rm,Rn */
764 gen_helper_addc(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
765 return;
766 case 0x300f: /* addv Rm,Rn */
767 gen_helper_addv(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
768 return;
769 case 0x2009: /* and Rm,Rn */
770 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
771 return;
772 case 0x3000: /* cmp/eq Rm,Rn */
773 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
774 return;
775 case 0x3003: /* cmp/ge Rm,Rn */
776 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
777 return;
778 case 0x3007: /* cmp/gt Rm,Rn */
779 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
780 return;
781 case 0x3006: /* cmp/hi Rm,Rn */
782 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
783 return;
784 case 0x3002: /* cmp/hs Rm,Rn */
785 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
786 return;
787 case 0x200c: /* cmp/str Rm,Rn */
788 {
789 TCGv cmp1 = tcg_temp_new();
790 TCGv cmp2 = tcg_temp_new();
791 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
792 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
793 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
794 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
795 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
796 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
797 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
798 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
799 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
800 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
801 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
802 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
803 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
804 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
805 tcg_temp_free(cmp2);
806 tcg_temp_free(cmp1);
807 }
808 return;
809 case 0x2007: /* div0s Rm,Rn */
810 {
811 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
812 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
813 TCGv val = tcg_temp_new();
814 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
815 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
816 tcg_temp_free(val);
817 }
818 return;
819 case 0x3004: /* div1 Rm,Rn */
820 gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
821 return;
822 case 0x300d: /* dmuls.l Rm,Rn */
823 {
824 TCGv_i64 tmp1 = tcg_temp_new_i64();
825 TCGv_i64 tmp2 = tcg_temp_new_i64();
826
827 tcg_gen_ext_i32_i64(tmp1, REG(B7_4));
828 tcg_gen_ext_i32_i64(tmp2, REG(B11_8));
829 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
830 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
831 tcg_gen_shri_i64(tmp1, tmp1, 32);
832 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
833
834 tcg_temp_free_i64(tmp2);
835 tcg_temp_free_i64(tmp1);
836 }
837 return;
838 case 0x3005: /* dmulu.l Rm,Rn */
839 {
840 TCGv_i64 tmp1 = tcg_temp_new_i64();
841 TCGv_i64 tmp2 = tcg_temp_new_i64();
842
843 tcg_gen_extu_i32_i64(tmp1, REG(B7_4));
844 tcg_gen_extu_i32_i64(tmp2, REG(B11_8));
845 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
846 tcg_gen_trunc_i64_i32(cpu_macl, tmp1);
847 tcg_gen_shri_i64(tmp1, tmp1, 32);
848 tcg_gen_trunc_i64_i32(cpu_mach, tmp1);
849
850 tcg_temp_free_i64(tmp2);
851 tcg_temp_free_i64(tmp1);
852 }
853 return;
854 case 0x600e: /* exts.b Rm,Rn */
855 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
856 return;
857 case 0x600f: /* exts.w Rm,Rn */
858 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
859 return;
860 case 0x600c: /* extu.b Rm,Rn */
861 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
862 return;
863 case 0x600d: /* extu.w Rm,Rn */
864 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
865 return;
866 case 0x000f: /* mac.l @Rm+,@Rn+ */
867 {
868 TCGv arg0, arg1;
869 arg0 = tcg_temp_new();
870 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
871 arg1 = tcg_temp_new();
872 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
873 gen_helper_macl(cpu_env, arg0, arg1);
874 tcg_temp_free(arg1);
875 tcg_temp_free(arg0);
876 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
877 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
878 }
879 return;
880 case 0x400f: /* mac.w @Rm+,@Rn+ */
881 {
882 TCGv arg0, arg1;
883 arg0 = tcg_temp_new();
884 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
885 arg1 = tcg_temp_new();
886 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
887 gen_helper_macw(cpu_env, arg0, arg1);
888 tcg_temp_free(arg1);
889 tcg_temp_free(arg0);
890 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
891 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
892 }
893 return;
894 case 0x0007: /* mul.l Rm,Rn */
895 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
896 return;
897 case 0x200f: /* muls.w Rm,Rn */
898 {
899 TCGv arg0, arg1;
900 arg0 = tcg_temp_new();
901 tcg_gen_ext16s_i32(arg0, REG(B7_4));
902 arg1 = tcg_temp_new();
903 tcg_gen_ext16s_i32(arg1, REG(B11_8));
904 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
905 tcg_temp_free(arg1);
906 tcg_temp_free(arg0);
907 }
908 return;
909 case 0x200e: /* mulu.w Rm,Rn */
910 {
911 TCGv arg0, arg1;
912 arg0 = tcg_temp_new();
913 tcg_gen_ext16u_i32(arg0, REG(B7_4));
914 arg1 = tcg_temp_new();
915 tcg_gen_ext16u_i32(arg1, REG(B11_8));
916 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
917 tcg_temp_free(arg1);
918 tcg_temp_free(arg0);
919 }
920 return;
921 case 0x600b: /* neg Rm,Rn */
922 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
923 return;
924 case 0x600a: /* negc Rm,Rn */
925 {
926 TCGv t0, t1;
927 t0 = tcg_temp_new();
928 tcg_gen_neg_i32(t0, REG(B7_4));
929 t1 = tcg_temp_new();
930 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
931 tcg_gen_sub_i32(REG(B11_8), t0, t1);
932 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
933 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
934 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
935 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
936 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
937 tcg_temp_free(t0);
938 tcg_temp_free(t1);
939 }
940 return;
941 case 0x6007: /* not Rm,Rn */
942 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
943 return;
944 case 0x200b: /* or Rm,Rn */
945 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
946 return;
947 case 0x400c: /* shad Rm,Rn */
948 {
949 int label1 = gen_new_label();
950 int label2 = gen_new_label();
951 int label3 = gen_new_label();
952 int label4 = gen_new_label();
953 TCGv shift;
954 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
955 /* Rm positive, shift to the left */
956 shift = tcg_temp_new();
957 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
958 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
959 tcg_temp_free(shift);
960 tcg_gen_br(label4);
961 /* Rm negative, shift to the right */
962 gen_set_label(label1);
963 shift = tcg_temp_new();
964 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
965 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
966 tcg_gen_not_i32(shift, REG(B7_4));
967 tcg_gen_andi_i32(shift, shift, 0x1f);
968 tcg_gen_addi_i32(shift, shift, 1);
969 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
970 tcg_temp_free(shift);
971 tcg_gen_br(label4);
972 /* Rm = -32 */
973 gen_set_label(label2);
974 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
975 tcg_gen_movi_i32(REG(B11_8), 0);
976 tcg_gen_br(label4);
977 gen_set_label(label3);
978 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
979 gen_set_label(label4);
980 }
981 return;
982 case 0x400d: /* shld Rm,Rn */
983 {
984 int label1 = gen_new_label();
985 int label2 = gen_new_label();
986 int label3 = gen_new_label();
987 TCGv shift;
988 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
989 /* Rm positive, shift to the left */
990 shift = tcg_temp_new();
991 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
992 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
993 tcg_temp_free(shift);
994 tcg_gen_br(label3);
995 /* Rm negative, shift to the right */
996 gen_set_label(label1);
997 shift = tcg_temp_new();
998 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
999 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
1000 tcg_gen_not_i32(shift, REG(B7_4));
1001 tcg_gen_andi_i32(shift, shift, 0x1f);
1002 tcg_gen_addi_i32(shift, shift, 1);
1003 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
1004 tcg_temp_free(shift);
1005 tcg_gen_br(label3);
1006 /* Rm = -32 */
1007 gen_set_label(label2);
1008 tcg_gen_movi_i32(REG(B11_8), 0);
1009 gen_set_label(label3);
1010 }
1011 return;
1012 case 0x3008: /* sub Rm,Rn */
1013 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1014 return;
1015 case 0x300a: /* subc Rm,Rn */
1016 gen_helper_subc(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
1017 return;
1018 case 0x300b: /* subv Rm,Rn */
1019 gen_helper_subv(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
1020 return;
1021 case 0x2008: /* tst Rm,Rn */
1022 {
1023 TCGv val = tcg_temp_new();
1024 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
1025 gen_cmp_imm(TCG_COND_EQ, val, 0);
1026 tcg_temp_free(val);
1027 }
1028 return;
1029 case 0x200a: /* xor Rm,Rn */
1030 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
1031 return;
1032 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
1033 CHECK_FPU_ENABLED
1034 if (ctx->fpscr & FPSCR_SZ) {
1035 TCGv_i64 fp = tcg_temp_new_i64();
1036 gen_load_fpr64(fp, XREG(B7_4));
1037 gen_store_fpr64(fp, XREG(B11_8));
1038 tcg_temp_free_i64(fp);
1039 } else {
1040 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
1041 }
1042 return;
1043 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
1044 CHECK_FPU_ENABLED
1045 if (ctx->fpscr & FPSCR_SZ) {
1046 TCGv addr_hi = tcg_temp_new();
1047 int fr = XREG(B7_4);
1048 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
1049 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
1050 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1051 tcg_temp_free(addr_hi);
1052 } else {
1053 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
1054 }
1055 return;
1056 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
1057 CHECK_FPU_ENABLED
1058 if (ctx->fpscr & FPSCR_SZ) {
1059 TCGv addr_hi = tcg_temp_new();
1060 int fr = XREG(B11_8);
1061 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1062 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1063 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1064 tcg_temp_free(addr_hi);
1065 } else {
1066 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1067 }
1068 return;
1069 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1070 CHECK_FPU_ENABLED
1071 if (ctx->fpscr & FPSCR_SZ) {
1072 TCGv addr_hi = tcg_temp_new();
1073 int fr = XREG(B11_8);
1074 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1075 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1076 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1077 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1078 tcg_temp_free(addr_hi);
1079 } else {
1080 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1081 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1082 }
1083 return;
1084 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1085 CHECK_FPU_ENABLED
1086 if (ctx->fpscr & FPSCR_SZ) {
1087 TCGv addr = tcg_temp_new_i32();
1088 int fr = XREG(B7_4);
1089 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1090 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1091 tcg_gen_subi_i32(addr, addr, 4);
1092 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1093 tcg_gen_mov_i32(REG(B11_8), addr);
1094 tcg_temp_free(addr);
1095 } else {
1096 TCGv addr;
1097 addr = tcg_temp_new_i32();
1098 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1099 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1100 tcg_gen_mov_i32(REG(B11_8), addr);
1101 tcg_temp_free(addr);
1102 }
1103 return;
1104 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1105 CHECK_FPU_ENABLED
1106 {
1107 TCGv addr = tcg_temp_new_i32();
1108 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1109 if (ctx->fpscr & FPSCR_SZ) {
1110 int fr = XREG(B11_8);
1111 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1112 tcg_gen_addi_i32(addr, addr, 4);
1113 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1114 } else {
1115 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1116 }
1117 tcg_temp_free(addr);
1118 }
1119 return;
1120 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1121 CHECK_FPU_ENABLED
1122 {
1123 TCGv addr = tcg_temp_new();
1124 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1125 if (ctx->fpscr & FPSCR_SZ) {
1126 int fr = XREG(B7_4);
1127 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1128 tcg_gen_addi_i32(addr, addr, 4);
1129 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1130 } else {
1131 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1132 }
1133 tcg_temp_free(addr);
1134 }
1135 return;
1136 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1137 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1138 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1139 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1140 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1141 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1142 {
1143 CHECK_FPU_ENABLED
1144 if (ctx->fpscr & FPSCR_PR) {
1145 TCGv_i64 fp0, fp1;
1146
1147 if (ctx->opcode & 0x0110)
1148 break; /* illegal instruction */
1149 fp0 = tcg_temp_new_i64();
1150 fp1 = tcg_temp_new_i64();
1151 gen_load_fpr64(fp0, DREG(B11_8));
1152 gen_load_fpr64(fp1, DREG(B7_4));
1153 switch (ctx->opcode & 0xf00f) {
1154 case 0xf000: /* fadd Rm,Rn */
1155 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1156 break;
1157 case 0xf001: /* fsub Rm,Rn */
1158 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1159 break;
1160 case 0xf002: /* fmul Rm,Rn */
1161 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1162 break;
1163 case 0xf003: /* fdiv Rm,Rn */
1164 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1165 break;
1166 case 0xf004: /* fcmp/eq Rm,Rn */
1167 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1168 return;
1169 case 0xf005: /* fcmp/gt Rm,Rn */
1170 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1171 return;
1172 }
1173 gen_store_fpr64(fp0, DREG(B11_8));
1174 tcg_temp_free_i64(fp0);
1175 tcg_temp_free_i64(fp1);
1176 } else {
1177 switch (ctx->opcode & 0xf00f) {
1178 case 0xf000: /* fadd Rm,Rn */
1179 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1180 cpu_fregs[FREG(B11_8)],
1181 cpu_fregs[FREG(B7_4)]);
1182 break;
1183 case 0xf001: /* fsub Rm,Rn */
1184 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1185 cpu_fregs[FREG(B11_8)],
1186 cpu_fregs[FREG(B7_4)]);
1187 break;
1188 case 0xf002: /* fmul Rm,Rn */
1189 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1190 cpu_fregs[FREG(B11_8)],
1191 cpu_fregs[FREG(B7_4)]);
1192 break;
1193 case 0xf003: /* fdiv Rm,Rn */
1194 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1195 cpu_fregs[FREG(B11_8)],
1196 cpu_fregs[FREG(B7_4)]);
1197 break;
1198 case 0xf004: /* fcmp/eq Rm,Rn */
1199 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1200 cpu_fregs[FREG(B7_4)]);
1201 return;
1202 case 0xf005: /* fcmp/gt Rm,Rn */
1203 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1204 cpu_fregs[FREG(B7_4)]);
1205 return;
1206 }
1207 }
1208 }
1209 return;
1210 case 0xf00e: /* fmac FR0,RM,Rn */
1211 {
1212 CHECK_FPU_ENABLED
1213 if (ctx->fpscr & FPSCR_PR) {
1214 break; /* illegal instruction */
1215 } else {
1216 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1217 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1218 cpu_fregs[FREG(B11_8)]);
1219 return;
1220 }
1221 }
1222 }
1223
1224 switch (ctx->opcode & 0xff00) {
1225 case 0xc900: /* and #imm,R0 */
1226 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1227 return;
1228 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1229 {
1230 TCGv addr, val;
1231 addr = tcg_temp_new();
1232 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1233 val = tcg_temp_new();
1234 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1235 tcg_gen_andi_i32(val, val, B7_0);
1236 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1237 tcg_temp_free(val);
1238 tcg_temp_free(addr);
1239 }
1240 return;
1241 case 0x8b00: /* bf label */
1242 CHECK_NOT_DELAY_SLOT
1243 gen_conditional_jump(ctx, ctx->pc + 2,
1244 ctx->pc + 4 + B7_0s * 2);
1245 ctx->bstate = BS_BRANCH;
1246 return;
1247 case 0x8f00: /* bf/s label */
1248 CHECK_NOT_DELAY_SLOT
1249 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1250 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1251 return;
1252 case 0x8900: /* bt label */
1253 CHECK_NOT_DELAY_SLOT
1254 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1255 ctx->pc + 2);
1256 ctx->bstate = BS_BRANCH;
1257 return;
1258 case 0x8d00: /* bt/s label */
1259 CHECK_NOT_DELAY_SLOT
1260 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1261 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1262 return;
1263 case 0x8800: /* cmp/eq #imm,R0 */
1264 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1265 return;
1266 case 0xc400: /* mov.b @(disp,GBR),R0 */
1267 {
1268 TCGv addr = tcg_temp_new();
1269 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1270 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1271 tcg_temp_free(addr);
1272 }
1273 return;
1274 case 0xc500: /* mov.w @(disp,GBR),R0 */
1275 {
1276 TCGv addr = tcg_temp_new();
1277 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1278 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1279 tcg_temp_free(addr);
1280 }
1281 return;
1282 case 0xc600: /* mov.l @(disp,GBR),R0 */
1283 {
1284 TCGv addr = tcg_temp_new();
1285 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1286 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1287 tcg_temp_free(addr);
1288 }
1289 return;
1290 case 0xc000: /* mov.b R0,@(disp,GBR) */
1291 {
1292 TCGv addr = tcg_temp_new();
1293 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1294 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1295 tcg_temp_free(addr);
1296 }
1297 return;
1298 case 0xc100: /* mov.w R0,@(disp,GBR) */
1299 {
1300 TCGv addr = tcg_temp_new();
1301 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1302 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1303 tcg_temp_free(addr);
1304 }
1305 return;
1306 case 0xc200: /* mov.l R0,@(disp,GBR) */
1307 {
1308 TCGv addr = tcg_temp_new();
1309 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1310 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1311 tcg_temp_free(addr);
1312 }
1313 return;
1314 case 0x8000: /* mov.b R0,@(disp,Rn) */
1315 {
1316 TCGv addr = tcg_temp_new();
1317 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1318 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1319 tcg_temp_free(addr);
1320 }
1321 return;
1322 case 0x8100: /* mov.w R0,@(disp,Rn) */
1323 {
1324 TCGv addr = tcg_temp_new();
1325 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1326 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1327 tcg_temp_free(addr);
1328 }
1329 return;
1330 case 0x8400: /* mov.b @(disp,Rn),R0 */
1331 {
1332 TCGv addr = tcg_temp_new();
1333 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1334 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1335 tcg_temp_free(addr);
1336 }
1337 return;
1338 case 0x8500: /* mov.w @(disp,Rn),R0 */
1339 {
1340 TCGv addr = tcg_temp_new();
1341 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1342 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1343 tcg_temp_free(addr);
1344 }
1345 return;
1346 case 0xc700: /* mova @(disp,PC),R0 */
1347 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1348 return;
1349 case 0xcb00: /* or #imm,R0 */
1350 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1351 return;
1352 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1353 {
1354 TCGv addr, val;
1355 addr = tcg_temp_new();
1356 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1357 val = tcg_temp_new();
1358 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1359 tcg_gen_ori_i32(val, val, B7_0);
1360 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1361 tcg_temp_free(val);
1362 tcg_temp_free(addr);
1363 }
1364 return;
1365 case 0xc300: /* trapa #imm */
1366 {
1367 TCGv imm;
1368 CHECK_NOT_DELAY_SLOT
1369 imm = tcg_const_i32(B7_0);
1370 gen_helper_trapa(cpu_env, imm);
1371 tcg_temp_free(imm);
1372 ctx->bstate = BS_BRANCH;
1373 }
1374 return;
1375 case 0xc800: /* tst #imm,R0 */
1376 {
1377 TCGv val = tcg_temp_new();
1378 tcg_gen_andi_i32(val, REG(0), B7_0);
1379 gen_cmp_imm(TCG_COND_EQ, val, 0);
1380 tcg_temp_free(val);
1381 }
1382 return;
1383 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1384 {
1385 TCGv val = tcg_temp_new();
1386 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1387 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1388 tcg_gen_andi_i32(val, val, B7_0);
1389 gen_cmp_imm(TCG_COND_EQ, val, 0);
1390 tcg_temp_free(val);
1391 }
1392 return;
1393 case 0xca00: /* xor #imm,R0 */
1394 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1395 return;
1396 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1397 {
1398 TCGv addr, val;
1399 addr = tcg_temp_new();
1400 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1401 val = tcg_temp_new();
1402 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1403 tcg_gen_xori_i32(val, val, B7_0);
1404 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1405 tcg_temp_free(val);
1406 tcg_temp_free(addr);
1407 }
1408 return;
1409 }
1410
1411 switch (ctx->opcode & 0xf08f) {
1412 case 0x408e: /* ldc Rm,Rn_BANK */
1413 CHECK_PRIVILEGED
1414 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1415 return;
1416 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1417 CHECK_PRIVILEGED
1418 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1419 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1420 return;
1421 case 0x0082: /* stc Rm_BANK,Rn */
1422 CHECK_PRIVILEGED
1423 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1424 return;
1425 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1426 CHECK_PRIVILEGED
1427 {
1428 TCGv addr = tcg_temp_new();
1429 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1430 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1431 tcg_gen_mov_i32(REG(B11_8), addr);
1432 tcg_temp_free(addr);
1433 }
1434 return;
1435 }
1436
1437 switch (ctx->opcode & 0xf0ff) {
1438 case 0x0023: /* braf Rn */
1439 CHECK_NOT_DELAY_SLOT
1440 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1441 ctx->flags |= DELAY_SLOT;
1442 ctx->delayed_pc = (uint32_t) - 1;
1443 return;
1444 case 0x0003: /* bsrf Rn */
1445 CHECK_NOT_DELAY_SLOT
1446 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1447 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1448 ctx->flags |= DELAY_SLOT;
1449 ctx->delayed_pc = (uint32_t) - 1;
1450 return;
1451 case 0x4015: /* cmp/pl Rn */
1452 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1453 return;
1454 case 0x4011: /* cmp/pz Rn */
1455 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1456 return;
1457 case 0x4010: /* dt Rn */
1458 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1459 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1460 return;
1461 case 0x402b: /* jmp @Rn */
1462 CHECK_NOT_DELAY_SLOT
1463 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1464 ctx->flags |= DELAY_SLOT;
1465 ctx->delayed_pc = (uint32_t) - 1;
1466 return;
1467 case 0x400b: /* jsr @Rn */
1468 CHECK_NOT_DELAY_SLOT
1469 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1470 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1471 ctx->flags |= DELAY_SLOT;
1472 ctx->delayed_pc = (uint32_t) - 1;
1473 return;
1474 case 0x400e: /* ldc Rm,SR */
1475 CHECK_PRIVILEGED
1476 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1477 ctx->bstate = BS_STOP;
1478 return;
1479 case 0x4007: /* ldc.l @Rm+,SR */
1480 CHECK_PRIVILEGED
1481 {
1482 TCGv val = tcg_temp_new();
1483 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1484 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1485 tcg_temp_free(val);
1486 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1487 ctx->bstate = BS_STOP;
1488 }
1489 return;
1490 case 0x0002: /* stc SR,Rn */
1491 CHECK_PRIVILEGED
1492 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1493 return;
1494 case 0x4003: /* stc SR,@-Rn */
1495 CHECK_PRIVILEGED
1496 {
1497 TCGv addr = tcg_temp_new();
1498 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1499 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1500 tcg_gen_mov_i32(REG(B11_8), addr);
1501 tcg_temp_free(addr);
1502 }
1503 return;
1504 #define LD(reg,ldnum,ldpnum,prechk) \
1505 case ldnum: \
1506 prechk \
1507 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1508 return; \
1509 case ldpnum: \
1510 prechk \
1511 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1512 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1513 return;
1514 #define ST(reg,stnum,stpnum,prechk) \
1515 case stnum: \
1516 prechk \
1517 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1518 return; \
1519 case stpnum: \
1520 prechk \
1521 { \
1522 TCGv addr = tcg_temp_new(); \
1523 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1524 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1525 tcg_gen_mov_i32(REG(B11_8), addr); \
1526 tcg_temp_free(addr); \
1527 } \
1528 return;
1529 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1530 LD(reg,ldnum,ldpnum,prechk) \
1531 ST(reg,stnum,stpnum,prechk)
1532 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1533 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1534 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1535 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1536 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1537 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1538 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1539 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1540 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1541 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1542 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1543 case 0x406a: /* lds Rm,FPSCR */
1544 CHECK_FPU_ENABLED
1545 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1546 ctx->bstate = BS_STOP;
1547 return;
1548 case 0x4066: /* lds.l @Rm+,FPSCR */
1549 CHECK_FPU_ENABLED
1550 {
1551 TCGv addr = tcg_temp_new();
1552 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1553 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1554 gen_helper_ld_fpscr(cpu_env, addr);
1555 tcg_temp_free(addr);
1556 ctx->bstate = BS_STOP;
1557 }
1558 return;
1559 case 0x006a: /* sts FPSCR,Rn */
1560 CHECK_FPU_ENABLED
1561 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1562 return;
1563 case 0x4062: /* sts FPSCR,@-Rn */
1564 CHECK_FPU_ENABLED
1565 {
1566 TCGv addr, val;
1567 val = tcg_temp_new();
1568 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1569 addr = tcg_temp_new();
1570 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1571 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1572 tcg_gen_mov_i32(REG(B11_8), addr);
1573 tcg_temp_free(addr);
1574 tcg_temp_free(val);
1575 }
1576 return;
1577 case 0x00c3: /* movca.l R0,@Rm */
1578 {
1579 TCGv val = tcg_temp_new();
1580 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1581 gen_helper_movcal(cpu_env, REG(B11_8), val);
1582 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1583 }
1584 ctx->has_movcal = 1;
1585 return;
1586 case 0x40a9:
1587 /* MOVUA.L @Rm,R0 (Rm) -> R0
1588 Load non-boundary-aligned data */
1589 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1590 return;
1591 case 0x40e9:
1592 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1593 Load non-boundary-aligned data */
1594 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1595 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1596 return;
1597 case 0x0029: /* movt Rn */
1598 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1599 return;
1600 case 0x0073:
1601 /* MOVCO.L
1602 LDST -> T
1603 If (T == 1) R0 -> (Rn)
1604 0 -> LDST
1605 */
1606 if (ctx->features & SH_FEATURE_SH4A) {
1607 int label = gen_new_label();
1608 gen_clr_t();
1609 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1610 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1611 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1612 gen_set_label(label);
1613 tcg_gen_movi_i32(cpu_ldst, 0);
1614 return;
1615 } else
1616 break;
1617 case 0x0063:
1618 /* MOVLI.L @Rm,R0
1619 1 -> LDST
1620 (Rm) -> R0
1621 When interrupt/exception
1622 occurred 0 -> LDST
1623 */
1624 if (ctx->features & SH_FEATURE_SH4A) {
1625 tcg_gen_movi_i32(cpu_ldst, 0);
1626 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1627 tcg_gen_movi_i32(cpu_ldst, 1);
1628 return;
1629 } else
1630 break;
1631 case 0x0093: /* ocbi @Rn */
1632 {
1633 gen_helper_ocbi(cpu_env, REG(B11_8));
1634 }
1635 return;
1636 case 0x00a3: /* ocbp @Rn */
1637 case 0x00b3: /* ocbwb @Rn */
1638 /* These instructions are supposed to do nothing in case of
1639 a cache miss. Given that we only partially emulate caches
1640 it is safe to simply ignore them. */
1641 return;
1642 case 0x0083: /* pref @Rn */
1643 return;
1644 case 0x00d3: /* prefi @Rn */
1645 if (ctx->features & SH_FEATURE_SH4A)
1646 return;
1647 else
1648 break;
1649 case 0x00e3: /* icbi @Rn */
1650 if (ctx->features & SH_FEATURE_SH4A)
1651 return;
1652 else
1653 break;
1654 case 0x00ab: /* synco */
1655 if (ctx->features & SH_FEATURE_SH4A)
1656 return;
1657 else
1658 break;
1659 case 0x4024: /* rotcl Rn */
1660 {
1661 TCGv tmp = tcg_temp_new();
1662 tcg_gen_mov_i32(tmp, cpu_sr);
1663 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1664 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1665 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1666 tcg_temp_free(tmp);
1667 }
1668 return;
1669 case 0x4025: /* rotcr Rn */
1670 {
1671 TCGv tmp = tcg_temp_new();
1672 tcg_gen_mov_i32(tmp, cpu_sr);
1673 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1674 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1675 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1676 tcg_temp_free(tmp);
1677 }
1678 return;
1679 case 0x4004: /* rotl Rn */
1680 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1681 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1682 return;
1683 case 0x4005: /* rotr Rn */
1684 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1685 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1686 return;
1687 case 0x4000: /* shll Rn */
1688 case 0x4020: /* shal Rn */
1689 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1690 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1691 return;
1692 case 0x4021: /* shar Rn */
1693 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1694 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1695 return;
1696 case 0x4001: /* shlr Rn */
1697 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1698 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1699 return;
1700 case 0x4008: /* shll2 Rn */
1701 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1702 return;
1703 case 0x4018: /* shll8 Rn */
1704 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1705 return;
1706 case 0x4028: /* shll16 Rn */
1707 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1708 return;
1709 case 0x4009: /* shlr2 Rn */
1710 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1711 return;
1712 case 0x4019: /* shlr8 Rn */
1713 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1714 return;
1715 case 0x4029: /* shlr16 Rn */
1716 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1717 return;
1718 case 0x401b: /* tas.b @Rn */
1719 {
1720 TCGv addr, val;
1721 addr = tcg_temp_local_new();
1722 tcg_gen_mov_i32(addr, REG(B11_8));
1723 val = tcg_temp_local_new();
1724 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1725 gen_cmp_imm(TCG_COND_EQ, val, 0);
1726 tcg_gen_ori_i32(val, val, 0x80);
1727 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1728 tcg_temp_free(val);
1729 tcg_temp_free(addr);
1730 }
1731 return;
1732 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1733 CHECK_FPU_ENABLED
1734 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1735 return;
1736 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1737 CHECK_FPU_ENABLED
1738 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1739 return;
1740 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1741 CHECK_FPU_ENABLED
1742 if (ctx->fpscr & FPSCR_PR) {
1743 TCGv_i64 fp;
1744 if (ctx->opcode & 0x0100)
1745 break; /* illegal instruction */
1746 fp = tcg_temp_new_i64();
1747 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1748 gen_store_fpr64(fp, DREG(B11_8));
1749 tcg_temp_free_i64(fp);
1750 }
1751 else {
1752 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1753 }
1754 return;
1755 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1756 CHECK_FPU_ENABLED
1757 if (ctx->fpscr & FPSCR_PR) {
1758 TCGv_i64 fp;
1759 if (ctx->opcode & 0x0100)
1760 break; /* illegal instruction */
1761 fp = tcg_temp_new_i64();
1762 gen_load_fpr64(fp, DREG(B11_8));
1763 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1764 tcg_temp_free_i64(fp);
1765 }
1766 else {
1767 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1768 }
1769 return;
1770 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1771 CHECK_FPU_ENABLED
1772 {
1773 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1774 }
1775 return;
1776 case 0xf05d: /* fabs FRn/DRn */
1777 CHECK_FPU_ENABLED
1778 if (ctx->fpscr & FPSCR_PR) {
1779 if (ctx->opcode & 0x0100)
1780 break; /* illegal instruction */
1781 TCGv_i64 fp = tcg_temp_new_i64();
1782 gen_load_fpr64(fp, DREG(B11_8));
1783 gen_helper_fabs_DT(fp, fp);
1784 gen_store_fpr64(fp, DREG(B11_8));
1785 tcg_temp_free_i64(fp);
1786 } else {
1787 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1788 }
1789 return;
1790 case 0xf06d: /* fsqrt FRn */
1791 CHECK_FPU_ENABLED
1792 if (ctx->fpscr & FPSCR_PR) {
1793 if (ctx->opcode & 0x0100)
1794 break; /* illegal instruction */
1795 TCGv_i64 fp = tcg_temp_new_i64();
1796 gen_load_fpr64(fp, DREG(B11_8));
1797 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1798 gen_store_fpr64(fp, DREG(B11_8));
1799 tcg_temp_free_i64(fp);
1800 } else {
1801 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1802 cpu_fregs[FREG(B11_8)]);
1803 }
1804 return;
1805 case 0xf07d: /* fsrra FRn */
1806 CHECK_FPU_ENABLED
1807 break;
1808 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1809 CHECK_FPU_ENABLED
1810 if (!(ctx->fpscr & FPSCR_PR)) {
1811 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1812 }
1813 return;
1814 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1815 CHECK_FPU_ENABLED
1816 if (!(ctx->fpscr & FPSCR_PR)) {
1817 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1818 }
1819 return;
1820 case 0xf0ad: /* fcnvsd FPUL,DRn */
1821 CHECK_FPU_ENABLED
1822 {
1823 TCGv_i64 fp = tcg_temp_new_i64();
1824 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1825 gen_store_fpr64(fp, DREG(B11_8));
1826 tcg_temp_free_i64(fp);
1827 }
1828 return;
1829 case 0xf0bd: /* fcnvds DRn,FPUL */
1830 CHECK_FPU_ENABLED
1831 {
1832 TCGv_i64 fp = tcg_temp_new_i64();
1833 gen_load_fpr64(fp, DREG(B11_8));
1834 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1835 tcg_temp_free_i64(fp);
1836 }
1837 return;
1838 case 0xf0ed: /* fipr FVm,FVn */
1839 CHECK_FPU_ENABLED
1840 if ((ctx->fpscr & FPSCR_PR) == 0) {
1841 TCGv m, n;
1842 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1843 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1844 gen_helper_fipr(cpu_env, m, n);
1845 tcg_temp_free(m);
1846 tcg_temp_free(n);
1847 return;
1848 }
1849 break;
1850 case 0xf0fd: /* ftrv XMTRX,FVn */
1851 CHECK_FPU_ENABLED
1852 if ((ctx->opcode & 0x0300) == 0x0100 &&
1853 (ctx->fpscr & FPSCR_PR) == 0) {
1854 TCGv n;
1855 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1856 gen_helper_ftrv(cpu_env, n);
1857 tcg_temp_free(n);
1858 return;
1859 }
1860 break;
1861 }
1862 #if 0
1863 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1864 ctx->opcode, ctx->pc);
1865 fflush(stderr);
1866 #endif
1867 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1868 gen_helper_raise_slot_illegal_instruction(cpu_env);
1869 } else {
1870 gen_helper_raise_illegal_instruction(cpu_env);
1871 }
1872 ctx->bstate = BS_EXCP;
1873 }
1874
1875 static void decode_opc(DisasContext * ctx)
1876 {
1877 uint32_t old_flags = ctx->flags;
1878
1879 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
1880 tcg_gen_debug_insn_start(ctx->pc);
1881 }
1882
1883 _decode_opc(ctx);
1884
1885 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1886 if (ctx->flags & DELAY_SLOT_CLEARME) {
1887 gen_store_flags(0);
1888 } else {
1889 /* go out of the delay slot */
1890 uint32_t new_flags = ctx->flags;
1891 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1892 gen_store_flags(new_flags);
1893 }
1894 ctx->flags = 0;
1895 ctx->bstate = BS_BRANCH;
1896 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1897 gen_delayed_conditional_jump(ctx);
1898 } else if (old_flags & DELAY_SLOT) {
1899 gen_jump(ctx);
1900 }
1901
1902 }
1903
1904 /* go into a delay slot */
1905 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1906 gen_store_flags(ctx->flags);
1907 }
1908
1909 static inline void
1910 gen_intermediate_code_internal(CPUSH4State * env, TranslationBlock * tb,
1911 int search_pc)
1912 {
1913 DisasContext ctx;
1914 target_ulong pc_start;
1915 static uint16_t *gen_opc_end;
1916 CPUBreakpoint *bp;
1917 int i, ii;
1918 int num_insns;
1919 int max_insns;
1920
1921 pc_start = tb->pc;
1922 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1923 ctx.pc = pc_start;
1924 ctx.flags = (uint32_t)tb->flags;
1925 ctx.bstate = BS_NONE;
1926 ctx.sr = env->sr;
1927 ctx.fpscr = env->fpscr;
1928 ctx.memidx = (env->sr & SR_MD) == 0 ? 1 : 0;
1929 /* We don't know if the delayed pc came from a dynamic or static branch,
1930 so assume it is a dynamic branch. */
1931 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1932 ctx.tb = tb;
1933 ctx.singlestep_enabled = env->singlestep_enabled;
1934 ctx.features = env->features;
1935 ctx.has_movcal = (tb->flags & TB_FLAG_PENDING_MOVCA);
1936
1937 ii = -1;
1938 num_insns = 0;
1939 max_insns = tb->cflags & CF_COUNT_MASK;
1940 if (max_insns == 0)
1941 max_insns = CF_COUNT_MASK;
1942 gen_icount_start();
1943 while (ctx.bstate == BS_NONE && gen_opc_ptr < gen_opc_end) {
1944 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1945 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1946 if (ctx.pc == bp->pc) {
1947 /* We have hit a breakpoint - make sure PC is up-to-date */
1948 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1949 gen_helper_debug(cpu_env);
1950 ctx.bstate = BS_EXCP;
1951 break;
1952 }
1953 }
1954 }
1955 if (search_pc) {
1956 i = gen_opc_ptr - gen_opc_buf;
1957 if (ii < i) {
1958 ii++;
1959 while (ii < i)
1960 gen_opc_instr_start[ii++] = 0;
1961 }
1962 gen_opc_pc[ii] = ctx.pc;
1963 gen_opc_hflags[ii] = ctx.flags;
1964 gen_opc_instr_start[ii] = 1;
1965 gen_opc_icount[ii] = num_insns;
1966 }
1967 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1968 gen_io_start();
1969 #if 0
1970 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1971 fflush(stderr);
1972 #endif
1973 ctx.opcode = cpu_lduw_code(env, ctx.pc);
1974 decode_opc(&ctx);
1975 num_insns++;
1976 ctx.pc += 2;
1977 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1978 break;
1979 if (env->singlestep_enabled)
1980 break;
1981 if (num_insns >= max_insns)
1982 break;
1983 if (singlestep)
1984 break;
1985 }
1986 if (tb->cflags & CF_LAST_IO)
1987 gen_io_end();
1988 if (env->singlestep_enabled) {
1989 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1990 gen_helper_debug(cpu_env);
1991 } else {
1992 switch (ctx.bstate) {
1993 case BS_STOP:
1994 /* gen_op_interrupt_restart(); */
1995 /* fall through */
1996 case BS_NONE:
1997 if (ctx.flags) {
1998 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1999 }
2000 gen_goto_tb(&ctx, 0, ctx.pc);
2001 break;
2002 case BS_EXCP:
2003 /* gen_op_interrupt_restart(); */
2004 tcg_gen_exit_tb(0);
2005 break;
2006 case BS_BRANCH:
2007 default:
2008 break;
2009 }
2010 }
2011
2012 gen_icount_end(tb, num_insns);
2013 *gen_opc_ptr = INDEX_op_end;
2014 if (search_pc) {
2015 i = gen_opc_ptr - gen_opc_buf;
2016 ii++;
2017 while (ii <= i)
2018 gen_opc_instr_start[ii++] = 0;
2019 } else {
2020 tb->size = ctx.pc - pc_start;
2021 tb->icount = num_insns;
2022 }
2023
2024 #ifdef DEBUG_DISAS
2025 #ifdef SH4_DEBUG_DISAS
2026 qemu_log_mask(CPU_LOG_TB_IN_ASM, "\n");
2027 #endif
2028 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
2029 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
2030 log_target_disas(pc_start, ctx.pc - pc_start, 0);
2031 qemu_log("\n");
2032 }
2033 #endif
2034 }
2035
2036 void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
2037 {
2038 gen_intermediate_code_internal(env, tb, 0);
2039 }
2040
2041 void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
2042 {
2043 gen_intermediate_code_internal(env, tb, 1);
2044 }
2045
2046 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
2047 {
2048 env->pc = gen_opc_pc[pc_pos];
2049 env->flags = gen_opc_hflags[pc_pos];
2050 }