]> git.proxmox.com Git - qemu.git/blob - target-sh4/translate.c
Open 2.0 development tree
[qemu.git] / target-sh4 / translate.c
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #define DEBUG_DISAS
21 //#define SH4_SINGLE_STEP
22
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "tcg-op.h"
26
27 #include "helper.h"
28 #define GEN_HELPER 1
29 #include "helper.h"
30
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    struct TranslationBlock *tb;   /* TB being translated */
    target_ulong pc;               /* guest PC of the instruction being decoded */
    uint16_t opcode;               /* raw 16-bit opcode currently decoded */
    uint32_t flags;                /* translation-time flags (SR bits such as
                                      SR_MD/SR_RB/SR_FD, FPSCR bits, and
                                      DELAY_SLOT* state) */
    int bstate;                    /* how the TB ends: one of the BS_* values */
    int memidx;                    /* MMU index passed to tcg_gen_qemu_ld/st */
    uint32_t delayed_pc;           /* static branch target, or (uint32_t)-1 when
                                      the target is only known at run time */
    int singlestep_enabled;        /* suppress TB chaining and call gen_helper_debug */
    uint32_t features;             /* CPU feature bits -- NOTE(review): not used in
                                      the visible chunk; confirm against callers */
    int has_movcal;                /* non-zero while a movca.l backup may be live
                                      (see the comment at the top of _decode_opc) */
} DisasContext;
43
44 #if defined(CONFIG_USER_ONLY)
45 #define IS_USER(ctx) 1
46 #else
47 #define IS_USER(ctx) (!(ctx->flags & SR_MD))
48 #endif
49
/* Values for DisasContext::bstate, describing how translation of a TB ends. */
enum {
    BS_NONE   = 0, /* We go out of the TB without reaching a branch or an
                    * exception condition
                    */
    BS_STOP   = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP   = 3, /* We reached an exception condition */
};
58
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];   /* R8-R15 plus both banks of R0-R7 */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];   /* FPR0-FPR15, both banks */

/* internal register indexes (not architectural registers) */
static TCGv cpu_flags, cpu_delayed_pc;

/* env->flags value at each generated-op position -- presumably used when
   restoring CPU state at an exception; verify against the code that fills it */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "exec/gen-icount.h"
73
74 void sh4_translate_init(void)
75 {
76 int i;
77 static int done_init = 0;
78 static const char * const gregnames[24] = {
79 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
80 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
81 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
82 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
83 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
84 };
85 static const char * const fregnames[32] = {
86 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
87 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
88 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
89 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
90 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
91 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
92 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
93 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
94 };
95
96 if (done_init)
97 return;
98
99 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
100
101 for (i = 0; i < 24; i++)
102 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
103 offsetof(CPUSH4State, gregs[i]),
104 gregnames[i]);
105
106 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
107 offsetof(CPUSH4State, pc), "PC");
108 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
109 offsetof(CPUSH4State, sr), "SR");
110 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
111 offsetof(CPUSH4State, ssr), "SSR");
112 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
113 offsetof(CPUSH4State, spc), "SPC");
114 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
115 offsetof(CPUSH4State, gbr), "GBR");
116 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
117 offsetof(CPUSH4State, vbr), "VBR");
118 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
119 offsetof(CPUSH4State, sgr), "SGR");
120 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
121 offsetof(CPUSH4State, dbr), "DBR");
122 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
123 offsetof(CPUSH4State, mach), "MACH");
124 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
125 offsetof(CPUSH4State, macl), "MACL");
126 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
127 offsetof(CPUSH4State, pr), "PR");
128 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
129 offsetof(CPUSH4State, fpscr), "FPSCR");
130 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
131 offsetof(CPUSH4State, fpul), "FPUL");
132
133 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
134 offsetof(CPUSH4State, flags), "_flags_");
135 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
136 offsetof(CPUSH4State, delayed_pc),
137 "_delayed_pc_");
138 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
139 offsetof(CPUSH4State, ldst), "_ldst_");
140
141 for (i = 0; i < 32; i++)
142 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
143 offsetof(CPUSH4State, fregs[i]),
144 fregnames[i]);
145
146 done_init = 1;
147 }
148
149 void superh_cpu_dump_state(CPUState *cs, FILE *f,
150 fprintf_function cpu_fprintf, int flags)
151 {
152 SuperHCPU *cpu = SUPERH_CPU(cs);
153 CPUSH4State *env = &cpu->env;
154 int i;
155 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
156 env->pc, env->sr, env->pr, env->fpscr);
157 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
158 env->spc, env->ssr, env->gbr, env->vbr);
159 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
160 env->sgr, env->dbr, env->delayed_pc, env->fpul);
161 for (i = 0; i < 24; i += 4) {
162 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
163 i, env->gregs[i], i + 1, env->gregs[i + 1],
164 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
165 }
166 if (env->flags & DELAY_SLOT) {
167 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
168 env->delayed_pc);
169 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
170 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
171 env->delayed_pc);
172 }
173 }
174
175 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
176 {
177 TranslationBlock *tb;
178 tb = ctx->tb;
179
180 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
181 !ctx->singlestep_enabled) {
182 /* Use a direct jump if in same page and singlestep not enabled */
183 tcg_gen_goto_tb(n);
184 tcg_gen_movi_i32(cpu_pc, dest);
185 tcg_gen_exit_tb((uintptr_t)tb + n);
186 } else {
187 tcg_gen_movi_i32(cpu_pc, dest);
188 if (ctx->singlestep_enabled)
189 gen_helper_debug(cpu_env);
190 tcg_gen_exit_tb(0);
191 }
192 }
193
194 static void gen_jump(DisasContext * ctx)
195 {
196 if (ctx->delayed_pc == (uint32_t) - 1) {
197 /* Target is not statically known, it comes necessarily from a
198 delayed jump as immediate jump are conditinal jumps */
199 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
200 if (ctx->singlestep_enabled)
201 gen_helper_debug(cpu_env);
202 tcg_gen_exit_tb(0);
203 } else {
204 gen_goto_tb(ctx, 0, ctx->delayed_pc);
205 }
206 }
207
208 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
209 {
210 TCGv sr;
211 int label = gen_new_label();
212 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
213 sr = tcg_temp_new();
214 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
215 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
216 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
217 gen_set_label(label);
218 }
219
220 /* Immediate conditional jump (bt or bf) */
221 static void gen_conditional_jump(DisasContext * ctx,
222 target_ulong ift, target_ulong ifnott)
223 {
224 int l1;
225 TCGv sr;
226
227 l1 = gen_new_label();
228 sr = tcg_temp_new();
229 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
230 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
231 gen_goto_tb(ctx, 0, ifnott);
232 gen_set_label(l1);
233 gen_goto_tb(ctx, 1, ift);
234 }
235
236 /* Delayed conditional jump (bt or bf) */
237 static void gen_delayed_conditional_jump(DisasContext * ctx)
238 {
239 int l1;
240 TCGv ds;
241
242 l1 = gen_new_label();
243 ds = tcg_temp_new();
244 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
245 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
246 gen_goto_tb(ctx, 1, ctx->pc + 2);
247 gen_set_label(l1);
248 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
249 gen_jump(ctx);
250 }
251
252 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
253 {
254 TCGv t;
255
256 t = tcg_temp_new();
257 tcg_gen_setcond_i32(cond, t, t1, t0);
258 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
259 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
260
261 tcg_temp_free(t);
262 }
263
264 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
265 {
266 TCGv t;
267
268 t = tcg_temp_new();
269 tcg_gen_setcondi_i32(cond, t, t0, imm);
270 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
271 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
272
273 tcg_temp_free(t);
274 }
275
/* Store the translator's flags into cpu_flags, preserving only the
   run-time DELAY_SLOT_TRUE bit already latched there. */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
281
282 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
283 {
284 TCGv tmp = tcg_temp_new();
285
286 p0 &= 0x1f;
287 p1 &= 0x1f;
288
289 tcg_gen_andi_i32(tmp, t1, (1 << p1));
290 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
291 if (p0 < p1)
292 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
293 else if (p0 > p1)
294 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
295 tcg_gen_or_i32(t0, t0, tmp);
296
297 tcg_temp_free(tmp);
298 }
299
/* Assemble a 64-bit value from the FP register pair at reg:
   cpu_fregs[reg] supplies the high 32 bits and cpu_fregs[reg + 1] the low
   32 bits (matching the split performed by gen_store_fpr64).
   Assumes reg is even -- see DREG(). */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
304
305 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
306 {
307 TCGv_i32 tmp = tcg_temp_new_i32();
308 tcg_gen_trunc_i64_i32(tmp, t);
309 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
310 tcg_gen_shri_i64(t, t, 32);
311 tcg_gen_trunc_i64_i32(tmp, t);
312 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
313 tcg_temp_free_i32(tmp);
314 }
315
/* Instruction operand field accessors: named bit ranges of ctx->opcode. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
/* 8-bit immediate, sign-extended. */
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
/* 12-bit immediate, sign-extended (branch displacement field). */
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access: R0-R7 come from bank 1 when both SR.MD and
   SR.RB are set in the translation flags, otherwise from bank 0. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* The opposite bank selection to REG(): accesses the inactive R0-R7 bank. */
#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
                ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register index adjusted for the FPSCR.FR bank bit. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
/* Remap an XD-style register number -- moves the odd bit up to bit 4. */
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */
336
/* Raise a slot-illegal-instruction exception and abort decoding of the
   current instruction if it sits in a delay slot.  Note: expands to an
   early 'return' from _decode_opc(). */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_BRANCH;                                \
      return;                                                 \
  }

/* Raise an illegal-instruction exception (slot variant when in a delay
   slot) if translating in user mode.  Expands to an early 'return'. */
#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
         gen_helper_raise_slot_illegal_instruction(cpu_env);    \
      } else {                                                  \
         gen_helper_raise_illegal_instruction(cpu_env);         \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }

/* Raise an FPU-disabled exception (slot variant when in a delay slot)
   if SR.FD is set.  Expands to an early 'return'. */
#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env);           \
      } else {                                                  \
          gen_helper_raise_fpu_disable(cpu_env);                \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
369
370 static void _decode_opc(DisasContext * ctx)
371 {
372 /* This code tries to make movcal emulation sufficiently
373 accurate for Linux purposes. This instruction writes
374 memory, and prior to that, always allocates a cache line.
375 It is used in two contexts:
376 - in memcpy, where data is copied in blocks, the first write
377 of to a block uses movca.l for performance.
378 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
379 to flush the cache. Here, the data written by movcal.l is never
380 written to memory, and the data written is just bogus.
381
382 To simulate this, we simulate movcal.l, we store the value to memory,
383 but we also remember the previous content. If we see ocbi, we check
384 if movcal.l for that address was done previously. If so, the write should
385 not have hit the memory, so we restore the previous content.
386 When we see an instruction that is neither movca.l
387 nor ocbi, the previous content is discarded.
388
389 To optimize, we only try to flush stores when we're at the start of
390 TB, or if we already saw movca.l in this TB and did not flush stores
391 yet. */
392 if (ctx->has_movcal)
393 {
394 int opcode = ctx->opcode & 0xf0ff;
395 if (opcode != 0x0093 /* ocbi */
396 && opcode != 0x00c3 /* movca.l */)
397 {
398 gen_helper_discard_movcal_backup(cpu_env);
399 ctx->has_movcal = 0;
400 }
401 }
402
403 #if 0
404 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
405 #endif
406
407 switch (ctx->opcode) {
408 case 0x0019: /* div0u */
409 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
410 return;
411 case 0x000b: /* rts */
412 CHECK_NOT_DELAY_SLOT
413 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
414 ctx->flags |= DELAY_SLOT;
415 ctx->delayed_pc = (uint32_t) - 1;
416 return;
417 case 0x0028: /* clrmac */
418 tcg_gen_movi_i32(cpu_mach, 0);
419 tcg_gen_movi_i32(cpu_macl, 0);
420 return;
421 case 0x0048: /* clrs */
422 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
423 return;
424 case 0x0008: /* clrt */
425 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
426 return;
427 case 0x0038: /* ldtlb */
428 CHECK_PRIVILEGED
429 gen_helper_ldtlb(cpu_env);
430 return;
431 case 0x002b: /* rte */
432 CHECK_PRIVILEGED
433 CHECK_NOT_DELAY_SLOT
434 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
435 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
436 ctx->flags |= DELAY_SLOT;
437 ctx->delayed_pc = (uint32_t) - 1;
438 return;
439 case 0x0058: /* sets */
440 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
441 return;
442 case 0x0018: /* sett */
443 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
444 return;
445 case 0xfbfd: /* frchg */
446 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
447 ctx->bstate = BS_STOP;
448 return;
449 case 0xf3fd: /* fschg */
450 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
451 ctx->bstate = BS_STOP;
452 return;
453 case 0x0009: /* nop */
454 return;
455 case 0x001b: /* sleep */
456 CHECK_PRIVILEGED
457 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
458 gen_helper_sleep(cpu_env);
459 return;
460 }
461
462 switch (ctx->opcode & 0xf000) {
463 case 0x1000: /* mov.l Rm,@(disp,Rn) */
464 {
465 TCGv addr = tcg_temp_new();
466 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
467 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
468 tcg_temp_free(addr);
469 }
470 return;
471 case 0x5000: /* mov.l @(disp,Rm),Rn */
472 {
473 TCGv addr = tcg_temp_new();
474 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
475 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
476 tcg_temp_free(addr);
477 }
478 return;
479 case 0xe000: /* mov #imm,Rn */
480 tcg_gen_movi_i32(REG(B11_8), B7_0s);
481 return;
482 case 0x9000: /* mov.w @(disp,PC),Rn */
483 {
484 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
485 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
486 tcg_temp_free(addr);
487 }
488 return;
489 case 0xd000: /* mov.l @(disp,PC),Rn */
490 {
491 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
492 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
493 tcg_temp_free(addr);
494 }
495 return;
496 case 0x7000: /* add #imm,Rn */
497 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
498 return;
499 case 0xa000: /* bra disp */
500 CHECK_NOT_DELAY_SLOT
501 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
502 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
503 ctx->flags |= DELAY_SLOT;
504 return;
505 case 0xb000: /* bsr disp */
506 CHECK_NOT_DELAY_SLOT
507 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
508 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
509 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
510 ctx->flags |= DELAY_SLOT;
511 return;
512 }
513
514 switch (ctx->opcode & 0xf00f) {
515 case 0x6003: /* mov Rm,Rn */
516 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
517 return;
518 case 0x2000: /* mov.b Rm,@Rn */
519 tcg_gen_qemu_st8(REG(B7_4), REG(B11_8), ctx->memidx);
520 return;
521 case 0x2001: /* mov.w Rm,@Rn */
522 tcg_gen_qemu_st16(REG(B7_4), REG(B11_8), ctx->memidx);
523 return;
524 case 0x2002: /* mov.l Rm,@Rn */
525 tcg_gen_qemu_st32(REG(B7_4), REG(B11_8), ctx->memidx);
526 return;
527 case 0x6000: /* mov.b @Rm,Rn */
528 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
529 return;
530 case 0x6001: /* mov.w @Rm,Rn */
531 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
532 return;
533 case 0x6002: /* mov.l @Rm,Rn */
534 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
535 return;
536 case 0x2004: /* mov.b Rm,@-Rn */
537 {
538 TCGv addr = tcg_temp_new();
539 tcg_gen_subi_i32(addr, REG(B11_8), 1);
540 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx); /* might cause re-execution */
541 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
542 tcg_temp_free(addr);
543 }
544 return;
545 case 0x2005: /* mov.w Rm,@-Rn */
546 {
547 TCGv addr = tcg_temp_new();
548 tcg_gen_subi_i32(addr, REG(B11_8), 2);
549 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
550 tcg_gen_mov_i32(REG(B11_8), addr);
551 tcg_temp_free(addr);
552 }
553 return;
554 case 0x2006: /* mov.l Rm,@-Rn */
555 {
556 TCGv addr = tcg_temp_new();
557 tcg_gen_subi_i32(addr, REG(B11_8), 4);
558 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
559 tcg_gen_mov_i32(REG(B11_8), addr);
560 }
561 return;
562 case 0x6004: /* mov.b @Rm+,Rn */
563 tcg_gen_qemu_ld8s(REG(B11_8), REG(B7_4), ctx->memidx);
564 if ( B11_8 != B7_4 )
565 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
566 return;
567 case 0x6005: /* mov.w @Rm+,Rn */
568 tcg_gen_qemu_ld16s(REG(B11_8), REG(B7_4), ctx->memidx);
569 if ( B11_8 != B7_4 )
570 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
571 return;
572 case 0x6006: /* mov.l @Rm+,Rn */
573 tcg_gen_qemu_ld32s(REG(B11_8), REG(B7_4), ctx->memidx);
574 if ( B11_8 != B7_4 )
575 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
576 return;
577 case 0x0004: /* mov.b Rm,@(R0,Rn) */
578 {
579 TCGv addr = tcg_temp_new();
580 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
581 tcg_gen_qemu_st8(REG(B7_4), addr, ctx->memidx);
582 tcg_temp_free(addr);
583 }
584 return;
585 case 0x0005: /* mov.w Rm,@(R0,Rn) */
586 {
587 TCGv addr = tcg_temp_new();
588 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
589 tcg_gen_qemu_st16(REG(B7_4), addr, ctx->memidx);
590 tcg_temp_free(addr);
591 }
592 return;
593 case 0x0006: /* mov.l Rm,@(R0,Rn) */
594 {
595 TCGv addr = tcg_temp_new();
596 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
597 tcg_gen_qemu_st32(REG(B7_4), addr, ctx->memidx);
598 tcg_temp_free(addr);
599 }
600 return;
601 case 0x000c: /* mov.b @(R0,Rm),Rn */
602 {
603 TCGv addr = tcg_temp_new();
604 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
605 tcg_gen_qemu_ld8s(REG(B11_8), addr, ctx->memidx);
606 tcg_temp_free(addr);
607 }
608 return;
609 case 0x000d: /* mov.w @(R0,Rm),Rn */
610 {
611 TCGv addr = tcg_temp_new();
612 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
613 tcg_gen_qemu_ld16s(REG(B11_8), addr, ctx->memidx);
614 tcg_temp_free(addr);
615 }
616 return;
617 case 0x000e: /* mov.l @(R0,Rm),Rn */
618 {
619 TCGv addr = tcg_temp_new();
620 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
621 tcg_gen_qemu_ld32s(REG(B11_8), addr, ctx->memidx);
622 tcg_temp_free(addr);
623 }
624 return;
625 case 0x6008: /* swap.b Rm,Rn */
626 {
627 TCGv high, low;
628 high = tcg_temp_new();
629 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
630 low = tcg_temp_new();
631 tcg_gen_ext16u_i32(low, REG(B7_4));
632 tcg_gen_bswap16_i32(low, low);
633 tcg_gen_or_i32(REG(B11_8), high, low);
634 tcg_temp_free(low);
635 tcg_temp_free(high);
636 }
637 return;
638 case 0x6009: /* swap.w Rm,Rn */
639 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
640 return;
641 case 0x200d: /* xtrct Rm,Rn */
642 {
643 TCGv high, low;
644 high = tcg_temp_new();
645 tcg_gen_shli_i32(high, REG(B7_4), 16);
646 low = tcg_temp_new();
647 tcg_gen_shri_i32(low, REG(B11_8), 16);
648 tcg_gen_or_i32(REG(B11_8), high, low);
649 tcg_temp_free(low);
650 tcg_temp_free(high);
651 }
652 return;
653 case 0x300c: /* add Rm,Rn */
654 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
655 return;
656 case 0x300e: /* addc Rm,Rn */
657 {
658 TCGv t0, t1, t2;
659 t0 = tcg_temp_new();
660 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
661 t1 = tcg_temp_new();
662 tcg_gen_add_i32(t1, REG(B7_4), REG(B11_8));
663 tcg_gen_add_i32(t0, t0, t1);
664 t2 = tcg_temp_new();
665 tcg_gen_setcond_i32(TCG_COND_GTU, t2, REG(B11_8), t1);
666 tcg_gen_setcond_i32(TCG_COND_GTU, t1, t1, t0);
667 tcg_gen_or_i32(t1, t1, t2);
668 tcg_temp_free(t2);
669 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
670 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
671 tcg_temp_free(t1);
672 tcg_gen_mov_i32(REG(B11_8), t0);
673 tcg_temp_free(t0);
674 }
675 return;
676 case 0x300f: /* addv Rm,Rn */
677 {
678 TCGv t0, t1, t2;
679 t0 = tcg_temp_new();
680 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
681 t1 = tcg_temp_new();
682 tcg_gen_xor_i32(t1, t0, REG(B11_8));
683 t2 = tcg_temp_new();
684 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
685 tcg_gen_andc_i32(t1, t1, t2);
686 tcg_temp_free(t2);
687 tcg_gen_shri_i32(t1, t1, 31);
688 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
689 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
690 tcg_temp_free(t1);
691 tcg_gen_mov_i32(REG(B7_4), t0);
692 tcg_temp_free(t0);
693 }
694 return;
695 case 0x2009: /* and Rm,Rn */
696 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
697 return;
698 case 0x3000: /* cmp/eq Rm,Rn */
699 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
700 return;
701 case 0x3003: /* cmp/ge Rm,Rn */
702 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
703 return;
704 case 0x3007: /* cmp/gt Rm,Rn */
705 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
706 return;
707 case 0x3006: /* cmp/hi Rm,Rn */
708 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
709 return;
710 case 0x3002: /* cmp/hs Rm,Rn */
711 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
712 return;
713 case 0x200c: /* cmp/str Rm,Rn */
714 {
715 TCGv cmp1 = tcg_temp_new();
716 TCGv cmp2 = tcg_temp_new();
717 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
718 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
719 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
720 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
721 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
722 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
723 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
724 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
725 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
726 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
727 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
728 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
729 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
730 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
731 tcg_temp_free(cmp2);
732 tcg_temp_free(cmp1);
733 }
734 return;
735 case 0x2007: /* div0s Rm,Rn */
736 {
737 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
738 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
739 TCGv val = tcg_temp_new();
740 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
741 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
742 tcg_temp_free(val);
743 }
744 return;
745 case 0x3004: /* div1 Rm,Rn */
746 gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
747 return;
748 case 0x300d: /* dmuls.l Rm,Rn */
749 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
750 return;
751 case 0x3005: /* dmulu.l Rm,Rn */
752 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
753 return;
754 case 0x600e: /* exts.b Rm,Rn */
755 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
756 return;
757 case 0x600f: /* exts.w Rm,Rn */
758 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
759 return;
760 case 0x600c: /* extu.b Rm,Rn */
761 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
762 return;
763 case 0x600d: /* extu.w Rm,Rn */
764 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
765 return;
766 case 0x000f: /* mac.l @Rm+,@Rn+ */
767 {
768 TCGv arg0, arg1;
769 arg0 = tcg_temp_new();
770 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
771 arg1 = tcg_temp_new();
772 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
773 gen_helper_macl(cpu_env, arg0, arg1);
774 tcg_temp_free(arg1);
775 tcg_temp_free(arg0);
776 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
777 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
778 }
779 return;
780 case 0x400f: /* mac.w @Rm+,@Rn+ */
781 {
782 TCGv arg0, arg1;
783 arg0 = tcg_temp_new();
784 tcg_gen_qemu_ld32s(arg0, REG(B7_4), ctx->memidx);
785 arg1 = tcg_temp_new();
786 tcg_gen_qemu_ld32s(arg1, REG(B11_8), ctx->memidx);
787 gen_helper_macw(cpu_env, arg0, arg1);
788 tcg_temp_free(arg1);
789 tcg_temp_free(arg0);
790 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
791 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
792 }
793 return;
794 case 0x0007: /* mul.l Rm,Rn */
795 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
796 return;
797 case 0x200f: /* muls.w Rm,Rn */
798 {
799 TCGv arg0, arg1;
800 arg0 = tcg_temp_new();
801 tcg_gen_ext16s_i32(arg0, REG(B7_4));
802 arg1 = tcg_temp_new();
803 tcg_gen_ext16s_i32(arg1, REG(B11_8));
804 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
805 tcg_temp_free(arg1);
806 tcg_temp_free(arg0);
807 }
808 return;
809 case 0x200e: /* mulu.w Rm,Rn */
810 {
811 TCGv arg0, arg1;
812 arg0 = tcg_temp_new();
813 tcg_gen_ext16u_i32(arg0, REG(B7_4));
814 arg1 = tcg_temp_new();
815 tcg_gen_ext16u_i32(arg1, REG(B11_8));
816 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
817 tcg_temp_free(arg1);
818 tcg_temp_free(arg0);
819 }
820 return;
821 case 0x600b: /* neg Rm,Rn */
822 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
823 return;
824 case 0x600a: /* negc Rm,Rn */
825 {
826 TCGv t0, t1;
827 t0 = tcg_temp_new();
828 tcg_gen_neg_i32(t0, REG(B7_4));
829 t1 = tcg_temp_new();
830 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
831 tcg_gen_sub_i32(REG(B11_8), t0, t1);
832 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
833 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
834 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
835 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
836 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
837 tcg_temp_free(t0);
838 tcg_temp_free(t1);
839 }
840 return;
841 case 0x6007: /* not Rm,Rn */
842 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
843 return;
844 case 0x200b: /* or Rm,Rn */
845 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
846 return;
847 case 0x400c: /* shad Rm,Rn */
848 {
849 int label1 = gen_new_label();
850 int label2 = gen_new_label();
851 int label3 = gen_new_label();
852 int label4 = gen_new_label();
853 TCGv shift;
854 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
855 /* Rm positive, shift to the left */
856 shift = tcg_temp_new();
857 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
858 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
859 tcg_temp_free(shift);
860 tcg_gen_br(label4);
861 /* Rm negative, shift to the right */
862 gen_set_label(label1);
863 shift = tcg_temp_new();
864 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
865 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
866 tcg_gen_not_i32(shift, REG(B7_4));
867 tcg_gen_andi_i32(shift, shift, 0x1f);
868 tcg_gen_addi_i32(shift, shift, 1);
869 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
870 tcg_temp_free(shift);
871 tcg_gen_br(label4);
872 /* Rm = -32 */
873 gen_set_label(label2);
874 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
875 tcg_gen_movi_i32(REG(B11_8), 0);
876 tcg_gen_br(label4);
877 gen_set_label(label3);
878 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
879 gen_set_label(label4);
880 }
881 return;
882 case 0x400d: /* shld Rm,Rn */
883 {
884 int label1 = gen_new_label();
885 int label2 = gen_new_label();
886 int label3 = gen_new_label();
887 TCGv shift;
888 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
889 /* Rm positive, shift to the left */
890 shift = tcg_temp_new();
891 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
892 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
893 tcg_temp_free(shift);
894 tcg_gen_br(label3);
895 /* Rm negative, shift to the right */
896 gen_set_label(label1);
897 shift = tcg_temp_new();
898 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
899 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
900 tcg_gen_not_i32(shift, REG(B7_4));
901 tcg_gen_andi_i32(shift, shift, 0x1f);
902 tcg_gen_addi_i32(shift, shift, 1);
903 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
904 tcg_temp_free(shift);
905 tcg_gen_br(label3);
906 /* Rm = -32 */
907 gen_set_label(label2);
908 tcg_gen_movi_i32(REG(B11_8), 0);
909 gen_set_label(label3);
910 }
911 return;
912 case 0x3008: /* sub Rm,Rn */
913 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
914 return;
915 case 0x300a: /* subc Rm,Rn */
916 {
917 TCGv t0, t1, t2;
918 t0 = tcg_temp_new();
919 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
920 t1 = tcg_temp_new();
921 tcg_gen_sub_i32(t1, REG(B11_8), REG(B7_4));
922 tcg_gen_sub_i32(t0, t1, t0);
923 t2 = tcg_temp_new();
924 tcg_gen_setcond_i32(TCG_COND_LTU, t2, REG(B11_8), t1);
925 tcg_gen_setcond_i32(TCG_COND_LTU, t1, t1, t0);
926 tcg_gen_or_i32(t1, t1, t2);
927 tcg_temp_free(t2);
928 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
929 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
930 tcg_temp_free(t1);
931 tcg_gen_mov_i32(REG(B11_8), t0);
932 tcg_temp_free(t0);
933 }
934 return;
935 case 0x300b: /* subv Rm,Rn */
936 {
937 TCGv t0, t1, t2;
938 t0 = tcg_temp_new();
939 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
940 t1 = tcg_temp_new();
941 tcg_gen_xor_i32(t1, t0, REG(B7_4));
942 t2 = tcg_temp_new();
943 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
944 tcg_gen_and_i32(t1, t1, t2);
945 tcg_temp_free(t2);
946 tcg_gen_shri_i32(t1, t1, 31);
947 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
948 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
949 tcg_temp_free(t1);
950 tcg_gen_mov_i32(REG(B11_8), t0);
951 tcg_temp_free(t0);
952 }
953 return;
954 case 0x2008: /* tst Rm,Rn */
955 {
956 TCGv val = tcg_temp_new();
957 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
958 gen_cmp_imm(TCG_COND_EQ, val, 0);
959 tcg_temp_free(val);
960 }
961 return;
962 case 0x200a: /* xor Rm,Rn */
963 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
964 return;
965 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
966 CHECK_FPU_ENABLED
967 if (ctx->flags & FPSCR_SZ) {
968 TCGv_i64 fp = tcg_temp_new_i64();
969 gen_load_fpr64(fp, XREG(B7_4));
970 gen_store_fpr64(fp, XREG(B11_8));
971 tcg_temp_free_i64(fp);
972 } else {
973 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
974 }
975 return;
976 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
977 CHECK_FPU_ENABLED
978 if (ctx->flags & FPSCR_SZ) {
979 TCGv addr_hi = tcg_temp_new();
980 int fr = XREG(B7_4);
981 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
982 tcg_gen_qemu_st32(cpu_fregs[fr ], REG(B11_8), ctx->memidx);
983 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr_hi, ctx->memidx);
984 tcg_temp_free(addr_hi);
985 } else {
986 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], REG(B11_8), ctx->memidx);
987 }
988 return;
989 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
990 CHECK_FPU_ENABLED
991 if (ctx->flags & FPSCR_SZ) {
992 TCGv addr_hi = tcg_temp_new();
993 int fr = XREG(B11_8);
994 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
995 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
996 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
997 tcg_temp_free(addr_hi);
998 } else {
999 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1000 }
1001 return;
1002 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1003 CHECK_FPU_ENABLED
1004 if (ctx->flags & FPSCR_SZ) {
1005 TCGv addr_hi = tcg_temp_new();
1006 int fr = XREG(B11_8);
1007 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1008 tcg_gen_qemu_ld32u(cpu_fregs[fr ], REG(B7_4), ctx->memidx);
1009 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr_hi, ctx->memidx);
1010 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1011 tcg_temp_free(addr_hi);
1012 } else {
1013 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], REG(B7_4), ctx->memidx);
1014 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1015 }
1016 return;
1017 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1018 CHECK_FPU_ENABLED
1019 if (ctx->flags & FPSCR_SZ) {
1020 TCGv addr = tcg_temp_new_i32();
1021 int fr = XREG(B7_4);
1022 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1023 tcg_gen_qemu_st32(cpu_fregs[fr+1], addr, ctx->memidx);
1024 tcg_gen_subi_i32(addr, addr, 4);
1025 tcg_gen_qemu_st32(cpu_fregs[fr ], addr, ctx->memidx);
1026 tcg_gen_mov_i32(REG(B11_8), addr);
1027 tcg_temp_free(addr);
1028 } else {
1029 TCGv addr;
1030 addr = tcg_temp_new_i32();
1031 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1032 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1033 tcg_gen_mov_i32(REG(B11_8), addr);
1034 tcg_temp_free(addr);
1035 }
1036 return;
1037 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1038 CHECK_FPU_ENABLED
1039 {
1040 TCGv addr = tcg_temp_new_i32();
1041 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1042 if (ctx->flags & FPSCR_SZ) {
1043 int fr = XREG(B11_8);
1044 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1045 tcg_gen_addi_i32(addr, addr, 4);
1046 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1047 } else {
1048 tcg_gen_qemu_ld32u(cpu_fregs[FREG(B11_8)], addr, ctx->memidx);
1049 }
1050 tcg_temp_free(addr);
1051 }
1052 return;
1053 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1054 CHECK_FPU_ENABLED
1055 {
1056 TCGv addr = tcg_temp_new();
1057 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1058 if (ctx->flags & FPSCR_SZ) {
1059 int fr = XREG(B7_4);
1060 tcg_gen_qemu_ld32u(cpu_fregs[fr ], addr, ctx->memidx);
1061 tcg_gen_addi_i32(addr, addr, 4);
1062 tcg_gen_qemu_ld32u(cpu_fregs[fr+1], addr, ctx->memidx);
1063 } else {
1064 tcg_gen_qemu_st32(cpu_fregs[FREG(B7_4)], addr, ctx->memidx);
1065 }
1066 tcg_temp_free(addr);
1067 }
1068 return;
1069 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1070 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1071 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1072 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1073 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1074 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1075 {
1076 CHECK_FPU_ENABLED
1077 if (ctx->flags & FPSCR_PR) {
1078 TCGv_i64 fp0, fp1;
1079
1080 if (ctx->opcode & 0x0110)
1081 break; /* illegal instruction */
1082 fp0 = tcg_temp_new_i64();
1083 fp1 = tcg_temp_new_i64();
1084 gen_load_fpr64(fp0, DREG(B11_8));
1085 gen_load_fpr64(fp1, DREG(B7_4));
1086 switch (ctx->opcode & 0xf00f) {
1087 case 0xf000: /* fadd Rm,Rn */
1088 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1089 break;
1090 case 0xf001: /* fsub Rm,Rn */
1091 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1092 break;
1093 case 0xf002: /* fmul Rm,Rn */
1094 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1095 break;
1096 case 0xf003: /* fdiv Rm,Rn */
1097 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1098 break;
1099 case 0xf004: /* fcmp/eq Rm,Rn */
1100 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1101 return;
1102 case 0xf005: /* fcmp/gt Rm,Rn */
1103 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1104 return;
1105 }
1106 gen_store_fpr64(fp0, DREG(B11_8));
1107 tcg_temp_free_i64(fp0);
1108 tcg_temp_free_i64(fp1);
1109 } else {
1110 switch (ctx->opcode & 0xf00f) {
1111 case 0xf000: /* fadd Rm,Rn */
1112 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1113 cpu_fregs[FREG(B11_8)],
1114 cpu_fregs[FREG(B7_4)]);
1115 break;
1116 case 0xf001: /* fsub Rm,Rn */
1117 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1118 cpu_fregs[FREG(B11_8)],
1119 cpu_fregs[FREG(B7_4)]);
1120 break;
1121 case 0xf002: /* fmul Rm,Rn */
1122 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1123 cpu_fregs[FREG(B11_8)],
1124 cpu_fregs[FREG(B7_4)]);
1125 break;
1126 case 0xf003: /* fdiv Rm,Rn */
1127 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1128 cpu_fregs[FREG(B11_8)],
1129 cpu_fregs[FREG(B7_4)]);
1130 break;
1131 case 0xf004: /* fcmp/eq Rm,Rn */
1132 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1133 cpu_fregs[FREG(B7_4)]);
1134 return;
1135 case 0xf005: /* fcmp/gt Rm,Rn */
1136 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1137 cpu_fregs[FREG(B7_4)]);
1138 return;
1139 }
1140 }
1141 }
1142 return;
1143 case 0xf00e: /* fmac FR0,RM,Rn */
1144 {
1145 CHECK_FPU_ENABLED
1146 if (ctx->flags & FPSCR_PR) {
1147 break; /* illegal instruction */
1148 } else {
1149 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1150 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1151 cpu_fregs[FREG(B11_8)]);
1152 return;
1153 }
1154 }
1155 }
1156
1157 switch (ctx->opcode & 0xff00) {
1158 case 0xc900: /* and #imm,R0 */
1159 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1160 return;
1161 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1162 {
1163 TCGv addr, val;
1164 addr = tcg_temp_new();
1165 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1166 val = tcg_temp_new();
1167 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1168 tcg_gen_andi_i32(val, val, B7_0);
1169 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1170 tcg_temp_free(val);
1171 tcg_temp_free(addr);
1172 }
1173 return;
1174 case 0x8b00: /* bf label */
1175 CHECK_NOT_DELAY_SLOT
1176 gen_conditional_jump(ctx, ctx->pc + 2,
1177 ctx->pc + 4 + B7_0s * 2);
1178 ctx->bstate = BS_BRANCH;
1179 return;
1180 case 0x8f00: /* bf/s label */
1181 CHECK_NOT_DELAY_SLOT
1182 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1183 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1184 return;
1185 case 0x8900: /* bt label */
1186 CHECK_NOT_DELAY_SLOT
1187 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1188 ctx->pc + 2);
1189 ctx->bstate = BS_BRANCH;
1190 return;
1191 case 0x8d00: /* bt/s label */
1192 CHECK_NOT_DELAY_SLOT
1193 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1194 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1195 return;
1196 case 0x8800: /* cmp/eq #imm,R0 */
1197 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1198 return;
1199 case 0xc400: /* mov.b @(disp,GBR),R0 */
1200 {
1201 TCGv addr = tcg_temp_new();
1202 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1203 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1204 tcg_temp_free(addr);
1205 }
1206 return;
1207 case 0xc500: /* mov.w @(disp,GBR),R0 */
1208 {
1209 TCGv addr = tcg_temp_new();
1210 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1211 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1212 tcg_temp_free(addr);
1213 }
1214 return;
1215 case 0xc600: /* mov.l @(disp,GBR),R0 */
1216 {
1217 TCGv addr = tcg_temp_new();
1218 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1219 tcg_gen_qemu_ld32s(REG(0), addr, ctx->memidx);
1220 tcg_temp_free(addr);
1221 }
1222 return;
1223 case 0xc000: /* mov.b R0,@(disp,GBR) */
1224 {
1225 TCGv addr = tcg_temp_new();
1226 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1227 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1228 tcg_temp_free(addr);
1229 }
1230 return;
1231 case 0xc100: /* mov.w R0,@(disp,GBR) */
1232 {
1233 TCGv addr = tcg_temp_new();
1234 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1235 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1236 tcg_temp_free(addr);
1237 }
1238 return;
1239 case 0xc200: /* mov.l R0,@(disp,GBR) */
1240 {
1241 TCGv addr = tcg_temp_new();
1242 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1243 tcg_gen_qemu_st32(REG(0), addr, ctx->memidx);
1244 tcg_temp_free(addr);
1245 }
1246 return;
1247 case 0x8000: /* mov.b R0,@(disp,Rn) */
1248 {
1249 TCGv addr = tcg_temp_new();
1250 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1251 tcg_gen_qemu_st8(REG(0), addr, ctx->memidx);
1252 tcg_temp_free(addr);
1253 }
1254 return;
1255 case 0x8100: /* mov.w R0,@(disp,Rn) */
1256 {
1257 TCGv addr = tcg_temp_new();
1258 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1259 tcg_gen_qemu_st16(REG(0), addr, ctx->memidx);
1260 tcg_temp_free(addr);
1261 }
1262 return;
1263 case 0x8400: /* mov.b @(disp,Rn),R0 */
1264 {
1265 TCGv addr = tcg_temp_new();
1266 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1267 tcg_gen_qemu_ld8s(REG(0), addr, ctx->memidx);
1268 tcg_temp_free(addr);
1269 }
1270 return;
1271 case 0x8500: /* mov.w @(disp,Rn),R0 */
1272 {
1273 TCGv addr = tcg_temp_new();
1274 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1275 tcg_gen_qemu_ld16s(REG(0), addr, ctx->memidx);
1276 tcg_temp_free(addr);
1277 }
1278 return;
1279 case 0xc700: /* mova @(disp,PC),R0 */
1280 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1281 return;
1282 case 0xcb00: /* or #imm,R0 */
1283 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1284 return;
1285 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1286 {
1287 TCGv addr, val;
1288 addr = tcg_temp_new();
1289 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1290 val = tcg_temp_new();
1291 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1292 tcg_gen_ori_i32(val, val, B7_0);
1293 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1294 tcg_temp_free(val);
1295 tcg_temp_free(addr);
1296 }
1297 return;
1298 case 0xc300: /* trapa #imm */
1299 {
1300 TCGv imm;
1301 CHECK_NOT_DELAY_SLOT
1302 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1303 imm = tcg_const_i32(B7_0);
1304 gen_helper_trapa(cpu_env, imm);
1305 tcg_temp_free(imm);
1306 ctx->bstate = BS_BRANCH;
1307 }
1308 return;
1309 case 0xc800: /* tst #imm,R0 */
1310 {
1311 TCGv val = tcg_temp_new();
1312 tcg_gen_andi_i32(val, REG(0), B7_0);
1313 gen_cmp_imm(TCG_COND_EQ, val, 0);
1314 tcg_temp_free(val);
1315 }
1316 return;
1317 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1318 {
1319 TCGv val = tcg_temp_new();
1320 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1321 tcg_gen_qemu_ld8u(val, val, ctx->memidx);
1322 tcg_gen_andi_i32(val, val, B7_0);
1323 gen_cmp_imm(TCG_COND_EQ, val, 0);
1324 tcg_temp_free(val);
1325 }
1326 return;
1327 case 0xca00: /* xor #imm,R0 */
1328 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1329 return;
1330 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1331 {
1332 TCGv addr, val;
1333 addr = tcg_temp_new();
1334 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1335 val = tcg_temp_new();
1336 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1337 tcg_gen_xori_i32(val, val, B7_0);
1338 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1339 tcg_temp_free(val);
1340 tcg_temp_free(addr);
1341 }
1342 return;
1343 }
1344
1345 switch (ctx->opcode & 0xf08f) {
1346 case 0x408e: /* ldc Rm,Rn_BANK */
1347 CHECK_PRIVILEGED
1348 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1349 return;
1350 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1351 CHECK_PRIVILEGED
1352 tcg_gen_qemu_ld32s(ALTREG(B6_4), REG(B11_8), ctx->memidx);
1353 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1354 return;
1355 case 0x0082: /* stc Rm_BANK,Rn */
1356 CHECK_PRIVILEGED
1357 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1358 return;
1359 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1360 CHECK_PRIVILEGED
1361 {
1362 TCGv addr = tcg_temp_new();
1363 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1364 tcg_gen_qemu_st32(ALTREG(B6_4), addr, ctx->memidx);
1365 tcg_gen_mov_i32(REG(B11_8), addr);
1366 tcg_temp_free(addr);
1367 }
1368 return;
1369 }
1370
1371 switch (ctx->opcode & 0xf0ff) {
1372 case 0x0023: /* braf Rn */
1373 CHECK_NOT_DELAY_SLOT
1374 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1375 ctx->flags |= DELAY_SLOT;
1376 ctx->delayed_pc = (uint32_t) - 1;
1377 return;
1378 case 0x0003: /* bsrf Rn */
1379 CHECK_NOT_DELAY_SLOT
1380 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1381 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1382 ctx->flags |= DELAY_SLOT;
1383 ctx->delayed_pc = (uint32_t) - 1;
1384 return;
1385 case 0x4015: /* cmp/pl Rn */
1386 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1387 return;
1388 case 0x4011: /* cmp/pz Rn */
1389 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1390 return;
1391 case 0x4010: /* dt Rn */
1392 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1393 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1394 return;
1395 case 0x402b: /* jmp @Rn */
1396 CHECK_NOT_DELAY_SLOT
1397 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1398 ctx->flags |= DELAY_SLOT;
1399 ctx->delayed_pc = (uint32_t) - 1;
1400 return;
1401 case 0x400b: /* jsr @Rn */
1402 CHECK_NOT_DELAY_SLOT
1403 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1404 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1405 ctx->flags |= DELAY_SLOT;
1406 ctx->delayed_pc = (uint32_t) - 1;
1407 return;
1408 case 0x400e: /* ldc Rm,SR */
1409 CHECK_PRIVILEGED
1410 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1411 ctx->bstate = BS_STOP;
1412 return;
1413 case 0x4007: /* ldc.l @Rm+,SR */
1414 CHECK_PRIVILEGED
1415 {
1416 TCGv val = tcg_temp_new();
1417 tcg_gen_qemu_ld32s(val, REG(B11_8), ctx->memidx);
1418 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1419 tcg_temp_free(val);
1420 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1421 ctx->bstate = BS_STOP;
1422 }
1423 return;
1424 case 0x0002: /* stc SR,Rn */
1425 CHECK_PRIVILEGED
1426 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1427 return;
1428 case 0x4003: /* stc SR,@-Rn */
1429 CHECK_PRIVILEGED
1430 {
1431 TCGv addr = tcg_temp_new();
1432 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1433 tcg_gen_qemu_st32(cpu_sr, addr, ctx->memidx);
1434 tcg_gen_mov_i32(REG(B11_8), addr);
1435 tcg_temp_free(addr);
1436 }
1437 return;
1438 #define LD(reg,ldnum,ldpnum,prechk) \
1439 case ldnum: \
1440 prechk \
1441 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1442 return; \
1443 case ldpnum: \
1444 prechk \
1445 tcg_gen_qemu_ld32s (cpu_##reg, REG(B11_8), ctx->memidx); \
1446 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1447 return;
1448 #define ST(reg,stnum,stpnum,prechk) \
1449 case stnum: \
1450 prechk \
1451 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1452 return; \
1453 case stpnum: \
1454 prechk \
1455 { \
1456 TCGv addr = tcg_temp_new(); \
1457 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1458 tcg_gen_qemu_st32 (cpu_##reg, addr, ctx->memidx); \
1459 tcg_gen_mov_i32(REG(B11_8), addr); \
1460 tcg_temp_free(addr); \
1461 } \
1462 return;
1463 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1464 LD(reg,ldnum,ldpnum,prechk) \
1465 ST(reg,stnum,stpnum,prechk)
1466 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1467 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1468 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1469 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1470 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1471 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1472 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1473 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1474 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1475 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1476 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1477 case 0x406a: /* lds Rm,FPSCR */
1478 CHECK_FPU_ENABLED
1479 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1480 ctx->bstate = BS_STOP;
1481 return;
1482 case 0x4066: /* lds.l @Rm+,FPSCR */
1483 CHECK_FPU_ENABLED
1484 {
1485 TCGv addr = tcg_temp_new();
1486 tcg_gen_qemu_ld32s(addr, REG(B11_8), ctx->memidx);
1487 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1488 gen_helper_ld_fpscr(cpu_env, addr);
1489 tcg_temp_free(addr);
1490 ctx->bstate = BS_STOP;
1491 }
1492 return;
1493 case 0x006a: /* sts FPSCR,Rn */
1494 CHECK_FPU_ENABLED
1495 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1496 return;
1497 case 0x4062: /* sts FPSCR,@-Rn */
1498 CHECK_FPU_ENABLED
1499 {
1500 TCGv addr, val;
1501 val = tcg_temp_new();
1502 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1503 addr = tcg_temp_new();
1504 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1505 tcg_gen_qemu_st32(val, addr, ctx->memidx);
1506 tcg_gen_mov_i32(REG(B11_8), addr);
1507 tcg_temp_free(addr);
1508 tcg_temp_free(val);
1509 }
1510 return;
1511 case 0x00c3: /* movca.l R0,@Rm */
1512 {
1513 TCGv val = tcg_temp_new();
1514 tcg_gen_qemu_ld32u(val, REG(B11_8), ctx->memidx);
1515 gen_helper_movcal(cpu_env, REG(B11_8), val);
1516 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1517 }
1518 ctx->has_movcal = 1;
1519 return;
1520 case 0x40a9:
1521 /* MOVUA.L @Rm,R0 (Rm) -> R0
1522 Load non-boundary-aligned data */
1523 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1524 return;
1525 case 0x40e9:
1526 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1527 Load non-boundary-aligned data */
1528 tcg_gen_qemu_ld32u(REG(0), REG(B11_8), ctx->memidx);
1529 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1530 return;
1531 case 0x0029: /* movt Rn */
1532 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1533 return;
1534 case 0x0073:
1535 /* MOVCO.L
1536 LDST -> T
1537 If (T == 1) R0 -> (Rn)
1538 0 -> LDST
1539 */
1540 if (ctx->features & SH_FEATURE_SH4A) {
1541 int label = gen_new_label();
1542 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1543 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1544 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1545 tcg_gen_qemu_st32(REG(0), REG(B11_8), ctx->memidx);
1546 gen_set_label(label);
1547 tcg_gen_movi_i32(cpu_ldst, 0);
1548 return;
1549 } else
1550 break;
1551 case 0x0063:
1552 /* MOVLI.L @Rm,R0
1553 1 -> LDST
1554 (Rm) -> R0
1555 When interrupt/exception
1556 occurred 0 -> LDST
1557 */
1558 if (ctx->features & SH_FEATURE_SH4A) {
1559 tcg_gen_movi_i32(cpu_ldst, 0);
1560 tcg_gen_qemu_ld32s(REG(0), REG(B11_8), ctx->memidx);
1561 tcg_gen_movi_i32(cpu_ldst, 1);
1562 return;
1563 } else
1564 break;
1565 case 0x0093: /* ocbi @Rn */
1566 {
1567 gen_helper_ocbi(cpu_env, REG(B11_8));
1568 }
1569 return;
1570 case 0x00a3: /* ocbp @Rn */
1571 case 0x00b3: /* ocbwb @Rn */
1572 /* These instructions are supposed to do nothing in case of
1573 a cache miss. Given that we only partially emulate caches
1574 it is safe to simply ignore them. */
1575 return;
1576 case 0x0083: /* pref @Rn */
1577 return;
1578 case 0x00d3: /* prefi @Rn */
1579 if (ctx->features & SH_FEATURE_SH4A)
1580 return;
1581 else
1582 break;
1583 case 0x00e3: /* icbi @Rn */
1584 if (ctx->features & SH_FEATURE_SH4A)
1585 return;
1586 else
1587 break;
1588 case 0x00ab: /* synco */
1589 if (ctx->features & SH_FEATURE_SH4A)
1590 return;
1591 else
1592 break;
1593 case 0x4024: /* rotcl Rn */
1594 {
1595 TCGv tmp = tcg_temp_new();
1596 tcg_gen_mov_i32(tmp, cpu_sr);
1597 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1598 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1599 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1600 tcg_temp_free(tmp);
1601 }
1602 return;
1603 case 0x4025: /* rotcr Rn */
1604 {
1605 TCGv tmp = tcg_temp_new();
1606 tcg_gen_mov_i32(tmp, cpu_sr);
1607 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1608 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1609 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1610 tcg_temp_free(tmp);
1611 }
1612 return;
1613 case 0x4004: /* rotl Rn */
1614 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1615 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1616 return;
1617 case 0x4005: /* rotr Rn */
1618 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1619 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1620 return;
1621 case 0x4000: /* shll Rn */
1622 case 0x4020: /* shal Rn */
1623 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1624 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1625 return;
1626 case 0x4021: /* shar Rn */
1627 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1628 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1629 return;
1630 case 0x4001: /* shlr Rn */
1631 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1632 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1633 return;
1634 case 0x4008: /* shll2 Rn */
1635 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1636 return;
1637 case 0x4018: /* shll8 Rn */
1638 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1639 return;
1640 case 0x4028: /* shll16 Rn */
1641 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1642 return;
1643 case 0x4009: /* shlr2 Rn */
1644 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1645 return;
1646 case 0x4019: /* shlr8 Rn */
1647 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1648 return;
1649 case 0x4029: /* shlr16 Rn */
1650 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1651 return;
1652 case 0x401b: /* tas.b @Rn */
1653 {
1654 TCGv addr, val;
1655 addr = tcg_temp_local_new();
1656 tcg_gen_mov_i32(addr, REG(B11_8));
1657 val = tcg_temp_local_new();
1658 tcg_gen_qemu_ld8u(val, addr, ctx->memidx);
1659 gen_cmp_imm(TCG_COND_EQ, val, 0);
1660 tcg_gen_ori_i32(val, val, 0x80);
1661 tcg_gen_qemu_st8(val, addr, ctx->memidx);
1662 tcg_temp_free(val);
1663 tcg_temp_free(addr);
1664 }
1665 return;
1666 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1667 CHECK_FPU_ENABLED
1668 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1669 return;
1670 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1671 CHECK_FPU_ENABLED
1672 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1673 return;
1674 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1675 CHECK_FPU_ENABLED
1676 if (ctx->flags & FPSCR_PR) {
1677 TCGv_i64 fp;
1678 if (ctx->opcode & 0x0100)
1679 break; /* illegal instruction */
1680 fp = tcg_temp_new_i64();
1681 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1682 gen_store_fpr64(fp, DREG(B11_8));
1683 tcg_temp_free_i64(fp);
1684 }
1685 else {
1686 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1687 }
1688 return;
1689 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1690 CHECK_FPU_ENABLED
1691 if (ctx->flags & FPSCR_PR) {
1692 TCGv_i64 fp;
1693 if (ctx->opcode & 0x0100)
1694 break; /* illegal instruction */
1695 fp = tcg_temp_new_i64();
1696 gen_load_fpr64(fp, DREG(B11_8));
1697 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1698 tcg_temp_free_i64(fp);
1699 }
1700 else {
1701 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1702 }
1703 return;
1704 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1705 CHECK_FPU_ENABLED
1706 {
1707 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1708 }
1709 return;
1710 case 0xf05d: /* fabs FRn/DRn */
1711 CHECK_FPU_ENABLED
1712 if (ctx->flags & FPSCR_PR) {
1713 if (ctx->opcode & 0x0100)
1714 break; /* illegal instruction */
1715 TCGv_i64 fp = tcg_temp_new_i64();
1716 gen_load_fpr64(fp, DREG(B11_8));
1717 gen_helper_fabs_DT(fp, fp);
1718 gen_store_fpr64(fp, DREG(B11_8));
1719 tcg_temp_free_i64(fp);
1720 } else {
1721 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1722 }
1723 return;
1724 case 0xf06d: /* fsqrt FRn */
1725 CHECK_FPU_ENABLED
1726 if (ctx->flags & FPSCR_PR) {
1727 if (ctx->opcode & 0x0100)
1728 break; /* illegal instruction */
1729 TCGv_i64 fp = tcg_temp_new_i64();
1730 gen_load_fpr64(fp, DREG(B11_8));
1731 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1732 gen_store_fpr64(fp, DREG(B11_8));
1733 tcg_temp_free_i64(fp);
1734 } else {
1735 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1736 cpu_fregs[FREG(B11_8)]);
1737 }
1738 return;
1739 case 0xf07d: /* fsrra FRn */
1740 CHECK_FPU_ENABLED
1741 break;
1742 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1743 CHECK_FPU_ENABLED
1744 if (!(ctx->flags & FPSCR_PR)) {
1745 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1746 }
1747 return;
1748 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1749 CHECK_FPU_ENABLED
1750 if (!(ctx->flags & FPSCR_PR)) {
1751 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1752 }
1753 return;
1754 case 0xf0ad: /* fcnvsd FPUL,DRn */
1755 CHECK_FPU_ENABLED
1756 {
1757 TCGv_i64 fp = tcg_temp_new_i64();
1758 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1759 gen_store_fpr64(fp, DREG(B11_8));
1760 tcg_temp_free_i64(fp);
1761 }
1762 return;
1763 case 0xf0bd: /* fcnvds DRn,FPUL */
1764 CHECK_FPU_ENABLED
1765 {
1766 TCGv_i64 fp = tcg_temp_new_i64();
1767 gen_load_fpr64(fp, DREG(B11_8));
1768 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1769 tcg_temp_free_i64(fp);
1770 }
1771 return;
1772 case 0xf0ed: /* fipr FVm,FVn */
1773 CHECK_FPU_ENABLED
1774 if ((ctx->flags & FPSCR_PR) == 0) {
1775 TCGv m, n;
1776 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1777 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1778 gen_helper_fipr(cpu_env, m, n);
1779 tcg_temp_free(m);
1780 tcg_temp_free(n);
1781 return;
1782 }
1783 break;
1784 case 0xf0fd: /* ftrv XMTRX,FVn */
1785 CHECK_FPU_ENABLED
1786 if ((ctx->opcode & 0x0300) == 0x0100 &&
1787 (ctx->flags & FPSCR_PR) == 0) {
1788 TCGv n;
1789 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1790 gen_helper_ftrv(cpu_env, n);
1791 tcg_temp_free(n);
1792 return;
1793 }
1794 break;
1795 }
1796 #if 0
1797 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1798 ctx->opcode, ctx->pc);
1799 fflush(stderr);
1800 #endif
1801 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1802 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1803 gen_helper_raise_slot_illegal_instruction(cpu_env);
1804 } else {
1805 gen_helper_raise_illegal_instruction(cpu_env);
1806 }
1807 ctx->bstate = BS_BRANCH;
1808 }
1809
/* Translate the single instruction at ctx->pc and emit the delay-slot
 * epilogue: if this instruction itself occupied a branch delay slot,
 * flush the translated flag state and perform the jump that the
 * preceding branch recorded; if it *opens* a delay slot, persist the
 * in-delay-slot flags for the next instruction / TB.
 */
static void decode_opc(DisasContext * ctx)
{
    /* Snapshot flags before decoding: tells us whether THIS insn is
       executing inside a delay slot set up by the previous insn. */
    uint32_t old_flags = ctx->flags;

    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
        tcg_gen_debug_insn_start(ctx->pc);
    }

    _decode_opc(ctx);

    if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
        if (ctx->flags & DELAY_SLOT_CLEARME) {
            gen_store_flags(0);
        } else {
            /* go out of the delay slot */
            uint32_t new_flags = ctx->flags;
            new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
            gen_store_flags(new_flags);
        }
        /* The branch consumed the slot: this TB ends here. */
        ctx->flags = 0;
        ctx->bstate = BS_BRANCH;
        if (old_flags & DELAY_SLOT_CONDITIONAL) {
            gen_delayed_conditional_jump(ctx);
        } else if (old_flags & DELAY_SLOT) {
            gen_jump(ctx);
        }

    }

    /* go into a delay slot: _decode_opc just translated a delayed
       branch, so make the flags visible to the CPU state in case the
       slot instruction faults or the TB is interrupted. */
    if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
        gen_store_flags(ctx->flags);
}
1843
1844 static inline void
1845 gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
1846 bool search_pc)
1847 {
1848 CPUState *cs = CPU(cpu);
1849 CPUSH4State *env = &cpu->env;
1850 DisasContext ctx;
1851 target_ulong pc_start;
1852 static uint16_t *gen_opc_end;
1853 CPUBreakpoint *bp;
1854 int i, ii;
1855 int num_insns;
1856 int max_insns;
1857
1858 pc_start = tb->pc;
1859 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
1860 ctx.pc = pc_start;
1861 ctx.flags = (uint32_t)tb->flags;
1862 ctx.bstate = BS_NONE;
1863 ctx.memidx = (ctx.flags & SR_MD) == 0 ? 1 : 0;
1864 /* We don't know if the delayed pc came from a dynamic or static branch,
1865 so assume it is a dynamic branch. */
1866 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1867 ctx.tb = tb;
1868 ctx.singlestep_enabled = cs->singlestep_enabled;
1869 ctx.features = env->features;
1870 ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
1871
1872 ii = -1;
1873 num_insns = 0;
1874 max_insns = tb->cflags & CF_COUNT_MASK;
1875 if (max_insns == 0)
1876 max_insns = CF_COUNT_MASK;
1877 gen_tb_start();
1878 while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end) {
1879 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
1880 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1881 if (ctx.pc == bp->pc) {
1882 /* We have hit a breakpoint - make sure PC is up-to-date */
1883 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1884 gen_helper_debug(cpu_env);
1885 ctx.bstate = BS_BRANCH;
1886 break;
1887 }
1888 }
1889 }
1890 if (search_pc) {
1891 i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1892 if (ii < i) {
1893 ii++;
1894 while (ii < i)
1895 tcg_ctx.gen_opc_instr_start[ii++] = 0;
1896 }
1897 tcg_ctx.gen_opc_pc[ii] = ctx.pc;
1898 gen_opc_hflags[ii] = ctx.flags;
1899 tcg_ctx.gen_opc_instr_start[ii] = 1;
1900 tcg_ctx.gen_opc_icount[ii] = num_insns;
1901 }
1902 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1903 gen_io_start();
1904 #if 0
1905 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1906 fflush(stderr);
1907 #endif
1908 ctx.opcode = cpu_lduw_code(env, ctx.pc);
1909 decode_opc(&ctx);
1910 num_insns++;
1911 ctx.pc += 2;
1912 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1913 break;
1914 if (cs->singlestep_enabled) {
1915 break;
1916 }
1917 if (num_insns >= max_insns)
1918 break;
1919 if (singlestep)
1920 break;
1921 }
1922 if (tb->cflags & CF_LAST_IO)
1923 gen_io_end();
1924 if (cs->singlestep_enabled) {
1925 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1926 gen_helper_debug(cpu_env);
1927 } else {
1928 switch (ctx.bstate) {
1929 case BS_STOP:
1930 /* gen_op_interrupt_restart(); */
1931 /* fall through */
1932 case BS_NONE:
1933 if (ctx.flags) {
1934 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1935 }
1936 gen_goto_tb(&ctx, 0, ctx.pc);
1937 break;
1938 case BS_EXCP:
1939 /* gen_op_interrupt_restart(); */
1940 tcg_gen_exit_tb(0);
1941 break;
1942 case BS_BRANCH:
1943 default:
1944 break;
1945 }
1946 }
1947
1948 gen_tb_end(tb, num_insns);
1949 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
1950 if (search_pc) {
1951 i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1952 ii++;
1953 while (ii <= i)
1954 tcg_ctx.gen_opc_instr_start[ii++] = 0;
1955 } else {
1956 tb->size = ctx.pc - pc_start;
1957 tb->icount = num_insns;
1958 }
1959
1960 #ifdef DEBUG_DISAS
1961 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1962 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1963 log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
1964 qemu_log("\n");
1965 }
1966 #endif
1967 }
1968
1969 void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
1970 {
1971 gen_intermediate_code_internal(sh_env_get_cpu(env), tb, false);
1972 }
1973
1974 void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
1975 {
1976 gen_intermediate_code_internal(sh_env_get_cpu(env), tb, true);
1977 }
1978
1979 void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
1980 {
1981 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
1982 env->flags = gen_opc_hflags[pc_pos];
1983 }