]> git.proxmox.com Git - mirror_qemu.git/blob - target-sh4/translate.c
169c87fc1b02ecb256e4a04b12c1d5ea437fedb5
[mirror_qemu.git] / target-sh4 / translate.c
1 /*
2 * SH4 translation
3 *
4 * Copyright (c) 2005 Samuel Tardieu
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #define DEBUG_DISAS
21 //#define SH4_SINGLE_STEP
22
23 #include "cpu.h"
24 #include "disas/disas.h"
25 #include "tcg-op.h"
26
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29
/* Per-translation-block state threaded through the SH4 decoder. */
typedef struct DisasContext {
    struct TranslationBlock *tb;   /* TB currently being translated */
    target_ulong pc;               /* guest PC of the instruction being decoded */
    uint16_t opcode;               /* raw 16-bit SH4 opcode */
    uint32_t flags;                /* translation-time flags: SR bits (MD/RB/FD),
                                      FPSCR bits and DELAY_SLOT* state (see REG,
                                      FREG and CHECK_* macros below) */
    int bstate;                    /* one of BS_NONE/BS_STOP/BS_BRANCH/BS_EXCP */
    int memidx;                    /* MMU index used for qemu_ld/st operations */
    uint32_t delayed_pc;           /* pending delayed-branch target;
                                      (uint32_t)-1 when only known at run time */
    int singlestep_enabled;        /* emit gen_helper_debug instead of chaining TBs */
    uint32_t features;             /* CPU feature bits; not used in this chunk —
                                      presumably checked per-insn, confirm below */
    int has_movcal;                /* nonzero while a movca.l backup may be live
                                      (see the movcal emulation comment in
                                      _decode_opc) */
} DisasContext;
42
/* Privilege test: user-only builds are always unprivileged from the CPU's
   point of view; in system mode privilege is SR.MD in the translation-time
   flags. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(ctx) 1
#else
#define IS_USER(ctx) (!(ctx->flags & SR_MD))
#endif

enum {
    BS_NONE = 0, /* We go out of the TB without reaching a branch or an
                  * exception condition
                  */
    BS_STOP = 1, /* We want to stop translation for any reason */
    BS_BRANCH = 2, /* We reached a branch condition */
    BS_EXCP = 3, /* We reached an exception condition */
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_gregs[24];   /* R0-R15 plus the R0-R7 shadow bank (see REG()) */
static TCGv cpu_pc, cpu_sr, cpu_ssr, cpu_spc, cpu_gbr;
static TCGv cpu_vbr, cpu_sgr, cpu_dbr, cpu_mach, cpu_macl;
static TCGv cpu_pr, cpu_fpscr, cpu_fpul, cpu_ldst;
static TCGv cpu_fregs[32];   /* two banks of 16 single-precision FP registers */

/* internal register indexes */
static TCGv cpu_flags, cpu_delayed_pc;

/* Per-insn flags recorded during translation; presumably consumed by the
   restore-state machinery further down this file — confirm there. */
static uint32_t gen_opc_hflags[OPC_BUF_SIZE];

#include "exec/gen-icount.h"
72
73 void sh4_translate_init(void)
74 {
75 int i;
76 static int done_init = 0;
77 static const char * const gregnames[24] = {
78 "R0_BANK0", "R1_BANK0", "R2_BANK0", "R3_BANK0",
79 "R4_BANK0", "R5_BANK0", "R6_BANK0", "R7_BANK0",
80 "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15",
81 "R0_BANK1", "R1_BANK1", "R2_BANK1", "R3_BANK1",
82 "R4_BANK1", "R5_BANK1", "R6_BANK1", "R7_BANK1"
83 };
84 static const char * const fregnames[32] = {
85 "FPR0_BANK0", "FPR1_BANK0", "FPR2_BANK0", "FPR3_BANK0",
86 "FPR4_BANK0", "FPR5_BANK0", "FPR6_BANK0", "FPR7_BANK0",
87 "FPR8_BANK0", "FPR9_BANK0", "FPR10_BANK0", "FPR11_BANK0",
88 "FPR12_BANK0", "FPR13_BANK0", "FPR14_BANK0", "FPR15_BANK0",
89 "FPR0_BANK1", "FPR1_BANK1", "FPR2_BANK1", "FPR3_BANK1",
90 "FPR4_BANK1", "FPR5_BANK1", "FPR6_BANK1", "FPR7_BANK1",
91 "FPR8_BANK1", "FPR9_BANK1", "FPR10_BANK1", "FPR11_BANK1",
92 "FPR12_BANK1", "FPR13_BANK1", "FPR14_BANK1", "FPR15_BANK1",
93 };
94
95 if (done_init)
96 return;
97
98 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
99
100 for (i = 0; i < 24; i++)
101 cpu_gregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
102 offsetof(CPUSH4State, gregs[i]),
103 gregnames[i]);
104
105 cpu_pc = tcg_global_mem_new_i32(TCG_AREG0,
106 offsetof(CPUSH4State, pc), "PC");
107 cpu_sr = tcg_global_mem_new_i32(TCG_AREG0,
108 offsetof(CPUSH4State, sr), "SR");
109 cpu_ssr = tcg_global_mem_new_i32(TCG_AREG0,
110 offsetof(CPUSH4State, ssr), "SSR");
111 cpu_spc = tcg_global_mem_new_i32(TCG_AREG0,
112 offsetof(CPUSH4State, spc), "SPC");
113 cpu_gbr = tcg_global_mem_new_i32(TCG_AREG0,
114 offsetof(CPUSH4State, gbr), "GBR");
115 cpu_vbr = tcg_global_mem_new_i32(TCG_AREG0,
116 offsetof(CPUSH4State, vbr), "VBR");
117 cpu_sgr = tcg_global_mem_new_i32(TCG_AREG0,
118 offsetof(CPUSH4State, sgr), "SGR");
119 cpu_dbr = tcg_global_mem_new_i32(TCG_AREG0,
120 offsetof(CPUSH4State, dbr), "DBR");
121 cpu_mach = tcg_global_mem_new_i32(TCG_AREG0,
122 offsetof(CPUSH4State, mach), "MACH");
123 cpu_macl = tcg_global_mem_new_i32(TCG_AREG0,
124 offsetof(CPUSH4State, macl), "MACL");
125 cpu_pr = tcg_global_mem_new_i32(TCG_AREG0,
126 offsetof(CPUSH4State, pr), "PR");
127 cpu_fpscr = tcg_global_mem_new_i32(TCG_AREG0,
128 offsetof(CPUSH4State, fpscr), "FPSCR");
129 cpu_fpul = tcg_global_mem_new_i32(TCG_AREG0,
130 offsetof(CPUSH4State, fpul), "FPUL");
131
132 cpu_flags = tcg_global_mem_new_i32(TCG_AREG0,
133 offsetof(CPUSH4State, flags), "_flags_");
134 cpu_delayed_pc = tcg_global_mem_new_i32(TCG_AREG0,
135 offsetof(CPUSH4State, delayed_pc),
136 "_delayed_pc_");
137 cpu_ldst = tcg_global_mem_new_i32(TCG_AREG0,
138 offsetof(CPUSH4State, ldst), "_ldst_");
139
140 for (i = 0; i < 32; i++)
141 cpu_fregs[i] = tcg_global_mem_new_i32(TCG_AREG0,
142 offsetof(CPUSH4State, fregs[i]),
143 fregnames[i]);
144
145 done_init = 1;
146 }
147
148 void superh_cpu_dump_state(CPUState *cs, FILE *f,
149 fprintf_function cpu_fprintf, int flags)
150 {
151 SuperHCPU *cpu = SUPERH_CPU(cs);
152 CPUSH4State *env = &cpu->env;
153 int i;
154 cpu_fprintf(f, "pc=0x%08x sr=0x%08x pr=0x%08x fpscr=0x%08x\n",
155 env->pc, env->sr, env->pr, env->fpscr);
156 cpu_fprintf(f, "spc=0x%08x ssr=0x%08x gbr=0x%08x vbr=0x%08x\n",
157 env->spc, env->ssr, env->gbr, env->vbr);
158 cpu_fprintf(f, "sgr=0x%08x dbr=0x%08x delayed_pc=0x%08x fpul=0x%08x\n",
159 env->sgr, env->dbr, env->delayed_pc, env->fpul);
160 for (i = 0; i < 24; i += 4) {
161 cpu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
162 i, env->gregs[i], i + 1, env->gregs[i + 1],
163 i + 2, env->gregs[i + 2], i + 3, env->gregs[i + 3]);
164 }
165 if (env->flags & DELAY_SLOT) {
166 cpu_fprintf(f, "in delay slot (delayed_pc=0x%08x)\n",
167 env->delayed_pc);
168 } else if (env->flags & DELAY_SLOT_CONDITIONAL) {
169 cpu_fprintf(f, "in conditional delay slot (delayed_pc=0x%08x)\n",
170 env->delayed_pc);
171 }
172 }
173
174 static void gen_goto_tb(DisasContext * ctx, int n, target_ulong dest)
175 {
176 TranslationBlock *tb;
177 tb = ctx->tb;
178
179 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
180 !ctx->singlestep_enabled) {
181 /* Use a direct jump if in same page and singlestep not enabled */
182 tcg_gen_goto_tb(n);
183 tcg_gen_movi_i32(cpu_pc, dest);
184 tcg_gen_exit_tb((uintptr_t)tb + n);
185 } else {
186 tcg_gen_movi_i32(cpu_pc, dest);
187 if (ctx->singlestep_enabled)
188 gen_helper_debug(cpu_env);
189 tcg_gen_exit_tb(0);
190 }
191 }
192
193 static void gen_jump(DisasContext * ctx)
194 {
195 if (ctx->delayed_pc == (uint32_t) - 1) {
196 /* Target is not statically known, it comes necessarily from a
197 delayed jump as immediate jump are conditinal jumps */
198 tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc);
199 if (ctx->singlestep_enabled)
200 gen_helper_debug(cpu_env);
201 tcg_gen_exit_tb(0);
202 } else {
203 gen_goto_tb(ctx, 0, ctx->delayed_pc);
204 }
205 }
206
207 static inline void gen_branch_slot(uint32_t delayed_pc, int t)
208 {
209 TCGv sr;
210 int label = gen_new_label();
211 tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
212 sr = tcg_temp_new();
213 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
214 tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
215 tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
216 gen_set_label(label);
217 }
218
219 /* Immediate conditional jump (bt or bf) */
220 static void gen_conditional_jump(DisasContext * ctx,
221 target_ulong ift, target_ulong ifnott)
222 {
223 int l1;
224 TCGv sr;
225
226 l1 = gen_new_label();
227 sr = tcg_temp_new();
228 tcg_gen_andi_i32(sr, cpu_sr, SR_T);
229 tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
230 gen_goto_tb(ctx, 0, ifnott);
231 gen_set_label(l1);
232 gen_goto_tb(ctx, 1, ift);
233 }
234
235 /* Delayed conditional jump (bt or bf) */
236 static void gen_delayed_conditional_jump(DisasContext * ctx)
237 {
238 int l1;
239 TCGv ds;
240
241 l1 = gen_new_label();
242 ds = tcg_temp_new();
243 tcg_gen_andi_i32(ds, cpu_flags, DELAY_SLOT_TRUE);
244 tcg_gen_brcondi_i32(TCG_COND_NE, ds, 0, l1);
245 gen_goto_tb(ctx, 1, ctx->pc + 2);
246 gen_set_label(l1);
247 tcg_gen_andi_i32(cpu_flags, cpu_flags, ~DELAY_SLOT_TRUE);
248 gen_jump(ctx);
249 }
250
251 static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
252 {
253 TCGv t;
254
255 t = tcg_temp_new();
256 tcg_gen_setcond_i32(cond, t, t1, t0);
257 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
258 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
259
260 tcg_temp_free(t);
261 }
262
263 static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
264 {
265 TCGv t;
266
267 t = tcg_temp_new();
268 tcg_gen_setcondi_i32(cond, t, t0, imm);
269 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
270 tcg_gen_or_i32(cpu_sr, cpu_sr, t);
271
272 tcg_temp_free(t);
273 }
274
/* Overwrite cpu_flags with FLAGS while preserving DELAY_SLOT_TRUE, which is
   only set at run time (by gen_branch_slot) and must survive until
   gen_delayed_conditional_jump consumes it. */
static inline void gen_store_flags(uint32_t flags)
{
    tcg_gen_andi_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
    tcg_gen_ori_i32(cpu_flags, cpu_flags, flags);
}
280
281 static inline void gen_copy_bit_i32(TCGv t0, int p0, TCGv t1, int p1)
282 {
283 TCGv tmp = tcg_temp_new();
284
285 p0 &= 0x1f;
286 p1 &= 0x1f;
287
288 tcg_gen_andi_i32(tmp, t1, (1 << p1));
289 tcg_gen_andi_i32(t0, t0, ~(1 << p0));
290 if (p0 < p1)
291 tcg_gen_shri_i32(tmp, tmp, p1 - p0);
292 else if (p0 > p1)
293 tcg_gen_shli_i32(tmp, tmp, p0 - p1);
294 tcg_gen_or_i32(t0, t0, tmp);
295
296 tcg_temp_free(tmp);
297 }
298
/* Assemble the 64-bit FP pair starting at REG into T: fregs[reg] provides
   the high 32 bits and fregs[reg + 1] the low 32 bits (TCG concat order is
   dest = high:low with the low word first). */
static inline void gen_load_fpr64(TCGv_i64 t, int reg)
{
    tcg_gen_concat_i32_i64(t, cpu_fregs[reg + 1], cpu_fregs[reg]);
}
303
304 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
305 {
306 TCGv_i32 tmp = tcg_temp_new_i32();
307 tcg_gen_trunc_i64_i32(tmp, t);
308 tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
309 tcg_gen_shri_i64(t, t, 32);
310 tcg_gen_trunc_i64_i32(tmp, t);
311 tcg_gen_mov_i32(cpu_fregs[reg], tmp);
312 tcg_temp_free_i32(tmp);
313 }
314
/* Opcode field extraction helpers, operating on ctx->opcode.  Bn_m selects
   bits n..m of the 16-bit opcode; the "s" variants sign-extend the field. */
#define B3_0 (ctx->opcode & 0xf)
#define B6_4 ((ctx->opcode >> 4) & 0x7)
#define B7_4 ((ctx->opcode >> 4) & 0xf)
#define B7_0 (ctx->opcode & 0xff)
#define B7_0s ((int32_t) (int8_t) (ctx->opcode & 0xff))
#define B11_0s (ctx->opcode & 0x800 ? 0xfffff000 | (ctx->opcode & 0xfff) : \
  (ctx->opcode & 0xfff))
#define B11_8 ((ctx->opcode >> 8) & 0xf)
#define B15_12 ((ctx->opcode >> 12) & 0xf)

/* General register access with banking: when both SR.MD and SR.RB are set,
   R0-R7 map to the shadow bank at gregs[16..23].  ALTREG selects the
   opposite (inactive) bank for those registers. */
#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
		? (cpu_gregs[x + 16]) : (cpu_gregs[x]))

/* FP register access: FPSCR.FR selects the active FP bank (offset 0x10).
   XHACK appears to remap an XD-style index so the pair's bank bit lands in
   bit 4 — confirm against the XD register encoding. */
#define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))
#define XHACK(x) ((((x) & 1 ) << 4) | ((x) & 0xe))
#define XREG(x) (ctx->flags & FPSCR_FR ? XHACK(x) ^ 0x10 : XHACK(x))
#define DREG(x) FREG(x) /* Assumes lsb of (x) is always 0 */

/* Abort decoding with a slot-illegal-instruction exception when the current
   instruction sits in a (conditional) delay slot. */
#define CHECK_NOT_DELAY_SLOT \
  if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))     \
  {                                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                      \
      gen_helper_raise_slot_illegal_instruction(cpu_env);     \
      ctx->bstate = BS_BRANCH;                                \
      return;                                                 \
  }

/* Abort decoding with an illegal-instruction exception (slot variant when
   inside a delay slot) when executing in user mode. */
#define CHECK_PRIVILEGED                                        \
  if (IS_USER(ctx)) {                                           \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
         gen_helper_raise_slot_illegal_instruction(cpu_env);    \
      } else {                                                  \
         gen_helper_raise_illegal_instruction(cpu_env);         \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }

/* Abort decoding with an FPU-disable exception (slot variant when inside a
   delay slot) when SR.FD is set. */
#define CHECK_FPU_ENABLED                                       \
  if (ctx->flags & SR_FD) {                                     \
      tcg_gen_movi_i32(cpu_pc, ctx->pc);                        \
      if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
          gen_helper_raise_slot_fpu_disable(cpu_env);           \
      } else {                                                  \
          gen_helper_raise_fpu_disable(cpu_env);                \
      }                                                         \
      ctx->bstate = BS_BRANCH;                                  \
      return;                                                   \
  }
368
369 static void _decode_opc(DisasContext * ctx)
370 {
371 /* This code tries to make movcal emulation sufficiently
372 accurate for Linux purposes. This instruction writes
373 memory, and prior to that, always allocates a cache line.
374 It is used in two contexts:
375 - in memcpy, where data is copied in blocks, the first write
376 of to a block uses movca.l for performance.
377 - in arch/sh/mm/cache-sh4.c, movcal.l + ocbi combination is used
378 to flush the cache. Here, the data written by movcal.l is never
379 written to memory, and the data written is just bogus.
380
381 To simulate this, we simulate movcal.l, we store the value to memory,
382 but we also remember the previous content. If we see ocbi, we check
383 if movcal.l for that address was done previously. If so, the write should
384 not have hit the memory, so we restore the previous content.
385 When we see an instruction that is neither movca.l
386 nor ocbi, the previous content is discarded.
387
388 To optimize, we only try to flush stores when we're at the start of
389 TB, or if we already saw movca.l in this TB and did not flush stores
390 yet. */
391 if (ctx->has_movcal)
392 {
393 int opcode = ctx->opcode & 0xf0ff;
394 if (opcode != 0x0093 /* ocbi */
395 && opcode != 0x00c3 /* movca.l */)
396 {
397 gen_helper_discard_movcal_backup(cpu_env);
398 ctx->has_movcal = 0;
399 }
400 }
401
402 #if 0
403 fprintf(stderr, "Translating opcode 0x%04x\n", ctx->opcode);
404 #endif
405
406 switch (ctx->opcode) {
407 case 0x0019: /* div0u */
408 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
409 return;
410 case 0x000b: /* rts */
411 CHECK_NOT_DELAY_SLOT
412 tcg_gen_mov_i32(cpu_delayed_pc, cpu_pr);
413 ctx->flags |= DELAY_SLOT;
414 ctx->delayed_pc = (uint32_t) - 1;
415 return;
416 case 0x0028: /* clrmac */
417 tcg_gen_movi_i32(cpu_mach, 0);
418 tcg_gen_movi_i32(cpu_macl, 0);
419 return;
420 case 0x0048: /* clrs */
421 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
422 return;
423 case 0x0008: /* clrt */
424 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
425 return;
426 case 0x0038: /* ldtlb */
427 CHECK_PRIVILEGED
428 gen_helper_ldtlb(cpu_env);
429 return;
430 case 0x002b: /* rte */
431 CHECK_PRIVILEGED
432 CHECK_NOT_DELAY_SLOT
433 tcg_gen_mov_i32(cpu_sr, cpu_ssr);
434 tcg_gen_mov_i32(cpu_delayed_pc, cpu_spc);
435 ctx->flags |= DELAY_SLOT;
436 ctx->delayed_pc = (uint32_t) - 1;
437 return;
438 case 0x0058: /* sets */
439 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
440 return;
441 case 0x0018: /* sett */
442 tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
443 return;
444 case 0xfbfd: /* frchg */
445 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);
446 ctx->bstate = BS_STOP;
447 return;
448 case 0xf3fd: /* fschg */
449 tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_SZ);
450 ctx->bstate = BS_STOP;
451 return;
452 case 0x0009: /* nop */
453 return;
454 case 0x001b: /* sleep */
455 CHECK_PRIVILEGED
456 tcg_gen_movi_i32(cpu_pc, ctx->pc + 2);
457 gen_helper_sleep(cpu_env);
458 return;
459 }
460
461 switch (ctx->opcode & 0xf000) {
462 case 0x1000: /* mov.l Rm,@(disp,Rn) */
463 {
464 TCGv addr = tcg_temp_new();
465 tcg_gen_addi_i32(addr, REG(B11_8), B3_0 * 4);
466 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
467 tcg_temp_free(addr);
468 }
469 return;
470 case 0x5000: /* mov.l @(disp,Rm),Rn */
471 {
472 TCGv addr = tcg_temp_new();
473 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 4);
474 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
475 tcg_temp_free(addr);
476 }
477 return;
478 case 0xe000: /* mov #imm,Rn */
479 tcg_gen_movi_i32(REG(B11_8), B7_0s);
480 return;
481 case 0x9000: /* mov.w @(disp,PC),Rn */
482 {
483 TCGv addr = tcg_const_i32(ctx->pc + 4 + B7_0 * 2);
484 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
485 tcg_temp_free(addr);
486 }
487 return;
488 case 0xd000: /* mov.l @(disp,PC),Rn */
489 {
490 TCGv addr = tcg_const_i32((ctx->pc + 4 + B7_0 * 4) & ~3);
491 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
492 tcg_temp_free(addr);
493 }
494 return;
495 case 0x7000: /* add #imm,Rn */
496 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), B7_0s);
497 return;
498 case 0xa000: /* bra disp */
499 CHECK_NOT_DELAY_SLOT
500 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
501 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
502 ctx->flags |= DELAY_SLOT;
503 return;
504 case 0xb000: /* bsr disp */
505 CHECK_NOT_DELAY_SLOT
506 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
507 ctx->delayed_pc = ctx->pc + 4 + B11_0s * 2;
508 tcg_gen_movi_i32(cpu_delayed_pc, ctx->delayed_pc);
509 ctx->flags |= DELAY_SLOT;
510 return;
511 }
512
513 switch (ctx->opcode & 0xf00f) {
514 case 0x6003: /* mov Rm,Rn */
515 tcg_gen_mov_i32(REG(B11_8), REG(B7_4));
516 return;
517 case 0x2000: /* mov.b Rm,@Rn */
518 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_UB);
519 return;
520 case 0x2001: /* mov.w Rm,@Rn */
521 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUW);
522 return;
523 case 0x2002: /* mov.l Rm,@Rn */
524 tcg_gen_qemu_st_i32(REG(B7_4), REG(B11_8), ctx->memidx, MO_TEUL);
525 return;
526 case 0x6000: /* mov.b @Rm,Rn */
527 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
528 return;
529 case 0x6001: /* mov.w @Rm,Rn */
530 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
531 return;
532 case 0x6002: /* mov.l @Rm,Rn */
533 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
534 return;
535 case 0x2004: /* mov.b Rm,@-Rn */
536 {
537 TCGv addr = tcg_temp_new();
538 tcg_gen_subi_i32(addr, REG(B11_8), 1);
539 /* might cause re-execution */
540 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
541 tcg_gen_mov_i32(REG(B11_8), addr); /* modify register status */
542 tcg_temp_free(addr);
543 }
544 return;
545 case 0x2005: /* mov.w Rm,@-Rn */
546 {
547 TCGv addr = tcg_temp_new();
548 tcg_gen_subi_i32(addr, REG(B11_8), 2);
549 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
550 tcg_gen_mov_i32(REG(B11_8), addr);
551 tcg_temp_free(addr);
552 }
553 return;
554 case 0x2006: /* mov.l Rm,@-Rn */
555 {
556 TCGv addr = tcg_temp_new();
557 tcg_gen_subi_i32(addr, REG(B11_8), 4);
558 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
559 tcg_gen_mov_i32(REG(B11_8), addr);
560 }
561 return;
562 case 0x6004: /* mov.b @Rm+,Rn */
563 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_SB);
564 if ( B11_8 != B7_4 )
565 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 1);
566 return;
567 case 0x6005: /* mov.w @Rm+,Rn */
568 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESW);
569 if ( B11_8 != B7_4 )
570 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
571 return;
572 case 0x6006: /* mov.l @Rm+,Rn */
573 tcg_gen_qemu_ld_i32(REG(B11_8), REG(B7_4), ctx->memidx, MO_TESL);
574 if ( B11_8 != B7_4 )
575 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
576 return;
577 case 0x0004: /* mov.b Rm,@(R0,Rn) */
578 {
579 TCGv addr = tcg_temp_new();
580 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
581 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_UB);
582 tcg_temp_free(addr);
583 }
584 return;
585 case 0x0005: /* mov.w Rm,@(R0,Rn) */
586 {
587 TCGv addr = tcg_temp_new();
588 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
589 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUW);
590 tcg_temp_free(addr);
591 }
592 return;
593 case 0x0006: /* mov.l Rm,@(R0,Rn) */
594 {
595 TCGv addr = tcg_temp_new();
596 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
597 tcg_gen_qemu_st_i32(REG(B7_4), addr, ctx->memidx, MO_TEUL);
598 tcg_temp_free(addr);
599 }
600 return;
601 case 0x000c: /* mov.b @(R0,Rm),Rn */
602 {
603 TCGv addr = tcg_temp_new();
604 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
605 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_SB);
606 tcg_temp_free(addr);
607 }
608 return;
609 case 0x000d: /* mov.w @(R0,Rm),Rn */
610 {
611 TCGv addr = tcg_temp_new();
612 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
613 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESW);
614 tcg_temp_free(addr);
615 }
616 return;
617 case 0x000e: /* mov.l @(R0,Rm),Rn */
618 {
619 TCGv addr = tcg_temp_new();
620 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
621 tcg_gen_qemu_ld_i32(REG(B11_8), addr, ctx->memidx, MO_TESL);
622 tcg_temp_free(addr);
623 }
624 return;
625 case 0x6008: /* swap.b Rm,Rn */
626 {
627 TCGv high, low;
628 high = tcg_temp_new();
629 tcg_gen_andi_i32(high, REG(B7_4), 0xffff0000);
630 low = tcg_temp_new();
631 tcg_gen_ext16u_i32(low, REG(B7_4));
632 tcg_gen_bswap16_i32(low, low);
633 tcg_gen_or_i32(REG(B11_8), high, low);
634 tcg_temp_free(low);
635 tcg_temp_free(high);
636 }
637 return;
638 case 0x6009: /* swap.w Rm,Rn */
639 tcg_gen_rotli_i32(REG(B11_8), REG(B7_4), 16);
640 return;
641 case 0x200d: /* xtrct Rm,Rn */
642 {
643 TCGv high, low;
644 high = tcg_temp_new();
645 tcg_gen_shli_i32(high, REG(B7_4), 16);
646 low = tcg_temp_new();
647 tcg_gen_shri_i32(low, REG(B11_8), 16);
648 tcg_gen_or_i32(REG(B11_8), high, low);
649 tcg_temp_free(low);
650 tcg_temp_free(high);
651 }
652 return;
653 case 0x300c: /* add Rm,Rn */
654 tcg_gen_add_i32(REG(B11_8), REG(B11_8), REG(B7_4));
655 return;
656 case 0x300e: /* addc Rm,Rn */
657 {
658 TCGv t0, t1, t2;
659 t0 = tcg_temp_new();
660 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
661 t1 = tcg_temp_new();
662 tcg_gen_add_i32(t1, REG(B7_4), REG(B11_8));
663 tcg_gen_add_i32(t0, t0, t1);
664 t2 = tcg_temp_new();
665 tcg_gen_setcond_i32(TCG_COND_GTU, t2, REG(B11_8), t1);
666 tcg_gen_setcond_i32(TCG_COND_GTU, t1, t1, t0);
667 tcg_gen_or_i32(t1, t1, t2);
668 tcg_temp_free(t2);
669 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
670 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
671 tcg_temp_free(t1);
672 tcg_gen_mov_i32(REG(B11_8), t0);
673 tcg_temp_free(t0);
674 }
675 return;
676 case 0x300f: /* addv Rm,Rn */
677 {
678 TCGv t0, t1, t2;
679 t0 = tcg_temp_new();
680 tcg_gen_add_i32(t0, REG(B7_4), REG(B11_8));
681 t1 = tcg_temp_new();
682 tcg_gen_xor_i32(t1, t0, REG(B11_8));
683 t2 = tcg_temp_new();
684 tcg_gen_xor_i32(t2, REG(B7_4), REG(B11_8));
685 tcg_gen_andc_i32(t1, t1, t2);
686 tcg_temp_free(t2);
687 tcg_gen_shri_i32(t1, t1, 31);
688 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
689 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
690 tcg_temp_free(t1);
691 tcg_gen_mov_i32(REG(B7_4), t0);
692 tcg_temp_free(t0);
693 }
694 return;
695 case 0x2009: /* and Rm,Rn */
696 tcg_gen_and_i32(REG(B11_8), REG(B11_8), REG(B7_4));
697 return;
698 case 0x3000: /* cmp/eq Rm,Rn */
699 gen_cmp(TCG_COND_EQ, REG(B7_4), REG(B11_8));
700 return;
701 case 0x3003: /* cmp/ge Rm,Rn */
702 gen_cmp(TCG_COND_GE, REG(B7_4), REG(B11_8));
703 return;
704 case 0x3007: /* cmp/gt Rm,Rn */
705 gen_cmp(TCG_COND_GT, REG(B7_4), REG(B11_8));
706 return;
707 case 0x3006: /* cmp/hi Rm,Rn */
708 gen_cmp(TCG_COND_GTU, REG(B7_4), REG(B11_8));
709 return;
710 case 0x3002: /* cmp/hs Rm,Rn */
711 gen_cmp(TCG_COND_GEU, REG(B7_4), REG(B11_8));
712 return;
713 case 0x200c: /* cmp/str Rm,Rn */
714 {
715 TCGv cmp1 = tcg_temp_new();
716 TCGv cmp2 = tcg_temp_new();
717 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
718 tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
719 tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
720 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
721 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
722 tcg_gen_andi_i32(cmp2, cmp1, 0x00ff0000);
723 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
724 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
725 tcg_gen_andi_i32(cmp2, cmp1, 0x0000ff00);
726 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
727 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
728 tcg_gen_andi_i32(cmp2, cmp1, 0x000000ff);
729 tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);
730 tcg_gen_or_i32(cpu_sr, cpu_sr, cmp2);
731 tcg_temp_free(cmp2);
732 tcg_temp_free(cmp1);
733 }
734 return;
735 case 0x2007: /* div0s Rm,Rn */
736 {
737 gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31); /* SR_Q */
738 gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31); /* SR_M */
739 TCGv val = tcg_temp_new();
740 tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
741 gen_copy_bit_i32(cpu_sr, 0, val, 31); /* SR_T */
742 tcg_temp_free(val);
743 }
744 return;
745 case 0x3004: /* div1 Rm,Rn */
746 gen_helper_div1(REG(B11_8), cpu_env, REG(B7_4), REG(B11_8));
747 return;
748 case 0x300d: /* dmuls.l Rm,Rn */
749 tcg_gen_muls2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
750 return;
751 case 0x3005: /* dmulu.l Rm,Rn */
752 tcg_gen_mulu2_i32(cpu_macl, cpu_mach, REG(B7_4), REG(B11_8));
753 return;
754 case 0x600e: /* exts.b Rm,Rn */
755 tcg_gen_ext8s_i32(REG(B11_8), REG(B7_4));
756 return;
757 case 0x600f: /* exts.w Rm,Rn */
758 tcg_gen_ext16s_i32(REG(B11_8), REG(B7_4));
759 return;
760 case 0x600c: /* extu.b Rm,Rn */
761 tcg_gen_ext8u_i32(REG(B11_8), REG(B7_4));
762 return;
763 case 0x600d: /* extu.w Rm,Rn */
764 tcg_gen_ext16u_i32(REG(B11_8), REG(B7_4));
765 return;
766 case 0x000f: /* mac.l @Rm+,@Rn+ */
767 {
768 TCGv arg0, arg1;
769 arg0 = tcg_temp_new();
770 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
771 arg1 = tcg_temp_new();
772 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
773 gen_helper_macl(cpu_env, arg0, arg1);
774 tcg_temp_free(arg1);
775 tcg_temp_free(arg0);
776 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
777 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
778 }
779 return;
780 case 0x400f: /* mac.w @Rm+,@Rn+ */
781 {
782 TCGv arg0, arg1;
783 arg0 = tcg_temp_new();
784 tcg_gen_qemu_ld_i32(arg0, REG(B7_4), ctx->memidx, MO_TESL);
785 arg1 = tcg_temp_new();
786 tcg_gen_qemu_ld_i32(arg1, REG(B11_8), ctx->memidx, MO_TESL);
787 gen_helper_macw(cpu_env, arg0, arg1);
788 tcg_temp_free(arg1);
789 tcg_temp_free(arg0);
790 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 2);
791 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 2);
792 }
793 return;
794 case 0x0007: /* mul.l Rm,Rn */
795 tcg_gen_mul_i32(cpu_macl, REG(B7_4), REG(B11_8));
796 return;
797 case 0x200f: /* muls.w Rm,Rn */
798 {
799 TCGv arg0, arg1;
800 arg0 = tcg_temp_new();
801 tcg_gen_ext16s_i32(arg0, REG(B7_4));
802 arg1 = tcg_temp_new();
803 tcg_gen_ext16s_i32(arg1, REG(B11_8));
804 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
805 tcg_temp_free(arg1);
806 tcg_temp_free(arg0);
807 }
808 return;
809 case 0x200e: /* mulu.w Rm,Rn */
810 {
811 TCGv arg0, arg1;
812 arg0 = tcg_temp_new();
813 tcg_gen_ext16u_i32(arg0, REG(B7_4));
814 arg1 = tcg_temp_new();
815 tcg_gen_ext16u_i32(arg1, REG(B11_8));
816 tcg_gen_mul_i32(cpu_macl, arg0, arg1);
817 tcg_temp_free(arg1);
818 tcg_temp_free(arg0);
819 }
820 return;
821 case 0x600b: /* neg Rm,Rn */
822 tcg_gen_neg_i32(REG(B11_8), REG(B7_4));
823 return;
824 case 0x600a: /* negc Rm,Rn */
825 {
826 TCGv t0, t1;
827 t0 = tcg_temp_new();
828 tcg_gen_neg_i32(t0, REG(B7_4));
829 t1 = tcg_temp_new();
830 tcg_gen_andi_i32(t1, cpu_sr, SR_T);
831 tcg_gen_sub_i32(REG(B11_8), t0, t1);
832 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
833 tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
834 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
835 tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);
836 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
837 tcg_temp_free(t0);
838 tcg_temp_free(t1);
839 }
840 return;
841 case 0x6007: /* not Rm,Rn */
842 tcg_gen_not_i32(REG(B11_8), REG(B7_4));
843 return;
844 case 0x200b: /* or Rm,Rn */
845 tcg_gen_or_i32(REG(B11_8), REG(B11_8), REG(B7_4));
846 return;
847 case 0x400c: /* shad Rm,Rn */
848 {
849 int label1 = gen_new_label();
850 int label2 = gen_new_label();
851 int label3 = gen_new_label();
852 int label4 = gen_new_label();
853 TCGv shift;
854 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
855 /* Rm positive, shift to the left */
856 shift = tcg_temp_new();
857 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
858 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
859 tcg_temp_free(shift);
860 tcg_gen_br(label4);
861 /* Rm negative, shift to the right */
862 gen_set_label(label1);
863 shift = tcg_temp_new();
864 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
865 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
866 tcg_gen_not_i32(shift, REG(B7_4));
867 tcg_gen_andi_i32(shift, shift, 0x1f);
868 tcg_gen_addi_i32(shift, shift, 1);
869 tcg_gen_sar_i32(REG(B11_8), REG(B11_8), shift);
870 tcg_temp_free(shift);
871 tcg_gen_br(label4);
872 /* Rm = -32 */
873 gen_set_label(label2);
874 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B11_8), 0, label3);
875 tcg_gen_movi_i32(REG(B11_8), 0);
876 tcg_gen_br(label4);
877 gen_set_label(label3);
878 tcg_gen_movi_i32(REG(B11_8), 0xffffffff);
879 gen_set_label(label4);
880 }
881 return;
882 case 0x400d: /* shld Rm,Rn */
883 {
884 int label1 = gen_new_label();
885 int label2 = gen_new_label();
886 int label3 = gen_new_label();
887 TCGv shift;
888 tcg_gen_brcondi_i32(TCG_COND_LT, REG(B7_4), 0, label1);
889 /* Rm positive, shift to the left */
890 shift = tcg_temp_new();
891 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
892 tcg_gen_shl_i32(REG(B11_8), REG(B11_8), shift);
893 tcg_temp_free(shift);
894 tcg_gen_br(label3);
895 /* Rm negative, shift to the right */
896 gen_set_label(label1);
897 shift = tcg_temp_new();
898 tcg_gen_andi_i32(shift, REG(B7_4), 0x1f);
899 tcg_gen_brcondi_i32(TCG_COND_EQ, shift, 0, label2);
900 tcg_gen_not_i32(shift, REG(B7_4));
901 tcg_gen_andi_i32(shift, shift, 0x1f);
902 tcg_gen_addi_i32(shift, shift, 1);
903 tcg_gen_shr_i32(REG(B11_8), REG(B11_8), shift);
904 tcg_temp_free(shift);
905 tcg_gen_br(label3);
906 /* Rm = -32 */
907 gen_set_label(label2);
908 tcg_gen_movi_i32(REG(B11_8), 0);
909 gen_set_label(label3);
910 }
911 return;
912 case 0x3008: /* sub Rm,Rn */
913 tcg_gen_sub_i32(REG(B11_8), REG(B11_8), REG(B7_4));
914 return;
915 case 0x300a: /* subc Rm,Rn */
916 {
917 TCGv t0, t1, t2;
918 t0 = tcg_temp_new();
919 tcg_gen_andi_i32(t0, cpu_sr, SR_T);
920 t1 = tcg_temp_new();
921 tcg_gen_sub_i32(t1, REG(B11_8), REG(B7_4));
922 tcg_gen_sub_i32(t0, t1, t0);
923 t2 = tcg_temp_new();
924 tcg_gen_setcond_i32(TCG_COND_LTU, t2, REG(B11_8), t1);
925 tcg_gen_setcond_i32(TCG_COND_LTU, t1, t1, t0);
926 tcg_gen_or_i32(t1, t1, t2);
927 tcg_temp_free(t2);
928 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
929 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
930 tcg_temp_free(t1);
931 tcg_gen_mov_i32(REG(B11_8), t0);
932 tcg_temp_free(t0);
933 }
934 return;
935 case 0x300b: /* subv Rm,Rn */
936 {
937 TCGv t0, t1, t2;
938 t0 = tcg_temp_new();
939 tcg_gen_sub_i32(t0, REG(B11_8), REG(B7_4));
940 t1 = tcg_temp_new();
941 tcg_gen_xor_i32(t1, t0, REG(B7_4));
942 t2 = tcg_temp_new();
943 tcg_gen_xor_i32(t2, REG(B11_8), REG(B7_4));
944 tcg_gen_and_i32(t1, t1, t2);
945 tcg_temp_free(t2);
946 tcg_gen_shri_i32(t1, t1, 31);
947 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
948 tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
949 tcg_temp_free(t1);
950 tcg_gen_mov_i32(REG(B11_8), t0);
951 tcg_temp_free(t0);
952 }
953 return;
954 case 0x2008: /* tst Rm,Rn */
955 {
956 TCGv val = tcg_temp_new();
957 tcg_gen_and_i32(val, REG(B7_4), REG(B11_8));
958 gen_cmp_imm(TCG_COND_EQ, val, 0);
959 tcg_temp_free(val);
960 }
961 return;
962 case 0x200a: /* xor Rm,Rn */
963 tcg_gen_xor_i32(REG(B11_8), REG(B11_8), REG(B7_4));
964 return;
965 case 0xf00c: /* fmov {F,D,X}Rm,{F,D,X}Rn - FPSCR: Nothing */
966 CHECK_FPU_ENABLED
967 if (ctx->flags & FPSCR_SZ) {
968 TCGv_i64 fp = tcg_temp_new_i64();
969 gen_load_fpr64(fp, XREG(B7_4));
970 gen_store_fpr64(fp, XREG(B11_8));
971 tcg_temp_free_i64(fp);
972 } else {
973 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B7_4)]);
974 }
975 return;
976 case 0xf00a: /* fmov {F,D,X}Rm,@Rn - FPSCR: Nothing */
977 CHECK_FPU_ENABLED
978 if (ctx->flags & FPSCR_SZ) {
979 TCGv addr_hi = tcg_temp_new();
980 int fr = XREG(B7_4);
981 tcg_gen_addi_i32(addr_hi, REG(B11_8), 4);
982 tcg_gen_qemu_st_i32(cpu_fregs[fr], REG(B11_8),
983 ctx->memidx, MO_TEUL);
984 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr_hi,
985 ctx->memidx, MO_TEUL);
986 tcg_temp_free(addr_hi);
987 } else {
988 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], REG(B11_8),
989 ctx->memidx, MO_TEUL);
990 }
991 return;
992 case 0xf008: /* fmov @Rm,{F,D,X}Rn - FPSCR: Nothing */
993 CHECK_FPU_ENABLED
994 if (ctx->flags & FPSCR_SZ) {
995 TCGv addr_hi = tcg_temp_new();
996 int fr = XREG(B11_8);
997 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
998 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
999 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
1000 tcg_temp_free(addr_hi);
1001 } else {
1002 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
1003 ctx->memidx, MO_TEUL);
1004 }
1005 return;
1006 case 0xf009: /* fmov @Rm+,{F,D,X}Rn - FPSCR: Nothing */
1007 CHECK_FPU_ENABLED
1008 if (ctx->flags & FPSCR_SZ) {
1009 TCGv addr_hi = tcg_temp_new();
1010 int fr = XREG(B11_8);
1011 tcg_gen_addi_i32(addr_hi, REG(B7_4), 4);
1012 tcg_gen_qemu_ld_i32(cpu_fregs[fr], REG(B7_4), ctx->memidx, MO_TEUL);
1013 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr_hi, ctx->memidx, MO_TEUL);
1014 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 8);
1015 tcg_temp_free(addr_hi);
1016 } else {
1017 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], REG(B7_4),
1018 ctx->memidx, MO_TEUL);
1019 tcg_gen_addi_i32(REG(B7_4), REG(B7_4), 4);
1020 }
1021 return;
1022 case 0xf00b: /* fmov {F,D,X}Rm,@-Rn - FPSCR: Nothing */
1023 CHECK_FPU_ENABLED
1024 if (ctx->flags & FPSCR_SZ) {
1025 TCGv addr = tcg_temp_new_i32();
1026 int fr = XREG(B7_4);
1027 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1028 tcg_gen_qemu_st_i32(cpu_fregs[fr+1], addr, ctx->memidx, MO_TEUL);
1029 tcg_gen_subi_i32(addr, addr, 4);
1030 tcg_gen_qemu_st_i32(cpu_fregs[fr], addr, ctx->memidx, MO_TEUL);
1031 tcg_gen_mov_i32(REG(B11_8), addr);
1032 tcg_temp_free(addr);
1033 } else {
1034 TCGv addr;
1035 addr = tcg_temp_new_i32();
1036 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1037 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1038 ctx->memidx, MO_TEUL);
1039 tcg_gen_mov_i32(REG(B11_8), addr);
1040 tcg_temp_free(addr);
1041 }
1042 return;
1043 case 0xf006: /* fmov @(R0,Rm),{F,D,X}Rm - FPSCR: Nothing */
1044 CHECK_FPU_ENABLED
1045 {
1046 TCGv addr = tcg_temp_new_i32();
1047 tcg_gen_add_i32(addr, REG(B7_4), REG(0));
1048 if (ctx->flags & FPSCR_SZ) {
1049 int fr = XREG(B11_8);
1050 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1051 ctx->memidx, MO_TEUL);
1052 tcg_gen_addi_i32(addr, addr, 4);
1053 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1054 ctx->memidx, MO_TEUL);
1055 } else {
1056 tcg_gen_qemu_ld_i32(cpu_fregs[FREG(B11_8)], addr,
1057 ctx->memidx, MO_TEUL);
1058 }
1059 tcg_temp_free(addr);
1060 }
1061 return;
1062 case 0xf007: /* fmov {F,D,X}Rn,@(R0,Rn) - FPSCR: Nothing */
1063 CHECK_FPU_ENABLED
1064 {
1065 TCGv addr = tcg_temp_new();
1066 tcg_gen_add_i32(addr, REG(B11_8), REG(0));
1067 if (ctx->flags & FPSCR_SZ) {
1068 int fr = XREG(B7_4);
1069 tcg_gen_qemu_ld_i32(cpu_fregs[fr], addr,
1070 ctx->memidx, MO_TEUL);
1071 tcg_gen_addi_i32(addr, addr, 4);
1072 tcg_gen_qemu_ld_i32(cpu_fregs[fr+1], addr,
1073 ctx->memidx, MO_TEUL);
1074 } else {
1075 tcg_gen_qemu_st_i32(cpu_fregs[FREG(B7_4)], addr,
1076 ctx->memidx, MO_TEUL);
1077 }
1078 tcg_temp_free(addr);
1079 }
1080 return;
1081 case 0xf000: /* fadd Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1082 case 0xf001: /* fsub Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1083 case 0xf002: /* fmul Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1084 case 0xf003: /* fdiv Rm,Rn - FPSCR: R[PR,Enable.O/U/I]/W[Cause,Flag] */
1085 case 0xf004: /* fcmp/eq Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1086 case 0xf005: /* fcmp/gt Rm,Rn - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1087 {
1088 CHECK_FPU_ENABLED
1089 if (ctx->flags & FPSCR_PR) {
1090 TCGv_i64 fp0, fp1;
1091
1092 if (ctx->opcode & 0x0110)
1093 break; /* illegal instruction */
1094 fp0 = tcg_temp_new_i64();
1095 fp1 = tcg_temp_new_i64();
1096 gen_load_fpr64(fp0, DREG(B11_8));
1097 gen_load_fpr64(fp1, DREG(B7_4));
1098 switch (ctx->opcode & 0xf00f) {
1099 case 0xf000: /* fadd Rm,Rn */
1100 gen_helper_fadd_DT(fp0, cpu_env, fp0, fp1);
1101 break;
1102 case 0xf001: /* fsub Rm,Rn */
1103 gen_helper_fsub_DT(fp0, cpu_env, fp0, fp1);
1104 break;
1105 case 0xf002: /* fmul Rm,Rn */
1106 gen_helper_fmul_DT(fp0, cpu_env, fp0, fp1);
1107 break;
1108 case 0xf003: /* fdiv Rm,Rn */
1109 gen_helper_fdiv_DT(fp0, cpu_env, fp0, fp1);
1110 break;
1111 case 0xf004: /* fcmp/eq Rm,Rn */
1112 gen_helper_fcmp_eq_DT(cpu_env, fp0, fp1);
1113 return;
1114 case 0xf005: /* fcmp/gt Rm,Rn */
1115 gen_helper_fcmp_gt_DT(cpu_env, fp0, fp1);
1116 return;
1117 }
1118 gen_store_fpr64(fp0, DREG(B11_8));
1119 tcg_temp_free_i64(fp0);
1120 tcg_temp_free_i64(fp1);
1121 } else {
1122 switch (ctx->opcode & 0xf00f) {
1123 case 0xf000: /* fadd Rm,Rn */
1124 gen_helper_fadd_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1125 cpu_fregs[FREG(B11_8)],
1126 cpu_fregs[FREG(B7_4)]);
1127 break;
1128 case 0xf001: /* fsub Rm,Rn */
1129 gen_helper_fsub_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1130 cpu_fregs[FREG(B11_8)],
1131 cpu_fregs[FREG(B7_4)]);
1132 break;
1133 case 0xf002: /* fmul Rm,Rn */
1134 gen_helper_fmul_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1135 cpu_fregs[FREG(B11_8)],
1136 cpu_fregs[FREG(B7_4)]);
1137 break;
1138 case 0xf003: /* fdiv Rm,Rn */
1139 gen_helper_fdiv_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1140 cpu_fregs[FREG(B11_8)],
1141 cpu_fregs[FREG(B7_4)]);
1142 break;
1143 case 0xf004: /* fcmp/eq Rm,Rn */
1144 gen_helper_fcmp_eq_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1145 cpu_fregs[FREG(B7_4)]);
1146 return;
1147 case 0xf005: /* fcmp/gt Rm,Rn */
1148 gen_helper_fcmp_gt_FT(cpu_env, cpu_fregs[FREG(B11_8)],
1149 cpu_fregs[FREG(B7_4)]);
1150 return;
1151 }
1152 }
1153 }
1154 return;
1155 case 0xf00e: /* fmac FR0,RM,Rn */
1156 {
1157 CHECK_FPU_ENABLED
1158 if (ctx->flags & FPSCR_PR) {
1159 break; /* illegal instruction */
1160 } else {
1161 gen_helper_fmac_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1162 cpu_fregs[FREG(0)], cpu_fregs[FREG(B7_4)],
1163 cpu_fregs[FREG(B11_8)]);
1164 return;
1165 }
1166 }
1167 }
1168
1169 switch (ctx->opcode & 0xff00) {
1170 case 0xc900: /* and #imm,R0 */
1171 tcg_gen_andi_i32(REG(0), REG(0), B7_0);
1172 return;
1173 case 0xcd00: /* and.b #imm,@(R0,GBR) */
1174 {
1175 TCGv addr, val;
1176 addr = tcg_temp_new();
1177 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1178 val = tcg_temp_new();
1179 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1180 tcg_gen_andi_i32(val, val, B7_0);
1181 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1182 tcg_temp_free(val);
1183 tcg_temp_free(addr);
1184 }
1185 return;
1186 case 0x8b00: /* bf label */
1187 CHECK_NOT_DELAY_SLOT
1188 gen_conditional_jump(ctx, ctx->pc + 2,
1189 ctx->pc + 4 + B7_0s * 2);
1190 ctx->bstate = BS_BRANCH;
1191 return;
1192 case 0x8f00: /* bf/s label */
1193 CHECK_NOT_DELAY_SLOT
1194 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 0);
1195 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1196 return;
1197 case 0x8900: /* bt label */
1198 CHECK_NOT_DELAY_SLOT
1199 gen_conditional_jump(ctx, ctx->pc + 4 + B7_0s * 2,
1200 ctx->pc + 2);
1201 ctx->bstate = BS_BRANCH;
1202 return;
1203 case 0x8d00: /* bt/s label */
1204 CHECK_NOT_DELAY_SLOT
1205 gen_branch_slot(ctx->delayed_pc = ctx->pc + 4 + B7_0s * 2, 1);
1206 ctx->flags |= DELAY_SLOT_CONDITIONAL;
1207 return;
1208 case 0x8800: /* cmp/eq #imm,R0 */
1209 gen_cmp_imm(TCG_COND_EQ, REG(0), B7_0s);
1210 return;
1211 case 0xc400: /* mov.b @(disp,GBR),R0 */
1212 {
1213 TCGv addr = tcg_temp_new();
1214 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1215 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1216 tcg_temp_free(addr);
1217 }
1218 return;
1219 case 0xc500: /* mov.w @(disp,GBR),R0 */
1220 {
1221 TCGv addr = tcg_temp_new();
1222 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1223 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1224 tcg_temp_free(addr);
1225 }
1226 return;
1227 case 0xc600: /* mov.l @(disp,GBR),R0 */
1228 {
1229 TCGv addr = tcg_temp_new();
1230 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1231 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESL);
1232 tcg_temp_free(addr);
1233 }
1234 return;
1235 case 0xc000: /* mov.b R0,@(disp,GBR) */
1236 {
1237 TCGv addr = tcg_temp_new();
1238 tcg_gen_addi_i32(addr, cpu_gbr, B7_0);
1239 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1240 tcg_temp_free(addr);
1241 }
1242 return;
1243 case 0xc100: /* mov.w R0,@(disp,GBR) */
1244 {
1245 TCGv addr = tcg_temp_new();
1246 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 2);
1247 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1248 tcg_temp_free(addr);
1249 }
1250 return;
1251 case 0xc200: /* mov.l R0,@(disp,GBR) */
1252 {
1253 TCGv addr = tcg_temp_new();
1254 tcg_gen_addi_i32(addr, cpu_gbr, B7_0 * 4);
1255 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUL);
1256 tcg_temp_free(addr);
1257 }
1258 return;
1259 case 0x8000: /* mov.b R0,@(disp,Rn) */
1260 {
1261 TCGv addr = tcg_temp_new();
1262 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1263 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_UB);
1264 tcg_temp_free(addr);
1265 }
1266 return;
1267 case 0x8100: /* mov.w R0,@(disp,Rn) */
1268 {
1269 TCGv addr = tcg_temp_new();
1270 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1271 tcg_gen_qemu_st_i32(REG(0), addr, ctx->memidx, MO_TEUW);
1272 tcg_temp_free(addr);
1273 }
1274 return;
1275 case 0x8400: /* mov.b @(disp,Rn),R0 */
1276 {
1277 TCGv addr = tcg_temp_new();
1278 tcg_gen_addi_i32(addr, REG(B7_4), B3_0);
1279 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_SB);
1280 tcg_temp_free(addr);
1281 }
1282 return;
1283 case 0x8500: /* mov.w @(disp,Rn),R0 */
1284 {
1285 TCGv addr = tcg_temp_new();
1286 tcg_gen_addi_i32(addr, REG(B7_4), B3_0 * 2);
1287 tcg_gen_qemu_ld_i32(REG(0), addr, ctx->memidx, MO_TESW);
1288 tcg_temp_free(addr);
1289 }
1290 return;
1291 case 0xc700: /* mova @(disp,PC),R0 */
1292 tcg_gen_movi_i32(REG(0), ((ctx->pc & 0xfffffffc) + 4 + B7_0 * 4) & ~3);
1293 return;
1294 case 0xcb00: /* or #imm,R0 */
1295 tcg_gen_ori_i32(REG(0), REG(0), B7_0);
1296 return;
1297 case 0xcf00: /* or.b #imm,@(R0,GBR) */
1298 {
1299 TCGv addr, val;
1300 addr = tcg_temp_new();
1301 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1302 val = tcg_temp_new();
1303 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1304 tcg_gen_ori_i32(val, val, B7_0);
1305 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1306 tcg_temp_free(val);
1307 tcg_temp_free(addr);
1308 }
1309 return;
1310 case 0xc300: /* trapa #imm */
1311 {
1312 TCGv imm;
1313 CHECK_NOT_DELAY_SLOT
1314 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1315 imm = tcg_const_i32(B7_0);
1316 gen_helper_trapa(cpu_env, imm);
1317 tcg_temp_free(imm);
1318 ctx->bstate = BS_BRANCH;
1319 }
1320 return;
1321 case 0xc800: /* tst #imm,R0 */
1322 {
1323 TCGv val = tcg_temp_new();
1324 tcg_gen_andi_i32(val, REG(0), B7_0);
1325 gen_cmp_imm(TCG_COND_EQ, val, 0);
1326 tcg_temp_free(val);
1327 }
1328 return;
1329 case 0xcc00: /* tst.b #imm,@(R0,GBR) */
1330 {
1331 TCGv val = tcg_temp_new();
1332 tcg_gen_add_i32(val, REG(0), cpu_gbr);
1333 tcg_gen_qemu_ld_i32(val, val, ctx->memidx, MO_UB);
1334 tcg_gen_andi_i32(val, val, B7_0);
1335 gen_cmp_imm(TCG_COND_EQ, val, 0);
1336 tcg_temp_free(val);
1337 }
1338 return;
1339 case 0xca00: /* xor #imm,R0 */
1340 tcg_gen_xori_i32(REG(0), REG(0), B7_0);
1341 return;
1342 case 0xce00: /* xor.b #imm,@(R0,GBR) */
1343 {
1344 TCGv addr, val;
1345 addr = tcg_temp_new();
1346 tcg_gen_add_i32(addr, REG(0), cpu_gbr);
1347 val = tcg_temp_new();
1348 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1349 tcg_gen_xori_i32(val, val, B7_0);
1350 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1351 tcg_temp_free(val);
1352 tcg_temp_free(addr);
1353 }
1354 return;
1355 }
1356
1357 switch (ctx->opcode & 0xf08f) {
1358 case 0x408e: /* ldc Rm,Rn_BANK */
1359 CHECK_PRIVILEGED
1360 tcg_gen_mov_i32(ALTREG(B6_4), REG(B11_8));
1361 return;
1362 case 0x4087: /* ldc.l @Rm+,Rn_BANK */
1363 CHECK_PRIVILEGED
1364 tcg_gen_qemu_ld_i32(ALTREG(B6_4), REG(B11_8), ctx->memidx, MO_TESL);
1365 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1366 return;
1367 case 0x0082: /* stc Rm_BANK,Rn */
1368 CHECK_PRIVILEGED
1369 tcg_gen_mov_i32(REG(B11_8), ALTREG(B6_4));
1370 return;
1371 case 0x4083: /* stc.l Rm_BANK,@-Rn */
1372 CHECK_PRIVILEGED
1373 {
1374 TCGv addr = tcg_temp_new();
1375 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1376 tcg_gen_qemu_st_i32(ALTREG(B6_4), addr, ctx->memidx, MO_TEUL);
1377 tcg_gen_mov_i32(REG(B11_8), addr);
1378 tcg_temp_free(addr);
1379 }
1380 return;
1381 }
1382
1383 switch (ctx->opcode & 0xf0ff) {
1384 case 0x0023: /* braf Rn */
1385 CHECK_NOT_DELAY_SLOT
1386 tcg_gen_addi_i32(cpu_delayed_pc, REG(B11_8), ctx->pc + 4);
1387 ctx->flags |= DELAY_SLOT;
1388 ctx->delayed_pc = (uint32_t) - 1;
1389 return;
1390 case 0x0003: /* bsrf Rn */
1391 CHECK_NOT_DELAY_SLOT
1392 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1393 tcg_gen_add_i32(cpu_delayed_pc, REG(B11_8), cpu_pr);
1394 ctx->flags |= DELAY_SLOT;
1395 ctx->delayed_pc = (uint32_t) - 1;
1396 return;
1397 case 0x4015: /* cmp/pl Rn */
1398 gen_cmp_imm(TCG_COND_GT, REG(B11_8), 0);
1399 return;
1400 case 0x4011: /* cmp/pz Rn */
1401 gen_cmp_imm(TCG_COND_GE, REG(B11_8), 0);
1402 return;
1403 case 0x4010: /* dt Rn */
1404 tcg_gen_subi_i32(REG(B11_8), REG(B11_8), 1);
1405 gen_cmp_imm(TCG_COND_EQ, REG(B11_8), 0);
1406 return;
1407 case 0x402b: /* jmp @Rn */
1408 CHECK_NOT_DELAY_SLOT
1409 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1410 ctx->flags |= DELAY_SLOT;
1411 ctx->delayed_pc = (uint32_t) - 1;
1412 return;
1413 case 0x400b: /* jsr @Rn */
1414 CHECK_NOT_DELAY_SLOT
1415 tcg_gen_movi_i32(cpu_pr, ctx->pc + 4);
1416 tcg_gen_mov_i32(cpu_delayed_pc, REG(B11_8));
1417 ctx->flags |= DELAY_SLOT;
1418 ctx->delayed_pc = (uint32_t) - 1;
1419 return;
1420 case 0x400e: /* ldc Rm,SR */
1421 CHECK_PRIVILEGED
1422 tcg_gen_andi_i32(cpu_sr, REG(B11_8), 0x700083f3);
1423 ctx->bstate = BS_STOP;
1424 return;
1425 case 0x4007: /* ldc.l @Rm+,SR */
1426 CHECK_PRIVILEGED
1427 {
1428 TCGv val = tcg_temp_new();
1429 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TESL);
1430 tcg_gen_andi_i32(cpu_sr, val, 0x700083f3);
1431 tcg_temp_free(val);
1432 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1433 ctx->bstate = BS_STOP;
1434 }
1435 return;
1436 case 0x0002: /* stc SR,Rn */
1437 CHECK_PRIVILEGED
1438 tcg_gen_mov_i32(REG(B11_8), cpu_sr);
1439 return;
1440 case 0x4003: /* stc SR,@-Rn */
1441 CHECK_PRIVILEGED
1442 {
1443 TCGv addr = tcg_temp_new();
1444 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1445 tcg_gen_qemu_st_i32(cpu_sr, addr, ctx->memidx, MO_TEUL);
1446 tcg_gen_mov_i32(REG(B11_8), addr);
1447 tcg_temp_free(addr);
1448 }
1449 return;
1450 #define LD(reg,ldnum,ldpnum,prechk) \
1451 case ldnum: \
1452 prechk \
1453 tcg_gen_mov_i32 (cpu_##reg, REG(B11_8)); \
1454 return; \
1455 case ldpnum: \
1456 prechk \
1457 tcg_gen_qemu_ld_i32(cpu_##reg, REG(B11_8), ctx->memidx, MO_TESL); \
1458 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4); \
1459 return;
1460 #define ST(reg,stnum,stpnum,prechk) \
1461 case stnum: \
1462 prechk \
1463 tcg_gen_mov_i32 (REG(B11_8), cpu_##reg); \
1464 return; \
1465 case stpnum: \
1466 prechk \
1467 { \
1468 TCGv addr = tcg_temp_new(); \
1469 tcg_gen_subi_i32(addr, REG(B11_8), 4); \
1470 tcg_gen_qemu_st_i32(cpu_##reg, addr, ctx->memidx, MO_TEUL); \
1471 tcg_gen_mov_i32(REG(B11_8), addr); \
1472 tcg_temp_free(addr); \
1473 } \
1474 return;
1475 #define LDST(reg,ldnum,ldpnum,stnum,stpnum,prechk) \
1476 LD(reg,ldnum,ldpnum,prechk) \
1477 ST(reg,stnum,stpnum,prechk)
1478 LDST(gbr, 0x401e, 0x4017, 0x0012, 0x4013, {})
1479 LDST(vbr, 0x402e, 0x4027, 0x0022, 0x4023, CHECK_PRIVILEGED)
1480 LDST(ssr, 0x403e, 0x4037, 0x0032, 0x4033, CHECK_PRIVILEGED)
1481 LDST(spc, 0x404e, 0x4047, 0x0042, 0x4043, CHECK_PRIVILEGED)
1482 ST(sgr, 0x003a, 0x4032, CHECK_PRIVILEGED)
1483 LD(sgr, 0x403a, 0x4036, CHECK_PRIVILEGED if (!(ctx->features & SH_FEATURE_SH4A)) break;)
1484 LDST(dbr, 0x40fa, 0x40f6, 0x00fa, 0x40f2, CHECK_PRIVILEGED)
1485 LDST(mach, 0x400a, 0x4006, 0x000a, 0x4002, {})
1486 LDST(macl, 0x401a, 0x4016, 0x001a, 0x4012, {})
1487 LDST(pr, 0x402a, 0x4026, 0x002a, 0x4022, {})
1488 LDST(fpul, 0x405a, 0x4056, 0x005a, 0x4052, {CHECK_FPU_ENABLED})
1489 case 0x406a: /* lds Rm,FPSCR */
1490 CHECK_FPU_ENABLED
1491 gen_helper_ld_fpscr(cpu_env, REG(B11_8));
1492 ctx->bstate = BS_STOP;
1493 return;
1494 case 0x4066: /* lds.l @Rm+,FPSCR */
1495 CHECK_FPU_ENABLED
1496 {
1497 TCGv addr = tcg_temp_new();
1498 tcg_gen_qemu_ld_i32(addr, REG(B11_8), ctx->memidx, MO_TESL);
1499 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1500 gen_helper_ld_fpscr(cpu_env, addr);
1501 tcg_temp_free(addr);
1502 ctx->bstate = BS_STOP;
1503 }
1504 return;
1505 case 0x006a: /* sts FPSCR,Rn */
1506 CHECK_FPU_ENABLED
1507 tcg_gen_andi_i32(REG(B11_8), cpu_fpscr, 0x003fffff);
1508 return;
1509 case 0x4062: /* sts FPSCR,@-Rn */
1510 CHECK_FPU_ENABLED
1511 {
1512 TCGv addr, val;
1513 val = tcg_temp_new();
1514 tcg_gen_andi_i32(val, cpu_fpscr, 0x003fffff);
1515 addr = tcg_temp_new();
1516 tcg_gen_subi_i32(addr, REG(B11_8), 4);
1517 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_TEUL);
1518 tcg_gen_mov_i32(REG(B11_8), addr);
1519 tcg_temp_free(addr);
1520 tcg_temp_free(val);
1521 }
1522 return;
1523 case 0x00c3: /* movca.l R0,@Rm */
1524 {
1525 TCGv val = tcg_temp_new();
1526 tcg_gen_qemu_ld_i32(val, REG(B11_8), ctx->memidx, MO_TEUL);
1527 gen_helper_movcal(cpu_env, REG(B11_8), val);
1528 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1529 }
1530 ctx->has_movcal = 1;
1531 return;
1532 case 0x40a9:
1533 /* MOVUA.L @Rm,R0 (Rm) -> R0
1534 Load non-boundary-aligned data */
1535 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1536 return;
1537 case 0x40e9:
1538 /* MOVUA.L @Rm+,R0 (Rm) -> R0, Rm + 4 -> Rm
1539 Load non-boundary-aligned data */
1540 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1541 tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
1542 return;
1543 case 0x0029: /* movt Rn */
1544 tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
1545 return;
1546 case 0x0073:
1547 /* MOVCO.L
1548 LDST -> T
1549 If (T == 1) R0 -> (Rn)
1550 0 -> LDST
1551 */
1552 if (ctx->features & SH_FEATURE_SH4A) {
1553 int label = gen_new_label();
1554 tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
1555 tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
1556 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
1557 tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);
1558 gen_set_label(label);
1559 tcg_gen_movi_i32(cpu_ldst, 0);
1560 return;
1561 } else
1562 break;
1563 case 0x0063:
1564 /* MOVLI.L @Rm,R0
1565 1 -> LDST
1566 (Rm) -> R0
1567 When interrupt/exception
1568 occurred 0 -> LDST
1569 */
1570 if (ctx->features & SH_FEATURE_SH4A) {
1571 tcg_gen_movi_i32(cpu_ldst, 0);
1572 tcg_gen_qemu_ld_i32(REG(0), REG(B11_8), ctx->memidx, MO_TESL);
1573 tcg_gen_movi_i32(cpu_ldst, 1);
1574 return;
1575 } else
1576 break;
1577 case 0x0093: /* ocbi @Rn */
1578 {
1579 gen_helper_ocbi(cpu_env, REG(B11_8));
1580 }
1581 return;
1582 case 0x00a3: /* ocbp @Rn */
1583 case 0x00b3: /* ocbwb @Rn */
1584 /* These instructions are supposed to do nothing in case of
1585 a cache miss. Given that we only partially emulate caches
1586 it is safe to simply ignore them. */
1587 return;
1588 case 0x0083: /* pref @Rn */
1589 return;
1590 case 0x00d3: /* prefi @Rn */
1591 if (ctx->features & SH_FEATURE_SH4A)
1592 return;
1593 else
1594 break;
1595 case 0x00e3: /* icbi @Rn */
1596 if (ctx->features & SH_FEATURE_SH4A)
1597 return;
1598 else
1599 break;
1600 case 0x00ab: /* synco */
1601 if (ctx->features & SH_FEATURE_SH4A)
1602 return;
1603 else
1604 break;
1605 case 0x4024: /* rotcl Rn */
1606 {
1607 TCGv tmp = tcg_temp_new();
1608 tcg_gen_mov_i32(tmp, cpu_sr);
1609 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1610 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1611 gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
1612 tcg_temp_free(tmp);
1613 }
1614 return;
1615 case 0x4025: /* rotcr Rn */
1616 {
1617 TCGv tmp = tcg_temp_new();
1618 tcg_gen_mov_i32(tmp, cpu_sr);
1619 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1620 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1621 gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
1622 tcg_temp_free(tmp);
1623 }
1624 return;
1625 case 0x4004: /* rotl Rn */
1626 tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
1627 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1628 return;
1629 case 0x4005: /* rotr Rn */
1630 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1631 tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
1632 return;
1633 case 0x4000: /* shll Rn */
1634 case 0x4020: /* shal Rn */
1635 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
1636 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
1637 return;
1638 case 0x4021: /* shar Rn */
1639 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1640 tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
1641 return;
1642 case 0x4001: /* shlr Rn */
1643 gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
1644 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
1645 return;
1646 case 0x4008: /* shll2 Rn */
1647 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 2);
1648 return;
1649 case 0x4018: /* shll8 Rn */
1650 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 8);
1651 return;
1652 case 0x4028: /* shll16 Rn */
1653 tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 16);
1654 return;
1655 case 0x4009: /* shlr2 Rn */
1656 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 2);
1657 return;
1658 case 0x4019: /* shlr8 Rn */
1659 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 8);
1660 return;
1661 case 0x4029: /* shlr16 Rn */
1662 tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 16);
1663 return;
1664 case 0x401b: /* tas.b @Rn */
1665 {
1666 TCGv addr, val;
1667 addr = tcg_temp_local_new();
1668 tcg_gen_mov_i32(addr, REG(B11_8));
1669 val = tcg_temp_local_new();
1670 tcg_gen_qemu_ld_i32(val, addr, ctx->memidx, MO_UB);
1671 gen_cmp_imm(TCG_COND_EQ, val, 0);
1672 tcg_gen_ori_i32(val, val, 0x80);
1673 tcg_gen_qemu_st_i32(val, addr, ctx->memidx, MO_UB);
1674 tcg_temp_free(val);
1675 tcg_temp_free(addr);
1676 }
1677 return;
1678 case 0xf00d: /* fsts FPUL,FRn - FPSCR: Nothing */
1679 CHECK_FPU_ENABLED
1680 tcg_gen_mov_i32(cpu_fregs[FREG(B11_8)], cpu_fpul);
1681 return;
1682 case 0xf01d: /* flds FRm,FPUL - FPSCR: Nothing */
1683 CHECK_FPU_ENABLED
1684 tcg_gen_mov_i32(cpu_fpul, cpu_fregs[FREG(B11_8)]);
1685 return;
1686 case 0xf02d: /* float FPUL,FRn/DRn - FPSCR: R[PR,Enable.I]/W[Cause,Flag] */
1687 CHECK_FPU_ENABLED
1688 if (ctx->flags & FPSCR_PR) {
1689 TCGv_i64 fp;
1690 if (ctx->opcode & 0x0100)
1691 break; /* illegal instruction */
1692 fp = tcg_temp_new_i64();
1693 gen_helper_float_DT(fp, cpu_env, cpu_fpul);
1694 gen_store_fpr64(fp, DREG(B11_8));
1695 tcg_temp_free_i64(fp);
1696 }
1697 else {
1698 gen_helper_float_FT(cpu_fregs[FREG(B11_8)], cpu_env, cpu_fpul);
1699 }
1700 return;
1701 case 0xf03d: /* ftrc FRm/DRm,FPUL - FPSCR: R[PR,Enable.V]/W[Cause,Flag] */
1702 CHECK_FPU_ENABLED
1703 if (ctx->flags & FPSCR_PR) {
1704 TCGv_i64 fp;
1705 if (ctx->opcode & 0x0100)
1706 break; /* illegal instruction */
1707 fp = tcg_temp_new_i64();
1708 gen_load_fpr64(fp, DREG(B11_8));
1709 gen_helper_ftrc_DT(cpu_fpul, cpu_env, fp);
1710 tcg_temp_free_i64(fp);
1711 }
1712 else {
1713 gen_helper_ftrc_FT(cpu_fpul, cpu_env, cpu_fregs[FREG(B11_8)]);
1714 }
1715 return;
1716 case 0xf04d: /* fneg FRn/DRn - FPSCR: Nothing */
1717 CHECK_FPU_ENABLED
1718 {
1719 gen_helper_fneg_T(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1720 }
1721 return;
1722 case 0xf05d: /* fabs FRn/DRn */
1723 CHECK_FPU_ENABLED
1724 if (ctx->flags & FPSCR_PR) {
1725 if (ctx->opcode & 0x0100)
1726 break; /* illegal instruction */
1727 TCGv_i64 fp = tcg_temp_new_i64();
1728 gen_load_fpr64(fp, DREG(B11_8));
1729 gen_helper_fabs_DT(fp, fp);
1730 gen_store_fpr64(fp, DREG(B11_8));
1731 tcg_temp_free_i64(fp);
1732 } else {
1733 gen_helper_fabs_FT(cpu_fregs[FREG(B11_8)], cpu_fregs[FREG(B11_8)]);
1734 }
1735 return;
1736 case 0xf06d: /* fsqrt FRn */
1737 CHECK_FPU_ENABLED
1738 if (ctx->flags & FPSCR_PR) {
1739 if (ctx->opcode & 0x0100)
1740 break; /* illegal instruction */
1741 TCGv_i64 fp = tcg_temp_new_i64();
1742 gen_load_fpr64(fp, DREG(B11_8));
1743 gen_helper_fsqrt_DT(fp, cpu_env, fp);
1744 gen_store_fpr64(fp, DREG(B11_8));
1745 tcg_temp_free_i64(fp);
1746 } else {
1747 gen_helper_fsqrt_FT(cpu_fregs[FREG(B11_8)], cpu_env,
1748 cpu_fregs[FREG(B11_8)]);
1749 }
1750 return;
1751 case 0xf07d: /* fsrra FRn */
1752 CHECK_FPU_ENABLED
1753 break;
1754 case 0xf08d: /* fldi0 FRn - FPSCR: R[PR] */
1755 CHECK_FPU_ENABLED
1756 if (!(ctx->flags & FPSCR_PR)) {
1757 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0);
1758 }
1759 return;
1760 case 0xf09d: /* fldi1 FRn - FPSCR: R[PR] */
1761 CHECK_FPU_ENABLED
1762 if (!(ctx->flags & FPSCR_PR)) {
1763 tcg_gen_movi_i32(cpu_fregs[FREG(B11_8)], 0x3f800000);
1764 }
1765 return;
1766 case 0xf0ad: /* fcnvsd FPUL,DRn */
1767 CHECK_FPU_ENABLED
1768 {
1769 TCGv_i64 fp = tcg_temp_new_i64();
1770 gen_helper_fcnvsd_FT_DT(fp, cpu_env, cpu_fpul);
1771 gen_store_fpr64(fp, DREG(B11_8));
1772 tcg_temp_free_i64(fp);
1773 }
1774 return;
1775 case 0xf0bd: /* fcnvds DRn,FPUL */
1776 CHECK_FPU_ENABLED
1777 {
1778 TCGv_i64 fp = tcg_temp_new_i64();
1779 gen_load_fpr64(fp, DREG(B11_8));
1780 gen_helper_fcnvds_DT_FT(cpu_fpul, cpu_env, fp);
1781 tcg_temp_free_i64(fp);
1782 }
1783 return;
1784 case 0xf0ed: /* fipr FVm,FVn */
1785 CHECK_FPU_ENABLED
1786 if ((ctx->flags & FPSCR_PR) == 0) {
1787 TCGv m, n;
1788 m = tcg_const_i32((ctx->opcode >> 8) & 3);
1789 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1790 gen_helper_fipr(cpu_env, m, n);
1791 tcg_temp_free(m);
1792 tcg_temp_free(n);
1793 return;
1794 }
1795 break;
1796 case 0xf0fd: /* ftrv XMTRX,FVn */
1797 CHECK_FPU_ENABLED
1798 if ((ctx->opcode & 0x0300) == 0x0100 &&
1799 (ctx->flags & FPSCR_PR) == 0) {
1800 TCGv n;
1801 n = tcg_const_i32((ctx->opcode >> 10) & 3);
1802 gen_helper_ftrv(cpu_env, n);
1803 tcg_temp_free(n);
1804 return;
1805 }
1806 break;
1807 }
1808 #if 0
1809 fprintf(stderr, "unknown instruction 0x%04x at pc 0x%08x\n",
1810 ctx->opcode, ctx->pc);
1811 fflush(stderr);
1812 #endif
1813 tcg_gen_movi_i32(cpu_pc, ctx->pc);
1814 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1815 gen_helper_raise_slot_illegal_instruction(cpu_env);
1816 } else {
1817 gen_helper_raise_illegal_instruction(cpu_env);
1818 }
1819 ctx->bstate = BS_BRANCH;
1820 }
1821
1822 static void decode_opc(DisasContext * ctx)
1823 {
1824 uint32_t old_flags = ctx->flags;
1825
1826 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
1827 tcg_gen_debug_insn_start(ctx->pc);
1828 }
1829
1830 _decode_opc(ctx);
1831
1832 if (old_flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
1833 if (ctx->flags & DELAY_SLOT_CLEARME) {
1834 gen_store_flags(0);
1835 } else {
1836 /* go out of the delay slot */
1837 uint32_t new_flags = ctx->flags;
1838 new_flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1839 gen_store_flags(new_flags);
1840 }
1841 ctx->flags = 0;
1842 ctx->bstate = BS_BRANCH;
1843 if (old_flags & DELAY_SLOT_CONDITIONAL) {
1844 gen_delayed_conditional_jump(ctx);
1845 } else if (old_flags & DELAY_SLOT) {
1846 gen_jump(ctx);
1847 }
1848
1849 }
1850
1851 /* go into a delay slot */
1852 if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL))
1853 gen_store_flags(ctx->flags);
1854 }
1855
1856 static inline void
1857 gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
1858 bool search_pc)
1859 {
1860 CPUState *cs = CPU(cpu);
1861 CPUSH4State *env = &cpu->env;
1862 DisasContext ctx;
1863 target_ulong pc_start;
1864 static uint16_t *gen_opc_end;
1865 CPUBreakpoint *bp;
1866 int i, ii;
1867 int num_insns;
1868 int max_insns;
1869
1870 pc_start = tb->pc;
1871 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
1872 ctx.pc = pc_start;
1873 ctx.flags = (uint32_t)tb->flags;
1874 ctx.bstate = BS_NONE;
1875 ctx.memidx = (ctx.flags & SR_MD) == 0 ? 1 : 0;
1876 /* We don't know if the delayed pc came from a dynamic or static branch,
1877 so assume it is a dynamic branch. */
1878 ctx.delayed_pc = -1; /* use delayed pc from env pointer */
1879 ctx.tb = tb;
1880 ctx.singlestep_enabled = cs->singlestep_enabled;
1881 ctx.features = env->features;
1882 ctx.has_movcal = (ctx.flags & TB_FLAG_PENDING_MOVCA);
1883
1884 ii = -1;
1885 num_insns = 0;
1886 max_insns = tb->cflags & CF_COUNT_MASK;
1887 if (max_insns == 0)
1888 max_insns = CF_COUNT_MASK;
1889 gen_tb_start();
1890 while (ctx.bstate == BS_NONE && tcg_ctx.gen_opc_ptr < gen_opc_end) {
1891 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
1892 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
1893 if (ctx.pc == bp->pc) {
1894 /* We have hit a breakpoint - make sure PC is up-to-date */
1895 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1896 gen_helper_debug(cpu_env);
1897 ctx.bstate = BS_BRANCH;
1898 break;
1899 }
1900 }
1901 }
1902 if (search_pc) {
1903 i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1904 if (ii < i) {
1905 ii++;
1906 while (ii < i)
1907 tcg_ctx.gen_opc_instr_start[ii++] = 0;
1908 }
1909 tcg_ctx.gen_opc_pc[ii] = ctx.pc;
1910 gen_opc_hflags[ii] = ctx.flags;
1911 tcg_ctx.gen_opc_instr_start[ii] = 1;
1912 tcg_ctx.gen_opc_icount[ii] = num_insns;
1913 }
1914 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
1915 gen_io_start();
1916 #if 0
1917 fprintf(stderr, "Loading opcode at address 0x%08x\n", ctx.pc);
1918 fflush(stderr);
1919 #endif
1920 ctx.opcode = cpu_lduw_code(env, ctx.pc);
1921 decode_opc(&ctx);
1922 num_insns++;
1923 ctx.pc += 2;
1924 if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
1925 break;
1926 if (cs->singlestep_enabled) {
1927 break;
1928 }
1929 if (num_insns >= max_insns)
1930 break;
1931 if (singlestep)
1932 break;
1933 }
1934 if (tb->cflags & CF_LAST_IO)
1935 gen_io_end();
1936 if (cs->singlestep_enabled) {
1937 tcg_gen_movi_i32(cpu_pc, ctx.pc);
1938 gen_helper_debug(cpu_env);
1939 } else {
1940 switch (ctx.bstate) {
1941 case BS_STOP:
1942 /* gen_op_interrupt_restart(); */
1943 /* fall through */
1944 case BS_NONE:
1945 if (ctx.flags) {
1946 gen_store_flags(ctx.flags | DELAY_SLOT_CLEARME);
1947 }
1948 gen_goto_tb(&ctx, 0, ctx.pc);
1949 break;
1950 case BS_EXCP:
1951 /* gen_op_interrupt_restart(); */
1952 tcg_gen_exit_tb(0);
1953 break;
1954 case BS_BRANCH:
1955 default:
1956 break;
1957 }
1958 }
1959
1960 gen_tb_end(tb, num_insns);
1961 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
1962 if (search_pc) {
1963 i = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
1964 ii++;
1965 while (ii <= i)
1966 tcg_ctx.gen_opc_instr_start[ii++] = 0;
1967 } else {
1968 tb->size = ctx.pc - pc_start;
1969 tb->icount = num_insns;
1970 }
1971
1972 #ifdef DEBUG_DISAS
1973 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
1974 qemu_log("IN:\n"); /* , lookup_symbol(pc_start)); */
1975 log_target_disas(env, pc_start, ctx.pc - pc_start, 0);
1976 qemu_log("\n");
1977 }
1978 #endif
1979 }
1980
/* Translate the TB at tb->pc; no pc-search side tables are recorded
   (search_pc = false), so tb->size/tb->icount get filled in instead. */
void gen_intermediate_code(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(sh_env_get_cpu(env), tb, false);
}
1985
/* Re-translate the TB while recording the per-op side tables
   (search_pc = true) used by restore_state_to_opc() to map an op index
   back to a guest pc and translation flags. */
void gen_intermediate_code_pc(CPUSH4State * env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(sh_env_get_cpu(env), tb, true);
}
1990
/* Restore env->pc and env->flags from the side tables recorded during a
   search_pc translation of @tb.  pc_pos is the op index found for the
   faulting host pc; tb itself is unused here. */
void restore_state_to_opc(CPUSH4State *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
    env->flags = gen_opc_hflags[pc_pos];
}
1995 }