]> git.proxmox.com Git - mirror_qemu.git/blob - target/rx/translate.c
Merge tag 'mem-2022-10-28' of https://github.com/davidhildenbrand/qemu into staging
[mirror_qemu.git] / target / rx / translate.c
1 /*
2 * RX translation
3 *
4 * Copyright (c) 2019 Yoshinori Sato
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include "qemu/bswap.h"
21 #include "qemu/qemu-print.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "exec/log.h"
30
/* Per-translation-block disassembly state for the RX front end. */
typedef struct DisasContext {
    DisasContextBase base;
    CPURXState *env;     /* CPU state, used only for code fetch at translate time */
    uint32_t pc;         /* address of the instruction being translated */
    uint32_t tb_flags;   /* cached PSW bits (PM/U) captured at TB start */
} DisasContext;
37
/* A condition materialised from PSW flags: "value cond 0". */
typedef struct DisasCompare {
    TCGv value;   /* value to compare against zero */
    TCGv temp;    /* scratch temp owned by the caller; may back "value" */
    TCGCond cond; /* comparison applied to value */
} DisasCompare;
43
44 const char *rx_crname(uint8_t cr)
45 {
46 static const char *cr_names[] = {
47 "psw", "pc", "usp", "fpsw", "", "", "", "",
48 "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
49 };
50 if (cr >= ARRAY_SIZE(cr_names)) {
51 return "illegal";
52 }
53 return cr_names[cr];
54 }
55
56 /* Target-specific values for dc->base.is_jmp. */
57 #define DISAS_JUMP DISAS_TARGET_0
58 #define DISAS_UPDATE DISAS_TARGET_1
59 #define DISAS_EXIT DISAS_TARGET_2
60
61 /* global register indexes */
62 static TCGv cpu_regs[16];
63 static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
64 static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
65 static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
66 static TCGv cpu_fintv, cpu_intb, cpu_pc;
67 static TCGv_i64 cpu_acc;
68
69 #define cpu_sp cpu_regs[0]
70
71 #include "exec/gen-icount.h"
72
73 /* decoder helper */
74 static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
75 int i, int n)
76 {
77 while (++i <= n) {
78 uint8_t b = cpu_ldub_code(ctx->env, ctx->base.pc_next++);
79 insn |= b << (32 - i * 8);
80 }
81 return insn;
82 }
83
/*
 * Fetch an immediate operand from the instruction stream and advance
 * pc_next past it.  "sz" encodes the immediate length: 1, 2 or 3 bytes
 * (sign-extended to 32 bits), or 0 for a full 32-bit immediate.
 */
static uint32_t li(DisasContext *ctx, int sz)
{
    int32_t tmp, addr;
    CPURXState *env = ctx->env;
    addr = ctx->base.pc_next;

    tcg_debug_assert(sz < 4);
    switch (sz) {
    case 1:
        ctx->base.pc_next += 1;
        return cpu_ldsb_code(env, addr);
    case 2:
        ctx->base.pc_next += 2;
        return cpu_ldsw_code(env, addr);
    case 3:
        /* 24-bit little-endian immediate: sign comes from the top byte. */
        ctx->base.pc_next += 3;
        tmp = cpu_ldsb_code(env, addr + 2) << 16;
        tmp |= cpu_lduw_code(env, addr) & 0xffff;
        return tmp;
    case 0:
        ctx->base.pc_next += 4;
        return cpu_ldl_code(env, addr);
    }
    return 0;
}
109
110 static int bdsp_s(DisasContext *ctx, int d)
111 {
112 /*
113 * 0 -> 8
114 * 1 -> 9
115 * 2 -> 10
116 * 3 -> 3
117 * :
118 * 7 -> 7
119 */
120 if (d < 3) {
121 d += 8;
122 }
123 return d;
124 }
125
126 /* Include the auto-generated decoder. */
127 #include "decode-insns.c.inc"
128
/* Dump pc, packed PSW and all 16 general registers (4 per line) to "f". */
void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    RXCPU *cpu = RX_CPU(cs);
    CPURXState *env = &cpu->env;
    int i;
    uint32_t psw;

    psw = rx_cpu_pack_psw(env);
    qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
                 env->pc, psw);
    for (i = 0; i < 16; i += 4) {
        qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
                     i, env->regs[i], i + 1, env->regs[i + 1],
                     i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
    }
}
145
/*
 * Jump to "dest": chain directly to the next TB when allowed, otherwise
 * fall back to a TB lookup.  Ends the current block (DISAS_NORETURN).
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_lookup_and_goto_ptr();
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
158
/* generic load wrapper: sign-extending load of "size" from [mem] into reg */
static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
}
164
/* unsigned load wrapper: zero-extending load of "size" from [mem] into reg */
static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
}
170
/* generic store wrapper: store low "size" of reg at [mem] */
static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
}
176
/* [ri, rb] : mem = rb + (ri << size), i.e. index scaled by operand size */
static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
                                   int size, int ri, int rb)
{
    tcg_gen_shli_i32(mem, cpu_regs[ri], size);
    tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
}
184
/*
 * dsp[reg] : compute the effective address for addressing mode "ld":
 *   ld == 0: no displacement, the register itself is returned;
 *   ld == 1: 8-bit displacement fetched from the insn stream;
 *   ld == 2: 16-bit displacement fetched from the insn stream.
 * The displacement is scaled by the operand size ("<< size") and pc_next
 * is advanced past the fetched bytes.  Returns either cpu_regs[reg] or
 * "mem" holding the computed address.
 */
static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
                                 int ld, int size, int reg)
{
    uint32_t dsp;

    tcg_debug_assert(ld < 3);
    switch (ld) {
    case 0:
        return cpu_regs[reg];
    case 1:
        dsp = cpu_ldub_code(ctx->env, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 1;
        return mem;
    case 2:
        dsp = cpu_lduw_code(ctx->env, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 2;
        return mem;
    }
    return NULL;
}
208
/* Map the "mi" memory-interface field to a MemOp: b, w, l, uw, ub. */
static inline MemOp mi_to_mop(unsigned mi)
{
    static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
    tcg_debug_assert(mi < 5);
    return mop[mi];
}
215
/*
 * load source operand: for ld < 3 fetch from memory (addressing mode
 * "ld", extension per "mi") into "mem" and return it; for ld == 3 the
 * operand is register rs itself.
 */
static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
                                  int ld, int mi, int rs)
{
    TCGv addr;
    MemOp mop;
    if (ld < 3) {
        mop = mi_to_mop(mi);
        addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
        tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
        return mem;
    } else {
        return cpu_regs[rs];
    }
}
231
232 /* Processor mode check */
233 static int is_privileged(DisasContext *ctx, int is_exception)
234 {
235 if (FIELD_EX32(ctx->tb_flags, PSW, PM)) {
236 if (is_exception) {
237 gen_helper_raise_privilege_violation(cpu_env);
238 }
239 return 0;
240 } else {
241 return 1;
242 }
243 }
244
/*
 * generate QEMU condition: translate an RX condition code (0-15) into a
 * DisasCompare "value cond 0".  Flags are kept unpacked: Z and S hold the
 * raw result (Z==0 means zero, S<0 means negative), C is a 0/1 boolean,
 * O<0 means overflow.  dc->temp must be a live temp provided by the caller;
 * composite conditions are computed into it.
 */
static void psw_cond(DisasCompare *dc, uint32_t cond)
{
    tcg_debug_assert(cond < 16);
    switch (cond) {
    case 0: /* z */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_z;
        break;
    case 1: /* nz */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_z;
        break;
    case 2: /* c */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_c;
        break;
    case 3: /* nc */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_c;
        break;
    case 4: /* gtu (C& ~Z) == 1 */
    case 5: /* leu (C& ~Z) == 0 */
        tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
        tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
        dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 6: /* pz (S == 0) */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_s;
        break;
    case 7: /* n (S == 1) */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_s;
        break;
    case 8: /* ge (S^O)==0 */
    case 9: /* lt (S^O)==1 */
        /* sign of (S ^ O) encodes the comparison result */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
        dc->value = dc->temp;
        break;
    case 10: /* gt ((S^O)|Z)==0 */
    case 11: /* le ((S^O)|Z)==1 */
        /* temp = Z masked to zero when (S^O) is negative */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        tcg_gen_sari_i32(dc->temp, dc->temp, 31);
        tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
        dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 12: /* o */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_o;
        break;
    case 13: /* no */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_o;
        break;
    case 14: /* always true */
        dc->cond = TCG_COND_ALWAYS;
        dc->value = dc->temp;
        break;
    case 15: /* always false */
        dc->cond = TCG_COND_NEVER;
        dc->value = dc->temp;
        break;
    }
}
313
314 static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc)
315 {
316 switch (cr) {
317 case 0: /* PSW */
318 gen_helper_pack_psw(ret, cpu_env);
319 break;
320 case 1: /* PC */
321 tcg_gen_movi_i32(ret, pc);
322 break;
323 case 2: /* USP */
324 if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
325 tcg_gen_mov_i32(ret, cpu_sp);
326 } else {
327 tcg_gen_mov_i32(ret, cpu_usp);
328 }
329 break;
330 case 3: /* FPSW */
331 tcg_gen_mov_i32(ret, cpu_fpsw);
332 break;
333 case 8: /* BPSW */
334 tcg_gen_mov_i32(ret, cpu_bpsw);
335 break;
336 case 9: /* BPC */
337 tcg_gen_mov_i32(ret, cpu_bpc);
338 break;
339 case 10: /* ISP */
340 if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
341 tcg_gen_mov_i32(ret, cpu_isp);
342 } else {
343 tcg_gen_mov_i32(ret, cpu_sp);
344 }
345 break;
346 case 11: /* FINTV */
347 tcg_gen_mov_i32(ret, cpu_fintv);
348 break;
349 case 12: /* INTB */
350 tcg_gen_mov_i32(ret, cpu_intb);
351 break;
352 default:
353 qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr);
354 /* Unimplement registers return 0 */
355 tcg_gen_movi_i32(ret, 0);
356 break;
357 }
358 }
359
360 static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
361 {
362 if (cr >= 8 && !is_privileged(ctx, 0)) {
363 /* Some control registers can only be written in privileged mode. */
364 qemu_log_mask(LOG_GUEST_ERROR,
365 "disallow control register write %s", rx_crname(cr));
366 return;
367 }
368 switch (cr) {
369 case 0: /* PSW */
370 gen_helper_set_psw(cpu_env, val);
371 if (is_privileged(ctx, 0)) {
372 /* PSW.{I,U} may be updated here. exit TB. */
373 ctx->base.is_jmp = DISAS_UPDATE;
374 }
375 break;
376 /* case 1: to PC not supported */
377 case 2: /* USP */
378 if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
379 tcg_gen_mov_i32(cpu_sp, val);
380 } else {
381 tcg_gen_mov_i32(cpu_usp, val);
382 }
383 break;
384 case 3: /* FPSW */
385 gen_helper_set_fpsw(cpu_env, val);
386 break;
387 case 8: /* BPSW */
388 tcg_gen_mov_i32(cpu_bpsw, val);
389 break;
390 case 9: /* BPC */
391 tcg_gen_mov_i32(cpu_bpc, val);
392 break;
393 case 10: /* ISP */
394 if (FIELD_EX32(ctx->tb_flags, PSW, U)) {
395 tcg_gen_mov_i32(cpu_isp, val);
396 } else {
397 tcg_gen_mov_i32(cpu_sp, val);
398 }
399 break;
400 case 11: /* FINTV */
401 tcg_gen_mov_i32(cpu_fintv, val);
402 break;
403 case 12: /* INTB */
404 tcg_gen_mov_i32(cpu_intb, val);
405 break;
406 default:
407 qemu_log_mask(LOG_GUEST_ERROR,
408 "Unimplement control register %d", cr);
409 break;
410 }
411 }
412
/* Push a 32-bit value: pre-decrement sp by 4, then store. */
static void push(TCGv val)
{
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(MO_32, val, cpu_sp);
}
418
/* Pop a 32-bit value: load from sp, then post-increment sp by 4. */
static void pop(TCGv ret)
{
    rx_gen_ld(MO_32, ret, cpu_sp);
    tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
}
424
425 /* mov.<bwl> rs,dsp5[rd] */
426 static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
427 {
428 TCGv mem;
429 mem = tcg_temp_new();
430 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
431 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
432 tcg_temp_free(mem);
433 return true;
434 }
435
436 /* mov.<bwl> dsp5[rs],rd */
437 static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
438 {
439 TCGv mem;
440 mem = tcg_temp_new();
441 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
442 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
443 tcg_temp_free(mem);
444 return true;
445 }
446
/* mov.l #uimm4,rd */
/* mov.l #uimm8,rd */
/* mov.l #imm,rd */
static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
{
    /* Immediate was already fetched/extended by the decoder. */
    tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
    return true;
}
455
/* mov.<bwl> #uimm8,dsp[rd] */
/* mov.<bwl> #imm, dsp[rd] */
static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
{
    TCGv imm, mem;
    imm = tcg_const_i32(a->imm);
    mem = tcg_temp_new();
    /* effective address = rd + dsp scaled by operand size */
    tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
    rx_gen_st(a->sz, imm, mem);
    tcg_temp_free(imm);
    tcg_temp_free(mem);
    return true;
}
469
/* mov.<bwl> [ri,rb],rd : load rd from rb + (ri << sz) */
static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
    tcg_temp_free(mem);
    return true;
}
480
/* mov.<bwl> rd,[ri,rb] : store rs at rb + (ri << sz) */
static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_st(a->sz, cpu_regs[a->rs], mem);
    tcg_temp_free(mem);
    return true;
}
491
/* mov.<bwl> dsp[rs],dsp[rd] */
/* mov.<bwl> rs,dsp[rd] */
/* mov.<bwl> dsp[rs],rd */
/* mov.<bwl> rs,rd */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
    /* Register-to-register moves sign-extend per operand size. */
    static void (* const mov[])(TCGv ret, TCGv arg) = {
        tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32,
    };
    TCGv tmp, mem, addr;
    /* lds/ldd == 3 means "register operand" rather than a memory mode. */
    if (a->lds == 3 && a->ldd == 3) {
        /* mov.<bwl> rs,rd */
        mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
        return true;
    }

    mem = tcg_temp_new();
    if (a->lds == 3) {
        /* mov.<bwl> rs,dsp[rd] */
        /* NOTE(review): rs/rd roles follow the decode-field mapping here;
         * verify against decode-insns if touching this. */
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
        rx_gen_st(a->sz, cpu_regs[a->rd], addr);
    } else if (a->ldd == 3) {
        /* mov.<bwl> dsp[rs],rd */
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
    } else {
        /* mov.<bwl> dsp[rs],dsp[rd] : memory-to-memory via a temp */
        tmp = tcg_temp_new();
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, tmp, addr);
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
        rx_gen_st(a->sz, tmp, addr);
        tcg_temp_free(tmp);
    }
    tcg_temp_free(mem);
    return true;
}
529
/* mov.<bwl> rs,[rd+] */
/* mov.<bwl> rs,[-rd] */
static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
{
    TCGv val;
    val = tcg_temp_new();
    /* Copy rs first so the stored value predates any update of rd
       (matters when rs == rd). */
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    if (a->ad == 1) {
        /* pre-decrement form [-rd] */
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_st(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        /* post-increment form [rd+] */
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_temp_free(val);
    return true;
}
547
/* mov.<bwl> [rd+],rs */
/* mov.<bwl> [-rd],rs */
static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        /* pre-decrement form [-rd] */
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        /* post-increment form [rd+] */
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    /* Write the destination last so rs == rd still sees the loaded value. */
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    tcg_temp_free(val);
    return true;
}
565
/* movu.<bw> dsp5[rs],rd */
/* movu.<bw> dsp[rs],rd */
static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
    /* zero-extending load */
    rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
    tcg_temp_free(mem);
    return true;
}
577
/* movu.<bw> rs,rd : zero-extend the low byte/word of rs into rd */
static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
{
    static void (* const ext[])(TCGv ret, TCGv arg) = {
        tcg_gen_ext8u_i32, tcg_gen_ext16u_i32,
    };
    ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}
587
/* movu.<bw> [ri,rb],rd : zero-extending load from rb + (ri << sz) */
static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
{
    TCGv mem;
    mem = tcg_temp_new();
    rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
    rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
    tcg_temp_free(mem);
    return true;
}
598
/* movu.<bw> [rd+],rs */
/* movu.<bw> [-rd],rs */
static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        /* pre-decrement form [-rd] */
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    /* zero-extending load */
    rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        /* post-increment form [rd+] */
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    /* Write the destination last so rs == rd still sees the loaded value. */
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    tcg_temp_free(val);
    return true;
}
616
617
/* pop rd : implemented as "mov.l [r0+], rd" via trans_MOV_pr */
static bool trans_POP(DisasContext *ctx, arg_POP *a)
{
    /* mov.l [r0+], rd */
    arg_MOV_rp mov_a;
    mov_a.rd = 0;      /* r0 is the stack pointer */
    mov_a.rs = a->rd;
    mov_a.ad = 0;      /* post-increment */
    mov_a.sz = MO_32;
    trans_MOV_pr(ctx, &mov_a);
    return true;
}
630
/* popc cr : pop a word from the stack into control register cr */
static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
{
    TCGv val;
    val = tcg_temp_new();
    pop(val);
    move_to_cr(ctx, val, a->cr);
    tcg_temp_free(val);
    return true;
}
641
/* popm rd-rd2 : pop registers rd..rd2 in ascending order */
static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
{
    int r;
    /* Invalid ranges are only logged; the clamped loop below still runs. */
    if (a->rd == 0 || a->rd >= a->rd2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rd, a->rd2);
    }
    r = a->rd;
    while (r <= a->rd2 && r < 16) {
        pop(cpu_regs[r++]);
    }
    return true;
}
656
657
/* push.<bwl> rs : sp always moves by 4; only "sz" bytes are stored */
static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
{
    TCGv val;
    val = tcg_temp_new();
    /* Copy first in case rs is the stack pointer itself. */
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    tcg_temp_free(val);
    return true;
}
669
/* push.<bwl> dsp[rs] : load the operand, then push (sp moves by 4) */
static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
{
    TCGv mem, val, addr;
    mem = tcg_temp_new();
    val = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
    rx_gen_ld(a->sz, val, addr);
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    tcg_temp_free(mem);
    tcg_temp_free(val);
    return true;
}
684
/* pushc cr : push the value of a control register */
static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
{
    TCGv val;
    val = tcg_temp_new();
    /* ctx->pc supplies the value when cr is PC */
    move_from_cr(ctx, val, a->cr, ctx->pc);
    push(val);
    tcg_temp_free(val);
    return true;
}
695
/* pushm rs-rs2 : push registers rs2..rs in descending order */
static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
{
    int r;

    /* Invalid ranges are only logged; the clamped loop below still runs. */
    if (a->rs == 0 || a->rs >= a->rs2) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rs, a->rs2);
    }
    r = a->rs2;
    while (r >= a->rs && r >= 0) {
        push(cpu_regs[r--]);
    }
    return true;
}
711
712 /* xchg rs,rd */
713 static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
714 {
715 TCGv tmp;
716 tmp = tcg_temp_new();
717 tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
718 tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
719 tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
720 tcg_temp_free(tmp);
721 return true;
722 }
723
/* xchg dsp[rs].<mi>,rd : atomic exchange between rd and memory */
static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
{
    TCGv mem, addr;
    mem = tcg_temp_new();
    switch (a->mi) {
    case 0: /* dsp[rs].b */
    case 1: /* dsp[rs].w */
    case 2: /* dsp[rs].l */
        /* mi 0..2 directly encodes the access size */
        addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
        break;
    case 3: /* dsp[rs].uw */
    case 4: /* dsp[rs].ub */
        /* mi 3 -> 16-bit, mi 4 -> 8-bit (4 - mi gives the size) */
        addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
                            0, mi_to_mop(a->mi));
    tcg_temp_free(mem);
    return true;
}
747
748 static inline void stcond(TCGCond cond, int rd, int imm)
749 {
750 TCGv z;
751 TCGv _imm;
752 z = tcg_const_i32(0);
753 _imm = tcg_const_i32(imm);
754 tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
755 _imm, cpu_regs[rd]);
756 tcg_temp_free(z);
757 tcg_temp_free(_imm);
758 }
759
/* stz #imm,rd : rd = imm if Z is set */
static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
{
    stcond(TCG_COND_EQ, a->rd, a->imm);
    return true;
}
766
/* stnz #imm,rd : rd = imm if Z is clear */
static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
{
    stcond(TCG_COND_NE, a->rd, a->imm);
    return true;
}
773
774 /* sccnd.<bwl> rd */
775 /* sccnd.<bwl> dsp:[rd] */
776 static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
777 {
778 DisasCompare dc;
779 TCGv val, mem, addr;
780 dc.temp = tcg_temp_new();
781 psw_cond(&dc, a->cd);
782 if (a->ld < 3) {
783 val = tcg_temp_new();
784 mem = tcg_temp_new();
785 tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
786 addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
787 rx_gen_st(a->sz, val, addr);
788 tcg_temp_free(val);
789 tcg_temp_free(mem);
790 } else {
791 tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
792 }
793 tcg_temp_free(dc.temp);
794 return true;
795 }
796
/* rtsd #imm : deallocate imm*4 bytes of frame, then return */
static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
{
    tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
805
/* rtsd #imm, rd-rd2 : restore rd..rd2 from the frame, then return */
static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
{
    int dst;
    int adj;

    /* Skip the part of the frame that is not saved registers. */
    if (a->rd2 >= a->rd) {
        adj = a->imm - (a->rd2 - a->rd + 1);
    } else {
        adj = a->imm - (15 - a->rd + 1);
    }

    tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
    dst = a->rd;
    while (dst <= a->rd2 && dst < 16) {
        pop(cpu_regs[dst++]);
    }
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
827
/* Generator signatures for unary and binary TCG operations. */
typedef void (*op2fn)(TCGv ret, TCGv arg1);
typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);

/* dst = opr(src) */
static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
{
    opr(cpu_regs[dst], cpu_regs[src]);
}
835
/* dst = opr(src, src2) */
static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
{
    opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
}
840
/* dst = opr(src, #src2) — immediate second operand */
static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
{
    TCGv imm = tcg_const_i32(src2);
    opr(cpu_regs[dst], cpu_regs[src], imm);
    tcg_temp_free(imm);
}
847
/* dst = opr(dst, source operand loaded via ld/mi addressing) */
static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
                                int dst, int src, int ld, int mi)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, ld, mi, src);
    opr(cpu_regs[dst], cpu_regs[dst], val);
    tcg_temp_free(mem);
}
857
/* ret = arg1 & arg2; Z and S track the raw result */
static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* write ret last: it may alias arg1/arg2 */
    tcg_gen_mov_i32(ret, cpu_psw_s);
}
864
/* and #uimm:4, rd */
/* and #imm, rd */
static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
{
    rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
    return true;
}
872
/* and dsp[rs], rd */
/* and rs,rd */
static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
{
    rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
880
/* and rs,rs2,rd : three-operand form */
static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
{
    rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
    return true;
}
887
/* ret = arg1 | arg2; Z and S track the raw result */
static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}
894
/* or #uimm:4, rd */
/* or #imm, rd */
static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
{
    rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
    return true;
}
902
/* or dsp[rs], rd */
/* or rs,rd */
static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
{
    rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
910
/* or rs,rs2,rd : three-operand form */
static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
{
    rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
    return true;
}
917
/* ret = arg1 ^ arg2; Z and S track the raw result */
static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}
924
/* xor #imm, rd */
static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
{
    rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
    return true;
}
931
/* xor dsp[rs], rd */
/* xor rs,rd */
static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
{
    rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
939
/* Set Z/S from arg1 & arg2; no result written (test only). */
static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
}
945
/* tst #imm, rd : flags only, rd unchanged (rx_tst ignores ret) */
static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
{
    rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
    return true;
}
952
/* tst dsp[rs], rd */
/* tst rs, rd */
static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
{
    rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
960
/* ret = ~arg1; Z and S track the result */
static void rx_not(TCGv ret, TCGv arg1)
{
    tcg_gen_not_i32(ret, arg1);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}
967
/* not rd */
/* not rs, rd */
static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
{
    rx_gen_op_rr(rx_not, a->rd, a->rs);
    return true;
}
975
/* ret = -arg1; O set on INT_MIN, C set when result is zero, Z/S track result */
static void rx_neg(TCGv ret, TCGv arg1)
{
    /* negating INT_MIN overflows */
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
    tcg_gen_neg_i32(ret, arg1);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}
984
/* neg rd */
/* neg rs, rd */
static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
{
    rx_gen_op_rr(rx_neg, a->rd, a->rs);
    return true;
}
993
/* ret = arg1 + arg2 + psw_c */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z;
    z = tcg_const_i32(0);
    /* two-step wide add: (arg1 + C), then (+ arg2), carry into psw_c */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* signed overflow: operands agree in sign but result differs */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
    tcg_gen_mov_i32(ret, cpu_psw_s);
    tcg_temp_free(z);
}
1008
/* adc #imm, rd */
static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
{
    rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
    return true;
}
1015
/* adc rs, rd */
static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
{
    rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
    return true;
}
1022
/* adc dsp[rs], rd */
static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
{
    /* mi only 2 : the encoding allows only .l memory operands */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1033
/* ret = arg1 + arg2 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z;
    z = tcg_const_i32(0);
    /* wide add: carry lands in psw_c */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* signed overflow: operands agree in sign but result differs */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
    tcg_gen_mov_i32(ret, cpu_psw_s);
    tcg_temp_free(z);
}
1047
/* add #uimm4, rd */
/* add #imm, rs, rd */
static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
{
    rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
    return true;
}
1055
/* add rs, rd */
/* add dsp[rs], rd */
static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
{
    rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1063
/* add rs, rs2, rd : three-operand form */
static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
{
    rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
    return true;
}
1070
/* ret = arg1 - arg2 */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* C is "no borrow": set when arg1 >= arg2 unsigned */
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
    /* signed overflow: operands differ in sign and result differs from arg1 */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    temp = tcg_temp_new_i32();
    tcg_gen_xor_i32(temp, arg1, arg2);
    tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp);
    tcg_temp_free_i32(temp);
    /* CMP not required return: a NULL ret means flags-only (see rx_cmp) */
    if (ret) {
        tcg_gen_mov_i32(ret, cpu_psw_s);
    }
}
/* Compare: subtraction for flags only; "dummy" is ignored. */
static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
{
    rx_sub(NULL, arg1, arg2);
}
/* ret = arg1 - arg2 - !psw_c */
/* -> ret = arg1 + ~arg2 + psw_c */
static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    temp = tcg_temp_new();
    /* reuse the adc flag logic on the complemented operand */
    tcg_gen_not_i32(temp, arg2);
    rx_adc(ret, arg1, temp);
    tcg_temp_free(temp);
}
1102
/* cmp #imm4, rs2 */
/* cmp #imm8, rs2 */
/* cmp #imm, rs2 */
static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
{
    /* dst index 0 is unused: rx_cmp discards its result operand */
    rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
    return true;
}
1111
/* cmp rs, rs2 */
/* cmp dsp[rs], rs2 */
static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
{
    rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1119
/* sub #imm4, rd */
static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
{
    rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
    return true;
}
1126
/* sub rs, rd */
/* sub dsp[rs], rd */
static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
{
    rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1134
/* sub rs2, rs, rd : rd = rs2 - rs (note operand order) */
static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
{
    rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
    return true;
}
1141
/* sbb rs, rd */
static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
{
    rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
    return true;
}
1148
/* sbb dsp[rs], rd */
static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
{
    /* mi only 2 : the encoding allows only .l memory operands */
    if (a->mi != 2) {
        return false;
    }
    rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1159
/* ret = |arg1| via movcond; no PSW flags are updated here */
static void rx_abs(TCGv ret, TCGv arg1)
{
    TCGv neg;
    TCGv zero;
    neg = tcg_temp_new();
    zero = tcg_const_i32(0);
    tcg_gen_neg_i32(neg, arg1);
    /* ret = (arg1 < 0) ? -arg1 : arg1 */
    tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1);
    tcg_temp_free(neg);
    tcg_temp_free(zero);
}
1171
/* abs rd */
/* abs rs, rd */
static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
{
    rx_gen_op_rr(rx_abs, a->rd, a->rs);
    return true;
}
1179
/* max #imm, rd : signed maximum */
static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
{
    rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
    return true;
}
1186
/* max rs, rd */
/* max dsp[rs], rd */
static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
{
    rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1194
/* min #imm, rd : signed minimum */
static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
{
    rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
    return true;
}
1201
/* min rs, rd */
/* min dsp[rs], rd */
static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
{
    rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1209
/* mul #uimm4, rd */
/* mul #imm, rd */
static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
{
    /* low 32 bits only; PSW flags are not affected */
    rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
    return true;
}
1217
/* mul rs, rd */
/* mul dsp[rs], rd */
static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
{
    rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1225
/* mul rs, rs2, rd : three-operand form */
static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
{
    rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
    return true;
}
1232
/* emul #imm, rd */
/*
 * Signed 32x32 -> 64 multiply into the register pair rd:rd+1
 * (low half in rd, high half in rd+1).  rd == 15 has no valid pair;
 * it is logged as a guest error but still emitted, with rd+1
 * wrapping to r0 via the & 15 below.
 */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
    TCGv imm = tcg_const_i32(a->imm);
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* emul rs, rd */
/* emul dsp[rs], rd */
/* Same as above with a register or memory source operand. */
static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}

/* emulu #imm, rd */
/* Unsigned 32x32 -> 64 multiply into rd:rd+1; see EMUL for the
 * rd == 15 handling. */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
    TCGv imm = tcg_const_i32(a->imm);
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* emulu rs, rd */
/* emulu dsp[rs], rd */
static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}
1290
/*
 * Division is done out of line: the helpers receive cpu_env so they
 * can handle the special cases (presumably divide-by-zero and
 * overflow flag updates — see op_helper for the exact semantics).
 */
static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_div(ret, cpu_env, arg1, arg2);
}

static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_divu(ret, cpu_env, arg1, arg2);
}

/* div #imm, rd */
/* Signed divide: rd = rd / imm. */
static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
{
    rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
    return true;
}

/* div rs, rd */
/* div dsp[rs], rd */
static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
{
    rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* divu #imm, rd */
/* Unsigned divide: rd = rd / imm. */
static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
{
    rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
    return true;
}

/* divu rs, rd */
/* divu dsp[rs], rd */
static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
{
    rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1330
1331
/* shll #imm:5, rd */
/* shll #imm:5, rs2, rd */
/*
 * Shift left logical by a constant.  C receives the last bit shifted
 * out; O is set from whether the bits shifted out (plus the original
 * sign) were all zeros or all ones, i.e. whether the signed value was
 * preserved.  Z and S are derived from the result value.
 */
static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    if (a->imm) {
        /* psw_c temporarily holds the sign-extended top imm bits. */
        tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
        tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
        tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
        tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
        tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
        /* Collapse the saved bits into the actual carry flag. */
        tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    } else {
        /* Shift by zero: plain move, C and O cleared. */
        tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
        tcg_gen_movi_i32(cpu_psw_o, 0);
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    return true;
}
1354
/* shll rs, rd */
/*
 * Shift left logical by a register count (masked to 0..31).
 * A zero count skips the shift entirely and just clears C and O;
 * otherwise the flags are computed exactly as in trans_SHLL_irr.
 */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    count = tcg_const_i32(32);
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    /* count = 32 - (rs & 31): the bits that will be shifted out. */
    tcg_gen_sub_i32(count, count, tmp);
    tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    tcg_temp_free(count);
    tcg_temp_free(tmp);
    return true;
}
1388
/*
 * Common right-shift-by-immediate body for SHLR (alith == 0, logical)
 * and SHAR (alith == 1, arithmetic).  The shift is done in two steps:
 * first by imm - 1, so the last bit to be shifted out can be captured
 * into C, then by one more.  O is always cleared; Z and S come from
 * the result.
 */
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        /* C = last bit shifted out. */
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        /* Shift by zero: move and clear C. */
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/*
 * Register-count variant of the above; the count is masked to 0..31
 * and a zero count only clears C.  Same two-step trick to capture the
 * carry bit.
 */
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
    tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
    tcg_temp_free(count);
}
1441
/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
/* Arithmetic shift right (alith == 1). */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 1);
    return true;
}

/* shar rs, rd */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 1);
    return true;
}

/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
/* Logical shift right (alith == 0). */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 0);
    return true;
}

/* shlr rs, rd */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 0);
    return true;
}
1471
/* rolc rd */
/*
 * Rotate rd left by one through the carry flag: bit 31 moves to C,
 * the old C enters at bit 0.  Z and S are updated from the result.
 */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* Save the outgoing bit before the register is shifted. */
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    return true;
}
1486
1487 /* rorc rd */
1488 static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
1489 {
1490 TCGv tmp;
1491 tmp = tcg_temp_new();
1492 tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
1493 tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
1494 tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
1495 tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
1496 tcg_gen_mov_i32(cpu_psw_c, tmp);
1497 tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
1498 tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
1499 return true;
1500 }
1501
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};
/*
 * Common body for ROTL/ROTR with an immediate or register count.
 * C receives the bit that wrapped around: the new LSB for a left
 * rotate, the new MSB for a right rotate.  Z and S come from the
 * result; O is not touched.
 */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}
1527
/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}

/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}

/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}

/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}
1555
/* revl rs, rd */
/* Full 32-bit byte reversal: rd = bswap32(rs). */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}

/* revw rs, rd */
/*
 * Swap the two bytes within each 16-bit half independently:
 * bytes 3210 -> 2301.  Done by OR-ing the low bytes shifted up
 * with the high bytes shifted down.
 */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    tcg_temp_free(tmp);
    return true;
}
1576
/* conditional branch helper */
/*
 * Emit a conditional branch to ctx->pc + dst for condition code cd:
 *   0..13 - test the PSW condition and emit a two-way goto_tb,
 *   14    - always taken,
 *   15    - never taken (no code emitted; falls through).
 */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        /* Not taken: continue at the next insn. */
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        /* Taken: branch target is relative to this insn's pc. */
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        tcg_temp_free(dc.temp);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing do */
        break;
    }
}
1607
/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}

/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
/* Unconditional relative branch: condition code 14 (always). */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bra rs */
/* Register-relative branch: pc = insn pc + rs.  NOTE(review): the
 * decode field is named rd but holds the source register here. */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1640
/* Push the address of the next insn (the return address) on the stack. */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_const_i32(ctx->base.pc_next);
    push(pc);
    tcg_temp_free(pc);
}

/* jmp rs */
/* Absolute indirect jump: pc = rs. */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* jsr rs */
/* Indirect call: push the return address, then pc = rs. */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* bsr dsp:16 */
/* bsr dsp:24 */
/* Relative call: push the return address, then branch always. */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bsr rs */
/* Register-relative call; field named rd holds the source register. */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* rts */
/* Return: pop the saved pc. */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
1690
/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}

/* scmpu */
/* String compare; fully implemented in the C helper. */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(cpu_env);
    return true;
}

/* smovu */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(cpu_env);
    return true;
}

/* smovf */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(cpu_env);
    return true;
}

/* smovb */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(cpu_env);
    return true;
}
1724
/* Invoke a string-op helper, passing the operand size from the insn. */
#define STRING(op)                              \
    do {                                        \
        TCGv size = tcg_const_i32(a->sz);       \
        gen_helper_##op(cpu_env, size);         \
        tcg_temp_free(size);                    \
    } while (0)

/* suntile.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}

/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}
/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}

/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}
1758
/*
 * ret = (upper halfword of rs, signed) * (upper halfword of rs2,
 * signed) << 16.  The sari-by-16 after sign extension selects
 * bits 31:16 of each source; the final shift places the product in
 * the accumulator's fixed-point position.
 */
static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_sari_i64(tmp0, tmp0, 16);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}

/*
 * Same as rx_mul64hi but for the lower halfwords (bits 15:0,
 * sign-extended via ext16s).
 */
static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_ext16s_i64(tmp0, tmp0);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}
1788
/* mulhi rs,rs2 */
/* ACC = product of the upper halfwords (see rx_mul64hi). */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}

/* mullo rs,rs2 */
/* ACC = product of the lower halfwords. */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}

/* machi rs,rs2 */
/* ACC += product of the upper halfwords (multiply-accumulate). */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}

/* maclo rs,rs2 */
/* ACC += product of the lower halfwords. */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}
1824
/* mvfachi rd */
/* rd = upper 32 bits of the 64-bit accumulator. */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}

/* mvfacmi rd */
/* rd = middle 32 bits of the accumulator (bits 47:16). */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    tcg_temp_free_i64(rd64);
    return true;
}

/* mvtachi rs */
/* Upper 32 bits of the accumulator = rs (lower half preserved). */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    tcg_temp_free_i64(rs64);
    return true;
}

/* mvtaclo rs */
/* Lower 32 bits of the accumulator = rs (upper half preserved). */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    tcg_temp_free_i64(rs64);
    return true;
}
1864
/* racw #imm */
/* Round the accumulator word; the helper takes the shift count
 * imm + 1 (the encoded field is the count minus one). */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    TCGv imm = tcg_const_i32(a->imm + 1);
    gen_helper_racw(cpu_env, imm);
    tcg_temp_free(imm);
    return true;
}

/* sat rd */
/*
 * Saturate rd after a signed overflow: if O is set (bit 31 of
 * cpu_psw_o) replace rd with INT32_MAX or INT32_MIN depending on S;
 * otherwise leave rd unchanged.
 */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_const_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    tcg_temp_free(z);
    return true;
}

/* satr */
/* Saturate the accumulator; done in the helper. */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(cpu_env);
    return true;
}
1897
#define cat3(a, b, c) a##b##c
/*
 * Generate the immediate and register/memory forms of a two-operand
 * FP instruction.  The actual arithmetic (and FPSW updates) lives in
 * the named helper; li(ctx, 0) fetches the 32-bit literal that
 * follows the opcode in the insn stream.
 */
#define FOP(name, op)                                         \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,    \
                                        cat3(arg_, name, _ir) * a) \
    {                                                         \
        TCGv imm = tcg_const_i32(li(ctx, 0));                 \
        gen_helper_##op(cpu_regs[a->rd], cpu_env,             \
                        cpu_regs[a->rd], imm);                \
        tcg_temp_free(imm);                                   \
        return true;                                          \
    }                                                         \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx,    \
                                        cat3(arg_, name, _mr) * a) \
    {                                                         \
        TCGv val, mem;                                        \
        mem = tcg_temp_new();                                 \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);  \
        gen_helper_##op(cpu_regs[a->rd], cpu_env,             \
                        cpu_regs[a->rd], val);                \
        tcg_temp_free(mem);                                   \
        return true;                                          \
    }

/* Generate a one-operand FP conversion (register or memory source). */
#define FCONVOP(name, op)                                     \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    {                                                         \
        TCGv val, mem;                                        \
        mem = tcg_temp_new();                                 \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);  \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, val);       \
        tcg_temp_free(mem);                                   \
        return true;                                          \
    }

FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)

/* fcmp #imm, rd */
/* Compare only: result goes to the PSW via the helper, rd unchanged. */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    TCGv imm = tcg_const_i32(li(ctx, 0));
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}

FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)
1960
/* itof rs, rd */
/* itof dsp[rs], rd */
/* Integer to float conversion; FPSW handling is in the helper. */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], cpu_env, val);
    tcg_temp_free(mem);
    return true;
}
1972
/*
 * Bit operations on memory are read-modify-write on a single byte;
 * the mask has exactly one bit set (built by the BITOP expansions).
 */
static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Test only: C = Z = (the selected bit is set); memory not written.
 * Note psw_z gets 0/1 here, i.e. Z flag true when the bit is clear. */
static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(val);
}

static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Register forms of the same four operations. */
static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}

static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}

static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(t0);
}

static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}
2038
/*
 * Generate all four addressing forms of a bit instruction:
 *   _im - immediate bit number, memory operand
 *   _ir - immediate bit number, register operand
 *   _rr - bit number in a register (masked to 0..31), register operand
 *   _rm - bit number in a register (masked to 0..7, a byte), memory operand
 */
#define BITOP(name, op)                                        \
    static bool cat3(trans_, name, _im)(DisasContext *ctx,     \
                                        cat3(arg_, name, _im) * a) \
    {                                                          \
        TCGv mask, mem, addr;                                  \
        mem = tcg_temp_new();                                  \
        mask = tcg_const_i32(1 << a->imm);                     \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);    \
        cat3(rx_, op, m)(addr, mask);                          \
        tcg_temp_free(mask);                                   \
        tcg_temp_free(mem);                                    \
        return true;                                           \
    }                                                          \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx,     \
                                        cat3(arg_, name, _ir) * a) \
    {                                                          \
        TCGv mask;                                             \
        mask = tcg_const_i32(1 << a->imm);                     \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);               \
        tcg_temp_free(mask);                                   \
        return true;                                           \
    }                                                          \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx,     \
                                        cat3(arg_, name, _rr) * a) \
    {                                                          \
        TCGv mask, b;                                          \
        mask = tcg_const_i32(1);                               \
        b = tcg_temp_new();                                    \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31);              \
        tcg_gen_shl_i32(mask, mask, b);                        \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask);               \
        tcg_temp_free(mask);                                   \
        tcg_temp_free(b);                                      \
        return true;                                           \
    }                                                          \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx,     \
                                        cat3(arg_, name, _rm) * a) \
    {                                                          \
        TCGv mask, mem, addr, b;                               \
        mask = tcg_const_i32(1);                               \
        b = tcg_temp_new();                                    \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7);               \
        tcg_gen_shl_i32(mask, mask, b);                        \
        mem = tcg_temp_new();                                  \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs);    \
        cat3(rx_, op, m)(addr, mask);                          \
        tcg_temp_free(mem);                                    \
        tcg_temp_free(mask);                                   \
        tcg_temp_free(b);                                      \
        return true;                                           \
    }

BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
2095
/*
 * BMCnd core: evaluate PSW condition `cond` and deposit the 0/1
 * result into bit `pos` of val (the other bits are preserved).
 */
static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    psw_cond(&dc, cond);
    /* Clear the target bit, then write the condition result into it. */
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
    tcg_temp_free(bit);
    tcg_temp_free(dc.temp);
}

/* bmcnd #imm, dsp[rd] */
/* Read-modify-write the condition bit into a memory byte. */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    tcg_temp_free(val);
    tcg_temp_free(mem);
    return true;
}

/* bmcond #imm, rd */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}
2131
/* PSW bit numbers accepted by CLRPSW/SETPSW (the cb field). */
enum {
    PSW_C = 0,
    PSW_Z = 1,
    PSW_S = 2,
    PSW_O = 3,
    PSW_I = 8,   /* interrupt enable, privileged */
    PSW_U = 9,   /* stack pointer select, privileged */
};
2140
2141 static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
2142 {
2143 if (cb < 8) {
2144 switch (cb) {
2145 case PSW_C:
2146 tcg_gen_movi_i32(cpu_psw_c, val);
2147 break;
2148 case PSW_Z:
2149 tcg_gen_movi_i32(cpu_psw_z, val == 0);
2150 break;
2151 case PSW_S:
2152 tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
2153 break;
2154 case PSW_O:
2155 tcg_gen_movi_i32(cpu_psw_o, val << 31);
2156 break;
2157 default:
2158 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2159 break;
2160 }
2161 } else if (is_privileged(ctx, 0)) {
2162 switch (cb) {
2163 case PSW_I:
2164 tcg_gen_movi_i32(cpu_psw_i, val);
2165 ctx->base.is_jmp = DISAS_UPDATE;
2166 break;
2167 case PSW_U:
2168 if (FIELD_EX32(ctx->tb_flags, PSW, U) != val) {
2169 ctx->tb_flags = FIELD_DP32(ctx->tb_flags, PSW, U, val);
2170 tcg_gen_movi_i32(cpu_psw_u, val);
2171 tcg_gen_mov_i32(val ? cpu_isp : cpu_usp, cpu_sp);
2172 tcg_gen_mov_i32(cpu_sp, val ? cpu_usp : cpu_isp);
2173 }
2174 break;
2175 default:
2176 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2177 break;
2178 }
2179 }
2180 }
2181
/* clrpsw psw */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    clrsetpsw(ctx, a->cb, 0);
    return true;
}

/* setpsw psw */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    clrsetpsw(ctx, a->cb, 1);
    return true;
}

/* mvtipl #imm */
/* Set the interrupt priority level; privileged, silently ignored
 * otherwise (is_privileged raises the exception). */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}
2205
/* mvtc #imm, rd */
/* Move an immediate to a control register (privilege checked in
 * move_to_cr). */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
    TCGv imm;

    imm = tcg_const_i32(a->imm);
    move_to_cr(ctx, imm, a->cr);
    tcg_temp_free(imm);
    return true;
}

/* mvtc rs, rd */
static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
{
    move_to_cr(ctx, cpu_regs[a->rs], a->cr);
    return true;
}

/* mvfc rs, rd */
/* Move from a control register; the current pc is passed so reading
 * PC yields this insn's address. */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    move_from_cr(ctx, cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}
2230
/* rtfi */
/* Return from fast interrupt: restore pc/psw from BPC/BPSW.
 * Privileged; no-op if the check raises. */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        tcg_gen_mov_i32(cpu_pc, cpu_bpc);
        tcg_gen_mov_i32(psw, cpu_bpsw);
        gen_helper_set_psw_rte(cpu_env, psw);
        /* PSW changed outside TCG's view: full exit, not goto_tb. */
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}

/* rte */
/* Return from exception: pop pc then psw from the stack. */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        pop(cpu_pc);
        pop(psw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}
2260
/* brk */
/* Software break: sync pc, then the helper raises the exception
 * (it does not return to generated code). */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* int #imm */
/* Software interrupt with vector imm (0..255 by construction). */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
    TCGv vec;

    tcg_debug_assert(a->imm < 0x100);
    vec = tcg_const_i32(a->imm);
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxint(cpu_env, vec);
    tcg_temp_free(vec);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* wait */
/* Privileged: halt until interrupt (the helper does the halting). */
static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        gen_helper_wait(cpu_env);
    }
    return true;
}
2293
/* Translator-loop callback: capture env and the TB's PSW flags. */
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    CPURXState *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    ctx->env = env;
    ctx->tb_flags = ctx->base.tb->flags;
}

static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next);
}

/* Decode and translate one instruction; unknown encodings raise an
 * illegal-instruction exception at runtime. */
static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    /* Remember this insn's address for pc-relative operands. */
    ctx->pc = ctx->base.pc_next;
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        gen_helper_raise_illegal_instruction(cpu_env);
    }
}
2324
/*
 * Close the TB according to how translation ended:
 *   NEXT/TOO_MANY - fall through to the next insn via goto_tb,
 *   JUMP          - cpu_pc already set; use the jump cache,
 *   UPDATE        - like EXIT but pc must be synced first,
 *   EXIT          - leave the TCG loop (PSW changed),
 *   NORETURN      - an exception was raised; nothing to emit.
 */
static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_UPDATE:
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}

static void rx_tr_disas_log(const DisasContextBase *dcbase,
                            CPUState *cs, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cs, dcbase->pc_first, dcbase->tb->size);
}
2356
/* Callback table consumed by the generic translator loop. */
static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
    .disas_log          = rx_tr_disas_log,
};

/* Entry point from the common code: translate one TB. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cs, tb, max_insns, pc, host_pc, &rx_tr_ops, &dc.base);
}
2373
/* Bind a CPURXState field to a named TCG global. */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
                                       offsetof(CPURXState, sym), name)

/*
 * One-time init: create TCG globals for the 16 GPRs, the split PSW
 * flag fields, the control registers and the 64-bit accumulator.
 */
void rx_translate_init(void)
{
    static const char * const regnames[NUM_REGS] = {
        "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
    };
    int i;

    for (i = 0; i < NUM_REGS; i++) {
        cpu_regs[i] = tcg_global_mem_new_i32(cpu_env,
                                             offsetof(CPURXState, regs[i]),
                                             regnames[i]);
    }
    ALLOC_REGISTER(pc, "PC");
    ALLOC_REGISTER(psw_o, "PSW(O)");
    ALLOC_REGISTER(psw_s, "PSW(S)");
    ALLOC_REGISTER(psw_z, "PSW(Z)");
    ALLOC_REGISTER(psw_c, "PSW(C)");
    ALLOC_REGISTER(psw_u, "PSW(U)");
    ALLOC_REGISTER(psw_i, "PSW(I)");
    ALLOC_REGISTER(psw_pm, "PSW(PM)");
    ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
    ALLOC_REGISTER(usp, "USP");
    ALLOC_REGISTER(fpsw, "FPSW");
    ALLOC_REGISTER(bpsw, "BPSW");
    ALLOC_REGISTER(bpc, "BPC");
    ALLOC_REGISTER(isp, "ISP");
    ALLOC_REGISTER(fintv, "FINTV");
    ALLOC_REGISTER(intb, "INTB");
    cpu_acc = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPURXState, acc), "ACC");
}