]> git.proxmox.com Git - mirror_qemu.git/blob - target/rx/translate.c
Merge remote-tracking branch 'remotes/rth-gitlab/tags/pull-tcg-20210710' into staging
[mirror_qemu.git] / target / rx / translate.c
1 /*
2 * RX translation
3 *
4 * Copyright (c) 2019 Yoshinori Sato
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "qemu/osdep.h"
20 #include "qemu/bswap.h"
21 #include "qemu/qemu-print.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "tcg/tcg-op.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "exec/log.h"
30
31 typedef struct DisasContext {
32 DisasContextBase base;
33 CPURXState *env;
34 uint32_t pc;
35 } DisasContext;
36
37 typedef struct DisasCompare {
38 TCGv value;
39 TCGv temp;
40 TCGCond cond;
41 } DisasCompare;
42
/*
 * Return the printable name of control register 'cr'.
 * Unassigned slots yield "", out-of-range numbers yield "illegal".
 */
const char *rx_crname(uint8_t cr)
{
    static const char *cr_names[] = {
        "psw", "pc", "usp", "fpsw", "", "", "", "",
        "bpsw", "bpc", "isp", "fintv", "intb", "", "", ""
    };
    return cr < sizeof(cr_names) / sizeof(cr_names[0])
        ? cr_names[cr] : "illegal";
}
54
55 /* Target-specific values for dc->base.is_jmp. */
56 #define DISAS_JUMP DISAS_TARGET_0
57 #define DISAS_UPDATE DISAS_TARGET_1
58 #define DISAS_EXIT DISAS_TARGET_2
59
60 /* global register indexes */
61 static TCGv cpu_regs[16];
62 static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c;
63 static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl;
64 static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp;
65 static TCGv cpu_fintv, cpu_intb, cpu_pc;
66 static TCGv_i64 cpu_acc;
67
68 #define cpu_sp cpu_regs[0]
69
70 #include "exec/gen-icount.h"
71
72 /* decoder helper */
73 static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn,
74 int i, int n)
75 {
76 while (++i <= n) {
77 uint8_t b = cpu_ldub_code(ctx->env, ctx->base.pc_next++);
78 insn |= b << (32 - i * 8);
79 }
80 return insn;
81 }
82
/*
 * Load an immediate operand that follows the opcode and advance
 * pc_next past it.
 * sz: 1 = sign-extended byte, 2 = sign-extended word,
 *     3 = sign-extended 24-bit value, 0 = full 32-bit longword.
 */
static uint32_t li(DisasContext *ctx, int sz)
{
    int32_t tmp, addr;
    CPURXState *env = ctx->env;
    addr = ctx->base.pc_next;

    tcg_debug_assert(sz < 4);
    switch (sz) {
    case 1:
        ctx->base.pc_next += 1;
        return cpu_ldsb_code(env, addr);
    case 2:
        ctx->base.pc_next += 2;
        return cpu_ldsw_code(env, addr);
    case 3:
        /* 24-bit: low 16 bits plus a sign-extended third byte. */
        ctx->base.pc_next += 3;
        tmp = cpu_ldsb_code(env, addr + 2) << 16;
        tmp |= cpu_lduw_code(env, addr) & 0xffff;
        return tmp;
    case 0:
        ctx->base.pc_next += 4;
        return cpu_ldl_code(env, addr);
    }
    return 0;
}
108
109 static int bdsp_s(DisasContext *ctx, int d)
110 {
111 /*
112 * 0 -> 8
113 * 1 -> 9
114 * 2 -> 10
115 * 3 -> 3
116 * :
117 * 7 -> 7
118 */
119 if (d < 3) {
120 d += 8;
121 }
122 return d;
123 }
124
125 /* Include the auto-generated decoder. */
126 #include "decode-insns.c.inc"
127
128 void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags)
129 {
130 RXCPU *cpu = RX_CPU(cs);
131 CPURXState *env = &cpu->env;
132 int i;
133 uint32_t psw;
134
135 psw = rx_cpu_pack_psw(env);
136 qemu_fprintf(f, "pc=0x%08x psw=0x%08x\n",
137 env->pc, psw);
138 for (i = 0; i < 16; i += 4) {
139 qemu_fprintf(f, "r%d=0x%08x r%d=0x%08x r%d=0x%08x r%d=0x%08x\n",
140 i, env->regs[i], i + 1, env->regs[i + 1],
141 i + 2, env->regs[i + 2], i + 3, env->regs[i + 3]);
142 }
143 }
144
/*
 * Emit a jump to 'dest'.  Uses a direct TB link when permitted;
 * otherwise stores the new PC and either traps to the debugger
 * (single-stepping) or falls back to a TB lookup.
 */
static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (translator_use_goto_tb(&dc->base, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(cpu_pc, dest);
        tcg_gen_exit_tb(dc->base.tb, n);
    } else {
        tcg_gen_movi_i32(cpu_pc, dest);
        if (dc->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
161
/* generic load wrapper: sign-extending load of 'size' from [mem] into reg */
static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE);
}
167
/* unsigned load wrapper: zero-extending load of 'size' from [mem] into reg */
static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE);
}
173
/* generic store wrapper: store the low 'size' of reg to [mem] */
static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem)
{
    tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE);
}
179
/* [ri, rb]: mem = rb + (ri << size), i.e. index register scaled by
   the operand size added to the base register. */
static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem,
                                   int size, int ri, int rb)
{
    tcg_gen_shli_i32(mem, cpu_regs[ri], size);
    tcg_gen_add_i32(mem, mem, cpu_regs[rb]);
}
187
/*
 * dsp[reg] — compute the effective address for a displacement operand.
 * ld: 0 = no displacement (returns cpu_regs[reg] directly),
 *     1 = 8-bit displacement, 2 = 16-bit displacement.
 * The displacement is read from the instruction stream (pc_next
 * advances past it) and scaled left by the operand size.
 * Returns the TCGv holding the address: either the register itself
 * or 'mem' loaded with reg + dsp.
 */
static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem,
                                 int ld, int size, int reg)
{
    uint32_t dsp;

    tcg_debug_assert(ld < 3);
    switch (ld) {
    case 0:
        return cpu_regs[reg];
    case 1:
        dsp = cpu_ldub_code(ctx->env, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 1;
        return mem;
    case 2:
        dsp = cpu_lduw_code(ctx->env, ctx->base.pc_next) << size;
        tcg_gen_addi_i32(mem, cpu_regs[reg], dsp);
        ctx->base.pc_next += 2;
        return mem;
    }
    return NULL;
}
211
212 static inline MemOp mi_to_mop(unsigned mi)
213 {
214 static const MemOp mop[5] = { MO_SB, MO_SW, MO_UL, MO_UW, MO_UB };
215 tcg_debug_assert(mi < 5);
216 return mop[mi];
217 }
218
/*
 * Load a source operand.  For memory operands (ld < 3) the value is
 * fetched into 'mem' using the access width/extension from 'mi';
 * for ld == 3 the register rs is returned directly.
 */
static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem,
                                  int ld, int mi, int rs)
{
    TCGv addr;
    MemOp mop;
    if (ld < 3) {
        mop = mi_to_mop(mi);
        addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs);
        tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE);
        return mem;
    } else {
        return cpu_regs[rs];
    }
}
234
235 /* Processor mode check */
236 static int is_privileged(DisasContext *ctx, int is_exception)
237 {
238 if (FIELD_EX32(ctx->base.tb->flags, PSW, PM)) {
239 if (is_exception) {
240 gen_helper_raise_privilege_violation(cpu_env);
241 }
242 return 0;
243 } else {
244 return 1;
245 }
246 }
247
/*
 * Translate an RX condition code (cd field, 0..15) into a TCG
 * comparison of dc->value against zero with dc->cond.  dc->temp must
 * be a live temporary: it is used as scratch for the compound
 * conditions and stands in as a dummy value for always/never.
 * PSW flag convention here: Z/C hold the raw value (zero / carry-set
 * tested as ==0 / !=0), S and O are tested via their sign bit.
 */
static void psw_cond(DisasCompare *dc, uint32_t cond)
{
    tcg_debug_assert(cond < 16);
    switch (cond) {
    case 0: /* z */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_z;
        break;
    case 1: /* nz */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_z;
        break;
    case 2: /* c */
        dc->cond = TCG_COND_NE;
        dc->value = cpu_psw_c;
        break;
    case 3: /* nc */
        dc->cond = TCG_COND_EQ;
        dc->value = cpu_psw_c;
        break;
    case 4: /* gtu (C& ~Z) == 1 */
    case 5: /* leu (C& ~Z) == 0 */
        tcg_gen_setcondi_i32(TCG_COND_NE, dc->temp, cpu_psw_z, 0);
        tcg_gen_and_i32(dc->temp, dc->temp, cpu_psw_c);
        dc->cond = (cond == 4) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 6: /* pz (S == 0) */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_s;
        break;
    case 7: /* n (S == 1) */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_s;
        break;
    case 8: /* ge (S^O)==0 */
    case 9: /* lt (S^O)==1 */
        /* sign bit of (S ^ O) decides signed ordering */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        dc->cond = (cond == 8) ? TCG_COND_GE : TCG_COND_LT;
        dc->value = dc->temp;
        break;
    case 10: /* gt ((S^O)|Z)==0 */
    case 11: /* le ((S^O)|Z)==1 */
        /* broadcast sign of S^O, then clear Z bits covered by it:
           result is nonzero iff Z != 0 and (S^O) sign clear */
        tcg_gen_xor_i32(dc->temp, cpu_psw_o, cpu_psw_s);
        tcg_gen_sari_i32(dc->temp, dc->temp, 31);
        tcg_gen_andc_i32(dc->temp, cpu_psw_z, dc->temp);
        dc->cond = (cond == 10) ? TCG_COND_NE : TCG_COND_EQ;
        dc->value = dc->temp;
        break;
    case 12: /* o */
        dc->cond = TCG_COND_LT;
        dc->value = cpu_psw_o;
        break;
    case 13: /* no */
        dc->cond = TCG_COND_GE;
        dc->value = cpu_psw_o;
        break;
    case 14: /* always true */
        dc->cond = TCG_COND_ALWAYS;
        dc->value = dc->temp;
        break;
    case 15: /* always false */
        dc->cond = TCG_COND_NEVER;
        dc->value = dc->temp;
        break;
    }
}
316
/*
 * Read control register 'cr' into 'ret'.  'pc' is the translation-time
 * PC value, needed because PC is not kept in a TCG global during a TB.
 * USP/ISP reads must account for PSW.U: the active stack pointer lives
 * in r0 (cpu_sp), so the shadow register is only current when inactive.
 */
static void move_from_cr(TCGv ret, int cr, uint32_t pc)
{
    TCGv z = tcg_const_i32(0);
    switch (cr) {
    case 0: /* PSW */
        gen_helper_pack_psw(ret, cpu_env);
        break;
    case 1: /* PC */
        tcg_gen_movi_i32(ret, pc);
        break;
    case 2: /* USP */
        /* if PSW.U, USP is the live r0; otherwise the shadow copy */
        tcg_gen_movcond_i32(TCG_COND_NE, ret,
                            cpu_psw_u, z, cpu_sp, cpu_usp);
        break;
    case 3: /* FPSW */
        tcg_gen_mov_i32(ret, cpu_fpsw);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(ret, cpu_bpsw);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(ret, cpu_bpc);
        break;
    case 10: /* ISP */
        /* if !PSW.U, ISP is the live r0; otherwise the shadow copy */
        tcg_gen_movcond_i32(TCG_COND_EQ, ret,
                            cpu_psw_u, z, cpu_sp, cpu_isp);
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(ret, cpu_fintv);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(ret, cpu_intb);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Unimplement control register %d", cr);
        /* Unimplement registers return 0 */
        tcg_gen_movi_i32(ret, 0);
        break;
    }
    tcg_temp_free(z);
}
358
/*
 * Write 'val' to control register 'cr'.  Registers >= 8 are
 * supervisor-only; user-mode writes are logged and ignored (no
 * exception — is_privileged() is called with is_exception=0).
 * USP/ISP writes also update r0 when that stack is the active one.
 */
static void move_to_cr(DisasContext *ctx, TCGv val, int cr)
{
    TCGv z;
    if (cr >= 8 && !is_privileged(ctx, 0)) {
        /* Some control registers can only be written in privileged mode. */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "disallow control register write %s", rx_crname(cr));
        return;
    }
    z = tcg_const_i32(0);
    switch (cr) {
    case 0: /* PSW */
        gen_helper_set_psw(cpu_env, val);
        break;
    /* case 1: to PC not supported */
    case 2: /* USP */
        tcg_gen_mov_i32(cpu_usp, val);
        /* if PSW.U is 1, copy usp to r0 */
        tcg_gen_movcond_i32(TCG_COND_NE, cpu_sp,
                            cpu_psw_u, z,  cpu_usp, cpu_sp);
        break;
    case 3: /* FPSW */
        gen_helper_set_fpsw(cpu_env, val);
        break;
    case 8: /* BPSW */
        tcg_gen_mov_i32(cpu_bpsw, val);
        break;
    case 9: /* BPC */
        tcg_gen_mov_i32(cpu_bpc, val);
        break;
    case 10: /* ISP */
        tcg_gen_mov_i32(cpu_isp, val);
        /* if PSW.U is 0, copy isp to r0 */
        tcg_gen_movcond_i32(TCG_COND_EQ, cpu_sp,
                            cpu_psw_u, z, cpu_isp, cpu_sp);
        break;
    case 11: /* FINTV */
        tcg_gen_mov_i32(cpu_fintv, val);
        break;
    case 12: /* INTB */
        tcg_gen_mov_i32(cpu_intb, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Unimplement control register %d", cr);
        break;
    }
    tcg_temp_free(z);
}
407
/* Push 'val': predecrement SP by 4, then store the longword. */
static void push(TCGv val)
{
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(MO_32, val, cpu_sp);
}
413
/* Pop a longword into 'ret', then postincrement SP by 4. */
static void pop(TCGv ret)
{
    rx_gen_ld(MO_32, ret, cpu_sp);
    tcg_gen_addi_i32(cpu_sp, cpu_sp, 4);
}
419
420 /* mov.<bwl> rs,dsp5[rd] */
421 static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a)
422 {
423 TCGv mem;
424 mem = tcg_temp_new();
425 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
426 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
427 tcg_temp_free(mem);
428 return true;
429 }
430
431 /* mov.<bwl> dsp5[rs],rd */
432 static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a)
433 {
434 TCGv mem;
435 mem = tcg_temp_new();
436 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
437 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
438 tcg_temp_free(mem);
439 return true;
440 }
441
442 /* mov.l #uimm4,rd */
443 /* mov.l #uimm8,rd */
444 /* mov.l #imm,rd */
445 static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a)
446 {
447 tcg_gen_movi_i32(cpu_regs[a->rd], a->imm);
448 return true;
449 }
450
451 /* mov.<bwl> #uimm8,dsp[rd] */
452 /* mov.<bwl> #imm, dsp[rd] */
453 static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a)
454 {
455 TCGv imm, mem;
456 imm = tcg_const_i32(a->imm);
457 mem = tcg_temp_new();
458 tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz);
459 rx_gen_st(a->sz, imm, mem);
460 tcg_temp_free(imm);
461 tcg_temp_free(mem);
462 return true;
463 }
464
465 /* mov.<bwl> [ri,rb],rd */
466 static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a)
467 {
468 TCGv mem;
469 mem = tcg_temp_new();
470 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
471 rx_gen_ld(a->sz, cpu_regs[a->rd], mem);
472 tcg_temp_free(mem);
473 return true;
474 }
475
476 /* mov.<bwl> rd,[ri,rb] */
477 static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a)
478 {
479 TCGv mem;
480 mem = tcg_temp_new();
481 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
482 rx_gen_st(a->sz, cpu_regs[a->rs], mem);
483 tcg_temp_free(mem);
484 return true;
485 }
486
/* mov.<bwl> dsp[rs],dsp[rd] */
/* mov.<bwl> rs,dsp[rd] */
/* mov.<bwl> dsp[rs],rd */
/* mov.<bwl> rs,rd */
static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a)
{
    /* per-size register-to-register move: sign-extend b/w, plain mov for l */
    static void (* const mov[])(TCGv ret, TCGv arg) = {
        tcg_gen_ext8s_i32, tcg_gen_ext16s_i32, tcg_gen_mov_i32,
    };
    TCGv tmp, mem, addr;
    if (a->lds == 3 && a->ldd == 3) {
        /* mov.<bwl> rs,rd */
        mov[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
        return true;
    }

    mem = tcg_temp_new();
    if (a->lds == 3) {
        /* mov.<bwl> rs,dsp[rd] */
        /* NOTE(review): address is formed from a->rs and the stored value
           from a->rd — the reverse of the comment above.  Either the
           decode fields are named for the other operand order or the
           operands are swapped; verify against decode-insns. */
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs);
        rx_gen_st(a->sz, cpu_regs[a->rd], addr);
    } else if (a->ldd == 3) {
        /* mov.<bwl> dsp[rs],rd */
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, cpu_regs[a->rd], addr);
    } else {
        /* mov.<bwl> dsp[rs],dsp[rd] — load into a scratch, then store */
        tmp = tcg_temp_new();
        addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs);
        rx_gen_ld(a->sz, tmp, addr);
        addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd);
        rx_gen_st(a->sz, tmp, addr);
        tcg_temp_free(tmp);
    }
    tcg_temp_free(mem);
    return true;
}
524
/* mov.<bwl> rs,[rd+] */
/* mov.<bwl> rs,[-rd] */
static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a)
{
    TCGv val;
    val = tcg_temp_new();
    /* snapshot rs first so the rs == rd case stores the pre-update value */
    tcg_gen_mov_i32(val, cpu_regs[a->rs]);
    if (a->ad == 1) {
        /* predecrement addressing: [-rd] */
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_st(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        /* postincrement addressing: [rd+] */
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    tcg_temp_free(val);
    return true;
}
542
/* mov.<bwl> [rd+],rs */
/* mov.<bwl> [-rd],rs */
static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        /* predecrement addressing: [-rd] */
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    rx_gen_ld(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        /* postincrement addressing: [rd+] */
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    /* destination register is named 'rs' by the decode fields; written
       last via a scratch so it survives rs == rd */
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    tcg_temp_free(val);
    return true;
}
560
561 /* movu.<bw> dsp5[rs],rd */
562 /* movu.<bw> dsp[rs],rd */
563 static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a)
564 {
565 TCGv mem;
566 mem = tcg_temp_new();
567 tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz);
568 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
569 tcg_temp_free(mem);
570 return true;
571 }
572
573 /* movu.<bw> rs,rd */
574 static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a)
575 {
576 static void (* const ext[])(TCGv ret, TCGv arg) = {
577 tcg_gen_ext8u_i32, tcg_gen_ext16u_i32,
578 };
579 ext[a->sz](cpu_regs[a->rd], cpu_regs[a->rs]);
580 return true;
581 }
582
583 /* movu.<bw> [ri,rb],rd */
584 static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a)
585 {
586 TCGv mem;
587 mem = tcg_temp_new();
588 rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb);
589 rx_gen_ldu(a->sz, cpu_regs[a->rd], mem);
590 tcg_temp_free(mem);
591 return true;
592 }
593
/* movu.<bw> [rd+],rs */
/* movu.<bw> [-rd],rs */
static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a)
{
    TCGv val;
    val = tcg_temp_new();
    if (a->ad == 1) {
        /* predecrement addressing: [-rd] */
        tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    /* zero-extending load, unlike trans_MOV_pr */
    rx_gen_ldu(a->sz, val, cpu_regs[a->rd]);
    if (a->ad == 0) {
        /* postincrement addressing: [rd+] */
        tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz);
    }
    /* destination register is named 'rs' by the decode fields */
    tcg_gen_mov_i32(cpu_regs[a->rs], val);
    tcg_temp_free(val);
    return true;
}
611
612
/* pop rd */
static bool trans_POP(DisasContext *ctx, arg_POP *a)
{
    /* Implemented as "mov.l [r0+], rd": postincrement load with SP (r0)
       as the base register and rd as the destination (MOV_pr's 'rs'). */
    arg_MOV_rp mov_a;
    mov_a.rd = 0;
    mov_a.rs = a->rd;
    mov_a.ad = 0;
    mov_a.sz = MO_32;
    trans_MOV_pr(ctx, &mov_a);
    return true;
}
625
/* popc cr — pop a longword from the stack into control register cr */
static bool trans_POPC(DisasContext *ctx, arg_POPC *a)
{
    TCGv val;
    val = tcg_temp_new();
    pop(val);
    move_to_cr(ctx, val, a->cr);
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        /* PSW.I may be updated here. exit TB. */
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    tcg_temp_free(val);
    return true;
}
640
/* popm rd-rd2 — pop registers rd..rd2 (ascending) from the stack */
static bool trans_POPM(DisasContext *ctx, arg_POPM *a)
{
    int r;
    if (a->rd == 0 || a->rd >= a->rd2) {
        /* invalid encodings are only logged; execution proceeds with the
           loop below clamped to r15 */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rd, a->rd2);
    }
    r = a->rd;
    while (r <= a->rd2 && r < 16) {
        pop(cpu_regs[r++]);
    }
    return true;
}
655
656
657 /* push.<bwl> rs */
658 static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a)
659 {
660 TCGv val;
661 val = tcg_temp_new();
662 tcg_gen_mov_i32(val, cpu_regs[a->rs]);
663 tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
664 rx_gen_st(a->sz, val, cpu_sp);
665 tcg_temp_free(val);
666 return true;
667 }
668
/* push.<bwl> dsp[rs] — load the memory operand, then push it.
   SP always moves down a full 4 bytes; the store itself uses 'sz'. */
static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a)
{
    TCGv mem, val, addr;
    mem = tcg_temp_new();
    val = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs);
    rx_gen_ld(a->sz, val, addr);
    tcg_gen_subi_i32(cpu_sp, cpu_sp, 4);
    rx_gen_st(a->sz, val, cpu_sp);
    tcg_temp_free(mem);
    tcg_temp_free(val);
    return true;
}
683
684 /* pushc rx */
685 static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a)
686 {
687 TCGv val;
688 val = tcg_temp_new();
689 move_from_cr(val, a->cr, ctx->pc);
690 push(val);
691 tcg_temp_free(val);
692 return true;
693 }
694
/* pushm rs-rs2 — push registers rs2 down to rs (highest first) */
static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a)
{
    int r;

    if (a->rs == 0 || a->rs >= a->rs2) {
        /* invalid encodings are only logged; the loop below still runs */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Invalid register ranges r%d-r%d", a->rs, a->rs2);
    }
    r = a->rs2;
    while (r >= a->rs && r >= 0) {
        push(cpu_regs[r--]);
    }
    return true;
}
710
711 /* xchg rs,rd */
712 static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a)
713 {
714 TCGv tmp;
715 tmp = tcg_temp_new();
716 tcg_gen_mov_i32(tmp, cpu_regs[a->rs]);
717 tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]);
718 tcg_gen_mov_i32(cpu_regs[a->rd], tmp);
719 tcg_temp_free(tmp);
720 return true;
721 }
722
/* xchg dsp[rs].<mi>,rd — atomic exchange between memory and rd */
static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a)
{
    TCGv mem, addr;
    mem = tcg_temp_new();
    switch (a->mi) {
    case 0: /* dsp[rs].b */
    case 1: /* dsp[rs].w */
    case 2: /* dsp[rs].l */
        /* mi doubles as the size for the signed accesses */
        addr = rx_index_addr(ctx, mem, a->ld, a->mi, a->rs);
        break;
    case 3: /* dsp[rs].uw */
    case 4: /* dsp[rs].ub */
        /* unsigned encodings: 3 -> word (size 1), 4 -> byte (size 0) */
        addr = rx_index_addr(ctx, mem, a->ld, 4 - a->mi, a->rs);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_atomic_xchg_i32(cpu_regs[a->rd], addr, cpu_regs[a->rd],
                            0, mi_to_mop(a->mi));
    tcg_temp_free(mem);
    return true;
}
746
747 static inline void stcond(TCGCond cond, int rd, int imm)
748 {
749 TCGv z;
750 TCGv _imm;
751 z = tcg_const_i32(0);
752 _imm = tcg_const_i32(imm);
753 tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z,
754 _imm, cpu_regs[rd]);
755 tcg_temp_free(z);
756 tcg_temp_free(_imm);
757 }
758
759 /* stz #imm,rd */
760 static bool trans_STZ(DisasContext *ctx, arg_STZ *a)
761 {
762 stcond(TCG_COND_EQ, a->rd, a->imm);
763 return true;
764 }
765
766 /* stnz #imm,rd */
767 static bool trans_STNZ(DisasContext *ctx, arg_STNZ *a)
768 {
769 stcond(TCG_COND_NE, a->rd, a->imm);
770 return true;
771 }
772
773 /* sccnd.<bwl> rd */
774 /* sccnd.<bwl> dsp:[rd] */
775 static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a)
776 {
777 DisasCompare dc;
778 TCGv val, mem, addr;
779 dc.temp = tcg_temp_new();
780 psw_cond(&dc, a->cd);
781 if (a->ld < 3) {
782 val = tcg_temp_new();
783 mem = tcg_temp_new();
784 tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0);
785 addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd);
786 rx_gen_st(a->sz, val, addr);
787 tcg_temp_free(val);
788 tcg_temp_free(mem);
789 } else {
790 tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0);
791 }
792 tcg_temp_free(dc.temp);
793 return true;
794 }
795
796 /* rtsd #imm */
797 static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a)
798 {
799 tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2);
800 pop(cpu_pc);
801 ctx->base.is_jmp = DISAS_JUMP;
802 return true;
803 }
804
/* rtsd #imm, rd-rd2 — deallocate #imm*4 bytes of frame, restore
   rd..rd2, then pop the return address. */
static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a)
{
    int dst;
    int adj;

    /* SP must land on the saved registers: subtract the register count
       from the frame size before the pops below walk over them.
       rd2 < rd is an invalid encoding; treat the range as rd..r15. */
    if (a->rd2 >= a->rd) {
        adj = a->imm - (a->rd2 - a->rd + 1);
    } else {
        adj = a->imm - (15 - a->rd + 1);
    }

    tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2);
    dst = a->rd;
    while (dst <= a->rd2 && dst < 16) {
        pop(cpu_regs[dst++]);
    }
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}
826
827 typedef void (*op2fn)(TCGv ret, TCGv arg1);
828 typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2);
829
830 static inline void rx_gen_op_rr(op2fn opr, int dst, int src)
831 {
832 opr(cpu_regs[dst], cpu_regs[src]);
833 }
834
835 static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2)
836 {
837 opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]);
838 }
839
840 static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2)
841 {
842 TCGv imm = tcg_const_i32(src2);
843 opr(cpu_regs[dst], cpu_regs[src], imm);
844 tcg_temp_free(imm);
845 }
846
/* Apply 'opr' as rd = opr(rd, src-operand), where the source comes
   from a register or memory per ld/mi (see rx_load_source). */
static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx,
                                int dst, int src, int ld, int mi)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, ld, mi, src);
    opr(cpu_regs[dst], cpu_regs[dst], val);
    tcg_temp_free(mem);
}
856
/* ret = arg1 & arg2, with PSW.S and PSW.Z set from the result
   (flags hold the raw value; S is its sign bit, Z tested as == 0). */
static void rx_and(TCGv ret, TCGv arg1, TCGv arg2)
{
    tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_mov_i32(ret, cpu_psw_s);
}
863
864 /* and #uimm:4, rd */
865 /* and #imm, rd */
866 static bool trans_AND_ir(DisasContext *ctx, arg_AND_ir *a)
867 {
868 rx_gen_op_irr(rx_and, a->rd, a->rd, a->imm);
869 return true;
870 }
871
872 /* and dsp[rs], rd */
873 /* and rs,rd */
874 static bool trans_AND_mr(DisasContext *ctx, arg_AND_mr *a)
875 {
876 rx_gen_op_mr(rx_and, ctx, a->rd, a->rs, a->ld, a->mi);
877 return true;
878 }
879
880 /* and rs,rs2,rd */
881 static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a)
882 {
883 rx_gen_op_rrr(rx_and, a->rd, a->rs, a->rs2);
884 return true;
885 }
886
887 static void rx_or(TCGv ret, TCGv arg1, TCGv arg2)
888 {
889 tcg_gen_or_i32(cpu_psw_s, arg1, arg2);
890 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
891 tcg_gen_mov_i32(ret, cpu_psw_s);
892 }
893
894 /* or #uimm:4, rd */
895 /* or #imm, rd */
896 static bool trans_OR_ir(DisasContext *ctx, arg_OR_ir *a)
897 {
898 rx_gen_op_irr(rx_or, a->rd, a->rd, a->imm);
899 return true;
900 }
901
902 /* or dsp[rs], rd */
903 /* or rs,rd */
904 static bool trans_OR_mr(DisasContext *ctx, arg_OR_mr *a)
905 {
906 rx_gen_op_mr(rx_or, ctx, a->rd, a->rs, a->ld, a->mi);
907 return true;
908 }
909
910 /* or rs,rs2,rd */
911 static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a)
912 {
913 rx_gen_op_rrr(rx_or, a->rd, a->rs, a->rs2);
914 return true;
915 }
916
917 static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2)
918 {
919 tcg_gen_xor_i32(cpu_psw_s, arg1, arg2);
920 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
921 tcg_gen_mov_i32(ret, cpu_psw_s);
922 }
923
924 /* xor #imm, rd */
925 static bool trans_XOR_ir(DisasContext *ctx, arg_XOR_ir *a)
926 {
927 rx_gen_op_irr(rx_xor, a->rd, a->rd, a->imm);
928 return true;
929 }
930
931 /* xor dsp[rs], rd */
932 /* xor rs,rd */
933 static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a)
934 {
935 rx_gen_op_mr(rx_xor, ctx, a->rd, a->rs, a->ld, a->mi);
936 return true;
937 }
938
939 static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2)
940 {
941 tcg_gen_and_i32(cpu_psw_s, arg1, arg2);
942 tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
943 }
944
945 /* tst #imm, rd */
946 static bool trans_TST_ir(DisasContext *ctx, arg_TST_ir *a)
947 {
948 rx_gen_op_irr(rx_tst, a->rd, a->rd, a->imm);
949 return true;
950 }
951
952 /* tst dsp[rs], rd */
953 /* tst rs, rd */
954 static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a)
955 {
956 rx_gen_op_mr(rx_tst, ctx, a->rd, a->rs, a->ld, a->mi);
957 return true;
958 }
959
960 static void rx_not(TCGv ret, TCGv arg1)
961 {
962 tcg_gen_not_i32(ret, arg1);
963 tcg_gen_mov_i32(cpu_psw_z, ret);
964 tcg_gen_mov_i32(cpu_psw_s, ret);
965 }
966
967 /* not rd */
968 /* not rs, rd */
969 static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a)
970 {
971 rx_gen_op_rr(rx_not, a->rd, a->rs);
972 return true;
973 }
974
/* ret = -arg1.  O is set when arg1 == INT32_MIN (the only overflowing
   negation), C when the result is zero, S/Z from the result value. */
static void rx_neg(TCGv ret, TCGv arg1)
{
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000);
    tcg_gen_neg_i32(ret, arg1);
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_c, ret, 0);
    tcg_gen_mov_i32(cpu_psw_z, ret);
    tcg_gen_mov_i32(cpu_psw_s, ret);
}
983
984
985 /* neg rd */
986 /* neg rs, rd */
987 static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a)
988 {
989 rx_gen_op_rr(rx_neg, a->rd, a->rs);
990 return true;
991 }
992
/* ret = arg1 + arg2 + psw_c */
static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z;
    z = tcg_const_i32(0);
    /* two wide adds: first arg1 + C, then + arg2, carry accumulated in psw_c */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z);
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* overflow: operands share a sign and the result differs from it,
       i.e. O = (res ^ arg1) & ~(arg1 ^ arg2), kept in the sign bit */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
    tcg_gen_mov_i32(ret, cpu_psw_s);
    tcg_temp_free(z);
}
1007
1008 /* adc #imm, rd */
1009 static bool trans_ADC_ir(DisasContext *ctx, arg_ADC_ir *a)
1010 {
1011 rx_gen_op_irr(rx_adc, a->rd, a->rd, a->imm);
1012 return true;
1013 }
1014
1015 /* adc rs, rd */
1016 static bool trans_ADC_rr(DisasContext *ctx, arg_ADC_rr *a)
1017 {
1018 rx_gen_op_rrr(rx_adc, a->rd, a->rd, a->rs);
1019 return true;
1020 }
1021
1022 /* adc dsp[rs], rd */
1023 static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a)
1024 {
1025 /* mi only 2 */
1026 if (a->mi != 2) {
1027 return false;
1028 }
1029 rx_gen_op_mr(rx_adc, ctx, a->rd, a->rs, a->ld, a->mi);
1030 return true;
1031 }
1032
/* ret = arg1 + arg2 */
static void rx_add(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv z;
    z = tcg_const_i32(0);
    /* wide add so the carry lands directly in psw_c */
    tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    /* overflow: O = (res ^ arg1) & ~(arg1 ^ arg2), kept in the sign bit */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    tcg_gen_xor_i32(z, arg1, arg2);
    tcg_gen_andc_i32(cpu_psw_o, cpu_psw_o, z);
    tcg_gen_mov_i32(ret, cpu_psw_s);
    tcg_temp_free(z);
}
1046
1047 /* add #uimm4, rd */
1048 /* add #imm, rs, rd */
1049 static bool trans_ADD_irr(DisasContext *ctx, arg_ADD_irr *a)
1050 {
1051 rx_gen_op_irr(rx_add, a->rd, a->rs2, a->imm);
1052 return true;
1053 }
1054
1055 /* add rs, rd */
1056 /* add dsp[rs], rd */
1057 static bool trans_ADD_mr(DisasContext *ctx, arg_ADD_mr *a)
1058 {
1059 rx_gen_op_mr(rx_add, ctx, a->rd, a->rs, a->ld, a->mi);
1060 return true;
1061 }
1062
1063 /* add rs, rs2, rd */
1064 static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a)
1065 {
1066 rx_gen_op_rrr(rx_add, a->rd, a->rs, a->rs2);
1067 return true;
1068 }
1069
/* ret = arg1 - arg2.  C is the "no borrow" flag (arg1 >= arg2 unsigned).
   Called with ret == NULL by rx_cmp to set flags only. */
static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2)
{
    TCGv temp;
    tcg_gen_sub_i32(cpu_psw_s, arg1, arg2);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2);
    /* overflow: operands differ in sign and the result's sign differs
       from arg1's, i.e. O = (res ^ arg1) & (arg1 ^ arg2) */
    tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1);
    temp = tcg_temp_new_i32();
    tcg_gen_xor_i32(temp, arg1, arg2);
    tcg_gen_and_i32(cpu_psw_o, cpu_psw_o, temp);
    tcg_temp_free_i32(temp);
    /* CMP not required return */
    if (ret) {
        tcg_gen_mov_i32(ret, cpu_psw_s);
    }
}
1087 static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2)
1088 {
1089 rx_sub(NULL, arg1, arg2);
1090 }
1091 /* ret = arg1 - arg2 - !psw_c */
1092 /* -> ret = arg1 + ~arg2 + psw_c */
1093 static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2)
1094 {
1095 TCGv temp;
1096 temp = tcg_temp_new();
1097 tcg_gen_not_i32(temp, arg2);
1098 rx_adc(ret, arg1, temp);
1099 tcg_temp_free(temp);
1100 }
1101
1102 /* cmp #imm4, rs2 */
1103 /* cmp #imm8, rs2 */
1104 /* cmp #imm, rs2 */
1105 static bool trans_CMP_ir(DisasContext *ctx, arg_CMP_ir *a)
1106 {
1107 rx_gen_op_irr(rx_cmp, 0, a->rs2, a->imm);
1108 return true;
1109 }
1110
1111 /* cmp rs, rs2 */
1112 /* cmp dsp[rs], rs2 */
1113 static bool trans_CMP_mr(DisasContext *ctx, arg_CMP_mr *a)
1114 {
1115 rx_gen_op_mr(rx_cmp, ctx, a->rd, a->rs, a->ld, a->mi);
1116 return true;
1117 }
1118
1119 /* sub #imm4, rd */
1120 static bool trans_SUB_ir(DisasContext *ctx, arg_SUB_ir *a)
1121 {
1122 rx_gen_op_irr(rx_sub, a->rd, a->rd, a->imm);
1123 return true;
1124 }
1125
1126 /* sub rs, rd */
1127 /* sub dsp[rs], rd */
1128 static bool trans_SUB_mr(DisasContext *ctx, arg_SUB_mr *a)
1129 {
1130 rx_gen_op_mr(rx_sub, ctx, a->rd, a->rs, a->ld, a->mi);
1131 return true;
1132 }
1133
1134 /* sub rs2, rs, rd */
1135 static bool trans_SUB_rrr(DisasContext *ctx, arg_SUB_rrr *a)
1136 {
1137 rx_gen_op_rrr(rx_sub, a->rd, a->rs2, a->rs);
1138 return true;
1139 }
1140
1141 /* sbb rs, rd */
1142 static bool trans_SBB_rr(DisasContext *ctx, arg_SBB_rr *a)
1143 {
1144 rx_gen_op_rrr(rx_sbb, a->rd, a->rd, a->rs);
1145 return true;
1146 }
1147
1148 /* sbb dsp[rs], rd */
1149 static bool trans_SBB_mr(DisasContext *ctx, arg_SBB_mr *a)
1150 {
1151 /* mi only 2 */
1152 if (a->mi != 2) {
1153 return false;
1154 }
1155 rx_gen_op_mr(rx_sbb, ctx, a->rd, a->rs, a->ld, a->mi);
1156 return true;
1157 }
1158
1159 static void rx_abs(TCGv ret, TCGv arg1)
1160 {
1161 TCGv neg;
1162 TCGv zero;
1163 neg = tcg_temp_new();
1164 zero = tcg_const_i32(0);
1165 tcg_gen_neg_i32(neg, arg1);
1166 tcg_gen_movcond_i32(TCG_COND_LT, ret, arg1, zero, neg, arg1);
1167 tcg_temp_free(neg);
1168 tcg_temp_free(zero);
1169 }
1170
/* abs rd */
/* abs rs, rd */
static bool trans_ABS_rr(DisasContext *ctx, arg_ABS_rr *a)
{
    rx_gen_op_rr(rx_abs, a->rd, a->rs);
    return true;
}

/* max #imm, rd */
/* rd = signed max(rd, imm) */
static bool trans_MAX_ir(DisasContext *ctx, arg_MAX_ir *a)
{
    rx_gen_op_irr(tcg_gen_smax_i32, a->rd, a->rd, a->imm);
    return true;
}

/* max rs, rd */
/* max dsp[rs], rd */
static bool trans_MAX_mr(DisasContext *ctx, arg_MAX_mr *a)
{
    rx_gen_op_mr(tcg_gen_smax_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* min #imm, rd */
/* rd = signed min(rd, imm) */
static bool trans_MIN_ir(DisasContext *ctx, arg_MIN_ir *a)
{
    rx_gen_op_irr(tcg_gen_smin_i32, a->rd, a->rd, a->imm);
    return true;
}

/* min rs, rd */
/* min dsp[rs], rd */
static bool trans_MIN_mr(DisasContext *ctx, arg_MIN_mr *a)
{
    rx_gen_op_mr(tcg_gen_smin_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* mul #uimm4, rd */
/* mul #imm, rd */
/* rd = low 32 bits of rd * imm */
static bool trans_MUL_ir(DisasContext *ctx, arg_MUL_ir *a)
{
    rx_gen_op_irr(tcg_gen_mul_i32, a->rd, a->rd, a->imm);
    return true;
}

/* mul rs, rd */
/* mul dsp[rs], rd */
static bool trans_MUL_mr(DisasContext *ctx, arg_MUL_mr *a)
{
    rx_gen_op_mr(tcg_gen_mul_i32, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* mul rs, rs2, rd */
/* Three-operand form: rd = low 32 bits of rs * rs2 */
static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a)
{
    rx_gen_op_rrr(tcg_gen_mul_i32, a->rd, a->rs, a->rs2);
    return true;
}
1231
/* emul #imm, rd */
/*
 * 32 x 32 -> 64 signed multiply: low half of the product to rd,
 * high half to rd + 1 (tcg_gen_muls2_i32 writes low, then high).
 * rd == 15 has no valid pair register; it is only logged as a guest
 * error, and the pair index wraps to r0 via "& 15".
 */
static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a)
{
    TCGv imm = tcg_const_i32(a->imm);
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* emul rs, rd */
/* emul dsp[rs], rd */
/* Register/memory source form of EMUL; same rd pairing as above. */
static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}

/* emulu #imm, rd */
/* Unsigned counterpart of EMUL: 32 x 32 -> 64 unsigned multiply. */
static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a)
{
    TCGv imm = tcg_const_i32(a->imm);
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* emulu rs, rd */
/* emulu dsp[rs], rd */
/* Register/memory source form of EMULU. */
static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a)
{
    TCGv val, mem;
    if (a->rd > 14) {
        qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd);
    }
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15],
                      cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}
1289
/*
 * Division is delegated to out-of-line helpers (they take cpu_env, so
 * special cases such as divide-by-zero are handled there -- see the
 * div/divu helper implementations).  These thin wrappers adapt the
 * helper signature to the common rx_gen_op_* generators.
 */
static void rx_div(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_div(ret, cpu_env, arg1, arg2);
}

static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2)
{
    gen_helper_divu(ret, cpu_env, arg1, arg2);
}

/* div #imm, rd */
static bool trans_DIV_ir(DisasContext *ctx, arg_DIV_ir *a)
{
    rx_gen_op_irr(rx_div, a->rd, a->rd, a->imm);
    return true;
}

/* div rs, rd */
/* div dsp[rs], rd */
static bool trans_DIV_mr(DisasContext *ctx, arg_DIV_mr *a)
{
    rx_gen_op_mr(rx_div, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}

/* divu #imm, rd */
static bool trans_DIVU_ir(DisasContext *ctx, arg_DIVU_ir *a)
{
    rx_gen_op_irr(rx_divu, a->rd, a->rd, a->imm);
    return true;
}

/* divu rs, rd */
/* divu dsp[rs], rd */
static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a)
{
    rx_gen_op_mr(rx_divu, ctx, a->rd, a->rs, a->ld, a->mi);
    return true;
}
1329
1330
1331 /* shll #imm:5, rd */
1332 /* shll #imm:5, rs2, rd */
1333 static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a)
1334 {
1335 TCGv tmp;
1336 tmp = tcg_temp_new();
1337 if (a->imm) {
1338 tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm);
1339 tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm);
1340 tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
1341 tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
1342 tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
1343 tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
1344 } else {
1345 tcg_gen_mov_i32(cpu_regs[a->rd], cpu_regs[a->rs2]);
1346 tcg_gen_movi_i32(cpu_psw_c, 0);
1347 tcg_gen_movi_i32(cpu_psw_o, 0);
1348 }
1349 tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
1350 tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
1351 return true;
1352 }
1353
/* shll rs, rd */
static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a)
{
    TCGLabel *noshift, *done;
    TCGv count, tmp;

    noshift = gen_new_label();
    done = gen_new_label();
    /* if (cpu_regs[a->rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift);
    count = tcg_const_i32(32);
    tmp = tcg_temp_new();
    /* Effective shift amount is rs modulo 32. */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31);
    /*
     * Arithmetic shift right by (32 - amount) captures the bits that
     * the left shift will push out, sign-extended, into psw_c.
     */
    tcg_gen_sub_i32(count, count, tmp);
    tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count);
    tcg_gen_shl_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    /* psw_o = 1 when captured bits are all-zeros or all-ones. */
    tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, cpu_psw_c, 0);
    tcg_gen_setcondi_i32(TCG_COND_EQ, tmp, cpu_psw_c, 0xffffffff);
    tcg_gen_or_i32(cpu_psw_o, cpu_psw_o, tmp);
    /* psw_c = 1 when any captured bit is set. */
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, cpu_psw_c, 0);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    /* } */
    gen_set_label(done);
    /* Z and S mirror the result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    tcg_temp_free(count);
    tcg_temp_free(tmp);
    return true;
}
1387
/*
 * Common right-shift generator for SHLR (alith == 0, logical) and
 * SHAR (alith == 1, arithmetic), immediate-count form.
 * The shift is split in two: shift by imm - 1, capture bit 0 (the
 * last bit to go out) as C, then shift once more.  imm == 0 is a
 * plain move with C cleared.  O is always cleared; Z and S mirror
 * the result value.
 */
static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm,
                              unsigned int alith)
{
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    tcg_debug_assert(alith < 2);
    if (imm) {
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rs], imm - 1);
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    } else {
        tcg_gen_mov_i32(cpu_regs[rd], cpu_regs[rs]);
        tcg_gen_movi_i32(cpu_psw_c, 0);
    }
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/*
 * Register-count form of shiftr_imm; count is rs modulo 32.
 * NOTE(review): when rs is non-zero but a multiple of 32, count
 * becomes -1 and the generated shift amount is out of range (TCG
 * leaves the result undefined) -- confirm against hardware behaviour.
 */
static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith)
{
    TCGLabel *noshift, *done;
    TCGv count;
    static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = {
        tcg_gen_shri_i32, tcg_gen_sari_i32,
    };
    static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = {
        tcg_gen_shr_i32, tcg_gen_sar_i32,
    };
    tcg_debug_assert(alith < 2);
    noshift = gen_new_label();
    done = gen_new_label();
    count = tcg_temp_new();
    /* if (cpu_regs[rs]) { */
    tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift);
    tcg_gen_andi_i32(count, cpu_regs[rs], 31);
    /* Shift by count - 1, grab C from bit 0, then one final shift. */
    tcg_gen_subi_i32(count, count, 1);
    gen_sXr[alith](cpu_regs[rd], cpu_regs[rd], count);
    tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
    gen_sXri[alith](cpu_regs[rd], cpu_regs[rd], 1);
    tcg_gen_br(done);
    /* } else { */
    gen_set_label(noshift);
    tcg_gen_movi_i32(cpu_psw_c, 0);
    /* } */
    gen_set_label(done);
    tcg_gen_movi_i32(cpu_psw_o, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
    tcg_temp_free(count);
}

/* shar #imm:5, rd */
/* shar #imm:5, rs2, rd */
static bool trans_SHAR_irr(DisasContext *ctx, arg_SHAR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 1);
    return true;
}

/* shar rs, rd */
static bool trans_SHAR_rr(DisasContext *ctx, arg_SHAR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 1);
    return true;
}

/* shlr #imm:5, rd */
/* shlr #imm:5, rs2, rd */
static bool trans_SHLR_irr(DisasContext *ctx, arg_SHLR_irr *a)
{
    shiftr_imm(a->rd, a->rs2, a->imm, 0);
    return true;
}

/* shlr rs, rd */
static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a)
{
    shiftr_reg(a->rd, a->rs, 0);
    return true;
}
1470
/* rolc rd */
/* Rotate left through carry: rd = (rd << 1) | C, new C = old bit 31. */
static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* Save bit 31 before it is shifted out. */
    tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31);
    tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
    /* Old carry enters at bit 0; assumes psw_c holds 0 or 1. */
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
    tcg_gen_mov_i32(cpu_psw_c, tmp);
    /* Z and S mirror the result value. */
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    return true;
}
1485
1486 /* rorc rd */
1487 static bool trans_RORC(DisasContext *ctx, arg_RORC *a)
1488 {
1489 TCGv tmp;
1490 tmp = tcg_temp_new();
1491 tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001);
1492 tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1);
1493 tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31);
1494 tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c);
1495 tcg_gen_mov_i32(cpu_psw_c, tmp);
1496 tcg_gen_mov_i32(cpu_psw_z, cpu_regs[a->rd]);
1497 tcg_gen_mov_i32(cpu_psw_s, cpu_regs[a->rd]);
1498 return true;
1499 }
1500
enum {ROTR = 0, ROTL = 1};
enum {ROT_IMM = 0, ROT_REG = 1};
/*
 * Common generator for ROTL/ROTR.  @ir selects whether @src is an
 * immediate count (ROT_IMM) or a source register number (ROT_REG).
 * After the rotate, C is loaded from the bit that wrapped around:
 * bit 0 of the result for ROTL, bit 31 for ROTR.  Z and S mirror
 * the result value.
 */
static inline void rx_rot(int ir, int dir, int rd, int src)
{
    switch (dir) {
    case ROTL:
        if (ir == ROT_IMM) {
            tcg_gen_rotli_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotl_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_andi_i32(cpu_psw_c, cpu_regs[rd], 0x00000001);
        break;
    case ROTR:
        if (ir == ROT_IMM) {
            tcg_gen_rotri_i32(cpu_regs[rd], cpu_regs[rd], src);
        } else {
            tcg_gen_rotr_i32(cpu_regs[rd], cpu_regs[rd], cpu_regs[src]);
        }
        tcg_gen_shri_i32(cpu_psw_c, cpu_regs[rd], 31);
        break;
    }
    tcg_gen_mov_i32(cpu_psw_z, cpu_regs[rd]);
    tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]);
}

/* rotl #imm, rd */
static bool trans_ROTL_ir(DisasContext *ctx, arg_ROTL_ir *a)
{
    rx_rot(ROT_IMM, ROTL, a->rd, a->imm);
    return true;
}

/* rotl rs, rd */
static bool trans_ROTL_rr(DisasContext *ctx, arg_ROTL_rr *a)
{
    rx_rot(ROT_REG, ROTL, a->rd, a->rs);
    return true;
}

/* rotr #imm, rd */
static bool trans_ROTR_ir(DisasContext *ctx, arg_ROTR_ir *a)
{
    rx_rot(ROT_IMM, ROTR, a->rd, a->imm);
    return true;
}

/* rotr rs, rd */
static bool trans_ROTR_rr(DisasContext *ctx, arg_ROTR_rr *a)
{
    rx_rot(ROT_REG, ROTR, a->rd, a->rs);
    return true;
}
1554
/* revl rs, rd */
/* rd = rs with all four bytes reversed (full 32-bit byte swap). */
static bool trans_REVL(DisasContext *ctx, arg_REVL *a)
{
    tcg_gen_bswap32_i32(cpu_regs[a->rd], cpu_regs[a->rs]);
    return true;
}

/* revw rs, rd */
/* rd = rs with the two bytes swapped within each 16-bit half. */
static bool trans_REVW(DisasContext *ctx, arg_REVW *a)
{
    TCGv tmp;
    tmp = tcg_temp_new();
    /* Low byte of each half, moved up by 8 ... */
    tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff);
    tcg_gen_shli_i32(tmp, tmp, 8);
    /* ... combined with the high byte of each half moved down by 8. */
    tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8);
    tcg_gen_andi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 0x00ff00ff);
    tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], tmp);
    tcg_temp_free(tmp);
    return true;
}
1575
/*
 * Conditional branch helper.  @cd is the RX condition code:
 *   0..13 - condition evaluated via psw_cond(); emits both the taken
 *           (ctx->pc + dst) and fall-through (next insn) goto_tb exits,
 *   14    - always taken,
 *   15    - never taken (no code emitted).
 * @dst is a displacement relative to the start of this insn (ctx->pc),
 * not to the next insn.
 */
static void rx_bcnd_main(DisasContext *ctx, int cd, int dst)
{
    DisasCompare dc;
    TCGLabel *t, *done;

    switch (cd) {
    case 0 ... 13:
        /* dc.cond / dc.value are filled in by psw_cond(). */
        dc.temp = tcg_temp_new();
        psw_cond(&dc, cd);
        t = gen_new_label();
        done = gen_new_label();
        tcg_gen_brcondi_i32(dc.cond, dc.value, 0, t);
        gen_goto_tb(ctx, 0, ctx->base.pc_next);
        tcg_gen_br(done);
        gen_set_label(t);
        gen_goto_tb(ctx, 1, ctx->pc + dst);
        gen_set_label(done);
        tcg_temp_free(dc.temp);
        break;
    case 14:
        /* always true case */
        gen_goto_tb(ctx, 0, ctx->pc + dst);
        break;
    case 15:
        /* always false case */
        /* Nothing to do */
        break;
    }
}
1606
/* beq dsp:3 / bne dsp:3 */
/* beq dsp:8 / bne dsp:8 */
/* bc dsp:8 / bnc dsp:8 */
/* bgtu dsp:8 / bleu dsp:8 */
/* bpz dsp:8 / bn dsp:8 */
/* bge dsp:8 / blt dsp:8 */
/* bgt dsp:8 / ble dsp:8 */
/* bo dsp:8 / bno dsp:8 */
/* beq dsp:16 / bne dsp:16 */
static bool trans_BCnd(DisasContext *ctx, arg_BCnd *a)
{
    rx_bcnd_main(ctx, a->cd, a->dsp);
    return true;
}

/* bra dsp:3 */
/* bra dsp:8 */
/* bra dsp:16 */
/* bra dsp:24 */
static bool trans_BRA(DisasContext *ctx, arg_BRA *a)
{
    /* Unconditional: reuse the branch helper with cd == 14 (always). */
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bra rs */
/* Register-relative branch: pc = rd + address of this insn. */
static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a)
{
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* Push the address of the next insn as the return address. */
static inline void rx_save_pc(DisasContext *ctx)
{
    TCGv pc = tcg_const_i32(ctx->base.pc_next);
    push(pc);
    tcg_temp_free(pc);
}
1646
/* jmp rs */
static bool trans_JMP(DisasContext *ctx, arg_JMP *a)
{
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* jsr rs */
/* Call through register: push return address, jump to rs. */
static bool trans_JSR(DisasContext *ctx, arg_JSR *a)
{
    rx_save_pc(ctx);
    tcg_gen_mov_i32(cpu_pc, cpu_regs[a->rs]);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* bsr dsp:16 */
/* bsr dsp:24 */
/* PC-relative call: push return address, then unconditional branch. */
static bool trans_BSR(DisasContext *ctx, arg_BSR *a)
{
    rx_save_pc(ctx);
    rx_bcnd_main(ctx, 14, a->dsp);
    return true;
}

/* bsr rs */
/* Register-relative call: pc = rd + address of this insn. */
static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a)
{
    rx_save_pc(ctx);
    tcg_gen_addi_i32(cpu_pc, cpu_regs[a->rd], ctx->pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* rts */
/* Return: pop the saved return address into PC. */
static bool trans_RTS(DisasContext *ctx, arg_RTS *a)
{
    pop(cpu_pc);
    ctx->base.is_jmp = DISAS_JUMP;
    return true;
}

/* nop */
static bool trans_NOP(DisasContext *ctx, arg_NOP *a)
{
    return true;
}

/* scmpu */
/* String compare; all state handling lives in the helper. */
static bool trans_SCMPU(DisasContext *ctx, arg_SCMPU *a)
{
    gen_helper_scmpu(cpu_env);
    return true;
}

/* smovu */
static bool trans_SMOVU(DisasContext *ctx, arg_SMOVU *a)
{
    gen_helper_smovu(cpu_env);
    return true;
}

/* smovf */
static bool trans_SMOVF(DisasContext *ctx, arg_SMOVF *a)
{
    gen_helper_smovf(cpu_env);
    return true;
}

/* smovb */
static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a)
{
    gen_helper_smovb(cpu_env);
    return true;
}
1723
/*
 * Emit a call to a sized string-operation helper, passing the operand
 * size field (a->sz) as an immediate argument.
 */
#define STRING(op) \
    do { \
        TCGv size = tcg_const_i32(a->sz); \
        gen_helper_##op(cpu_env, size); \
        tcg_temp_free(size); \
    } while (0)

/* suntil.<bwl> */
static bool trans_SUNTIL(DisasContext *ctx, arg_SUNTIL *a)
{
    STRING(suntil);
    return true;
}

/* swhile.<bwl> */
static bool trans_SWHILE(DisasContext *ctx, arg_SWHILE *a)
{
    STRING(swhile);
    return true;
}
/* sstr.<bwl> */
static bool trans_SSTR(DisasContext *ctx, arg_SSTR *a)
{
    STRING(sstr);
    return true;
}

/* rmpa.<bwl> */
static bool trans_RMPA(DisasContext *ctx, arg_RMPA *a)
{
    STRING(rmpa);
    return true;
}
1757
/*
 * ret = (high 16 bits of rs, sign-extended) * (high 16 bits of rs2,
 * sign-extended), shifted left by 16 -- the 64-bit accumulator
 * format used by MULHI/MACHI.
 */
static void rx_mul64hi(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    /* sari by 16 extracts the upper half with sign extension. */
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_sari_i64(tmp0, tmp0, 16);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_sari_i64(tmp1, tmp1, 16);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}

/*
 * Same as rx_mul64hi but multiplying the sign-extended LOW 16-bit
 * halves of rs and rs2 (used by MULLO/MACLO).
 */
static void rx_mul64lo(TCGv_i64 ret, int rs, int rs2)
{
    TCGv_i64 tmp0, tmp1;
    tmp0 = tcg_temp_new_i64();
    tmp1 = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(tmp0, cpu_regs[rs]);
    tcg_gen_ext16s_i64(tmp0, tmp0);
    tcg_gen_ext_i32_i64(tmp1, cpu_regs[rs2]);
    tcg_gen_ext16s_i64(tmp1, tmp1);
    tcg_gen_mul_i64(ret, tmp0, tmp1);
    tcg_gen_shli_i64(ret, ret, 16);
    tcg_temp_free_i64(tmp0);
    tcg_temp_free_i64(tmp1);
}

/* mulhi rs,rs2 */
/* ACC = product of high halves. */
static bool trans_MULHI(DisasContext *ctx, arg_MULHI *a)
{
    rx_mul64hi(cpu_acc, a->rs, a->rs2);
    return true;
}

/* mullo rs,rs2 */
/* ACC = product of low halves. */
static bool trans_MULLO(DisasContext *ctx, arg_MULLO *a)
{
    rx_mul64lo(cpu_acc, a->rs, a->rs2);
    return true;
}

/* machi rs,rs2 */
/* ACC += product of high halves. */
static bool trans_MACHI(DisasContext *ctx, arg_MACHI *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64hi(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}

/* maclo rs,rs2 */
/* ACC += product of low halves. */
static bool trans_MACLO(DisasContext *ctx, arg_MACLO *a)
{
    TCGv_i64 tmp;
    tmp = tcg_temp_new_i64();
    rx_mul64lo(tmp, a->rs, a->rs2);
    tcg_gen_add_i64(cpu_acc, cpu_acc, tmp);
    tcg_temp_free_i64(tmp);
    return true;
}

/* mvfachi rd */
/* rd = upper 32 bits of ACC. */
static bool trans_MVFACHI(DisasContext *ctx, arg_MVFACHI *a)
{
    tcg_gen_extrh_i64_i32(cpu_regs[a->rd], cpu_acc);
    return true;
}

/* mvfacmi rd */
/* rd = middle 32 bits of ACC (bits [47:16]). */
static bool trans_MVFACMI(DisasContext *ctx, arg_MVFACMI *a)
{
    TCGv_i64 rd64;
    rd64 = tcg_temp_new_i64();
    tcg_gen_extract_i64(rd64, cpu_acc, 16, 32);
    tcg_gen_extrl_i64_i32(cpu_regs[a->rd], rd64);
    tcg_temp_free_i64(rd64);
    return true;
}

/* mvtachi rs */
/* Upper 32 bits of ACC = rs; lower half untouched. */
static bool trans_MVTACHI(DisasContext *ctx, arg_MVTACHI *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 32, 32);
    tcg_temp_free_i64(rs64);
    return true;
}

/* mvtaclo rs */
/* Lower 32 bits of ACC = rs; upper half untouched. */
static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a)
{
    TCGv_i64 rs64;
    rs64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(rs64, cpu_regs[a->rs]);
    tcg_gen_deposit_i64(cpu_acc, cpu_acc, rs64, 0, 32);
    tcg_temp_free_i64(rs64);
    return true;
}
1863
/* racw #imm */
/* Round ACC; the helper receives the encoded shift plus one. */
static bool trans_RACW(DisasContext *ctx, arg_RACW *a)
{
    TCGv imm = tcg_const_i32(a->imm + 1);
    gen_helper_racw(cpu_env, imm);
    tcg_temp_free(imm);
    return true;
}

/* sat rd */
/* Saturate rd when O is set (bit 31 of psw_o), direction given by S. */
static bool trans_SAT(DisasContext *ctx, arg_SAT *a)
{
    TCGv tmp, z;
    tmp = tcg_temp_new();
    z = tcg_const_i32(0);
    /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */
    tcg_gen_sari_i32(tmp, cpu_psw_s, 31);
    /* S == 1 -> 0x7fffffff / S == 0 -> 0x80000000 */
    tcg_gen_xori_i32(tmp, tmp, 0x80000000);
    /* Pick the saturated value only when psw_o is negative (O set). */
    tcg_gen_movcond_i32(TCG_COND_LT, cpu_regs[a->rd],
                        cpu_psw_o, z, tmp, cpu_regs[a->rd]);
    tcg_temp_free(tmp);
    tcg_temp_free(z);
    return true;
}

/* satr */
/* Saturate the accumulator state; implemented in the helper. */
static bool trans_SATR(DisasContext *ctx, arg_SATR *a)
{
    gen_helper_satr(cpu_env);
    return true;
}
1896
#define cat3(a, b, c) a##b##c
/*
 * FOP expands to the two translator forms of a binary FP operation:
 *  - trans_<name>_ir: "op #imm, rd", the 32-bit float immediate is
 *    fetched from the insn stream via li(ctx, 0);
 *  - trans_<name>_mr: "op rs, rd" / "op dsp[rs], rd", source always
 *    loaded as MO_32.
 * The helpers take cpu_env; FP status (FPSW) handling and exceptions
 * live inside the helpers.
 */
#define FOP(name, op) \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv imm = tcg_const_i32(li(ctx, 0)); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, \
                        cpu_regs[a->rd], imm); \
        tcg_temp_free(imm); \
        return true; \
    } \
    static bool cat3(trans_, name, _mr)(DisasContext *ctx, \
                                        cat3(arg_, name, _mr) * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, \
                        cpu_regs[a->rd], val); \
        tcg_temp_free(mem); \
        return true; \
    }

/*
 * FCONVOP expands to a single translator for a unary FP conversion
 * ("op rs, rd" / "op dsp[rs], rd"), source loaded as MO_32.
 */
#define FCONVOP(name, op) \
    static bool trans_##name(DisasContext *ctx, arg_##name * a) \
    { \
        TCGv val, mem; \
        mem = tcg_temp_new(); \
        val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \
        gen_helper_##op(cpu_regs[a->rd], cpu_env, val); \
        tcg_temp_free(mem); \
        return true; \
    }

FOP(FADD, fadd)
FOP(FSUB, fsub)
FOP(FMUL, fmul)
FOP(FDIV, fdiv)

/* fcmp #imm, rd */
/* Compare only: no destination register; result goes to FP status. */
static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a)
{
    TCGv imm = tcg_const_i32(li(ctx, 0));
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], imm);
    tcg_temp_free(imm);
    return true;
}

/* fcmp dsp[rs], rd */
/* fcmp rs, rd */
static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs);
    gen_helper_fcmp(cpu_env, cpu_regs[a->rd], val);
    tcg_temp_free(mem);
    return true;
}

FCONVOP(FTOI, ftoi)
FCONVOP(ROUND, round)

/* itof rs, rd */
/* itof dsp[rs], rd */
/* Unlike the FOPs above, the integer source honours a->mi (sized load). */
static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a)
{
    TCGv val, mem;
    mem = tcg_temp_new();
    val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs);
    gen_helper_itof(cpu_regs[a->rd], cpu_env, val);
    tcg_temp_free(mem);
    return true;
}
1971
/* Set one bit in a byte in memory (read-modify-write). */
static void rx_bsetm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_or_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Clear one bit in a byte in memory (read-modify-write). */
static void rx_bclrm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_andc_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Test one bit in a byte in memory: C = Z = (bit != 0); no writeback. */
static void rx_btstm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_and_i32(val, val, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(val);
}

/* Invert one bit in a byte in memory (read-modify-write). */
static void rx_bnotm(TCGv mem, TCGv mask)
{
    TCGv val;
    val = tcg_temp_new();
    rx_gen_ld(MO_8, val, mem);
    tcg_gen_xor_i32(val, val, mask);
    rx_gen_st(MO_8, val, mem);
    tcg_temp_free(val);
}

/* Register forms of the above. */
static void rx_bsetr(TCGv reg, TCGv mask)
{
    tcg_gen_or_i32(reg, reg, mask);
}

static void rx_bclrr(TCGv reg, TCGv mask)
{
    tcg_gen_andc_i32(reg, reg, mask);
}

static inline void rx_btstr(TCGv reg, TCGv mask)
{
    TCGv t0;
    t0 = tcg_temp_new();
    tcg_gen_and_i32(t0, reg, mask);
    tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0);
    tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c);
    tcg_temp_free(t0);
}

static inline void rx_bnotr(TCGv reg, TCGv mask)
{
    tcg_gen_xor_i32(reg, reg, mask);
}

/*
 * BITOP expands to the four addressing forms of a bit operation:
 *  _im: immediate bit number, memory byte operand
 *  _ir: immediate bit number, register operand
 *  _rr: register bit number (masked with 31), register operand
 *  _rm: register bit number (masked with 7 -- byte operand), memory
 */
#define BITOP(name, op) \
    static bool cat3(trans_, name, _im)(DisasContext *ctx, \
                                        cat3(arg_, name, _im) * a) \
    { \
        TCGv mask, mem, addr; \
        mem = tcg_temp_new(); \
        mask = tcg_const_i32(1 << a->imm); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        tcg_temp_free(mask); \
        tcg_temp_free(mem); \
        return true; \
    } \
    static bool cat3(trans_, name, _ir)(DisasContext *ctx, \
                                        cat3(arg_, name, _ir) * a) \
    { \
        TCGv mask; \
        mask = tcg_const_i32(1 << a->imm); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        tcg_temp_free(mask); \
        return true; \
    } \
    static bool cat3(trans_, name, _rr)(DisasContext *ctx, \
                                        cat3(arg_, name, _rr) * a) \
    { \
        TCGv mask, b; \
        mask = tcg_const_i32(1); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \
        tcg_gen_shl_i32(mask, mask, b); \
        cat3(rx_, op, r)(cpu_regs[a->rd], mask); \
        tcg_temp_free(mask); \
        tcg_temp_free(b); \
        return true; \
    } \
    static bool cat3(trans_, name, _rm)(DisasContext *ctx, \
                                        cat3(arg_, name, _rm) * a) \
    { \
        TCGv mask, mem, addr, b; \
        mask = tcg_const_i32(1); \
        b = tcg_temp_new(); \
        tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \
        tcg_gen_shl_i32(mask, mask, b); \
        mem = tcg_temp_new(); \
        addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \
        cat3(rx_, op, m)(addr, mask); \
        tcg_temp_free(mem); \
        tcg_temp_free(mask); \
        tcg_temp_free(b); \
        return true; \
    }

BITOP(BSET, bset)
BITOP(BCLR, bclr)
BITOP(BTST, btst)
BITOP(BNOT, bnot)
2094
/*
 * Evaluate condition @cond and store the 0/1 result into bit @pos
 * of @val.
 * NOTE(review): the andi below is redundant -- tcg_gen_deposit_i32
 * overwrites the one-bit field regardless; kept as-is.
 */
static inline void bmcnd_op(TCGv val, TCGCond cond, int pos)
{
    TCGv bit;
    DisasCompare dc;
    dc.temp = tcg_temp_new();
    bit = tcg_temp_new();
    psw_cond(&dc, cond);
    tcg_gen_andi_i32(val, val, ~(1 << pos));
    tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0);
    tcg_gen_deposit_i32(val, val, bit, pos, 1);
    tcg_temp_free(bit);
    tcg_temp_free(dc.temp);
}

/* bmcnd #imm, dsp[rd] */
/* Read-modify-write of one bit in a byte in memory. */
static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a)
{
    TCGv val, mem, addr;
    val = tcg_temp_new();
    mem = tcg_temp_new();
    addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd);
    rx_gen_ld(MO_8, val, addr);
    bmcnd_op(val, a->cd, a->imm);
    rx_gen_st(MO_8, val, addr);
    tcg_temp_free(val);
    tcg_temp_free(mem);
    return true;
}

/* bmcond #imm, rd */
static bool trans_BMCnd_ir(DisasContext *ctx, arg_BMCnd_ir *a)
{
    bmcnd_op(cpu_regs[a->rd], a->cd, a->imm);
    return true;
}
2130
2131 enum {
2132 PSW_C = 0,
2133 PSW_Z = 1,
2134 PSW_S = 2,
2135 PSW_O = 3,
2136 PSW_I = 8,
2137 PSW_U = 9,
2138 };
2139
2140 static inline void clrsetpsw(DisasContext *ctx, int cb, int val)
2141 {
2142 if (cb < 8) {
2143 switch (cb) {
2144 case PSW_C:
2145 tcg_gen_movi_i32(cpu_psw_c, val);
2146 break;
2147 case PSW_Z:
2148 tcg_gen_movi_i32(cpu_psw_z, val == 0);
2149 break;
2150 case PSW_S:
2151 tcg_gen_movi_i32(cpu_psw_s, val ? -1 : 0);
2152 break;
2153 case PSW_O:
2154 tcg_gen_movi_i32(cpu_psw_o, val << 31);
2155 break;
2156 default:
2157 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2158 break;
2159 }
2160 } else if (is_privileged(ctx, 0)) {
2161 switch (cb) {
2162 case PSW_I:
2163 tcg_gen_movi_i32(cpu_psw_i, val);
2164 ctx->base.is_jmp = DISAS_UPDATE;
2165 break;
2166 case PSW_U:
2167 tcg_gen_movi_i32(cpu_psw_u, val);
2168 break;
2169 default:
2170 qemu_log_mask(LOG_GUEST_ERROR, "Invalid distination %d", cb);
2171 break;
2172 }
2173 }
2174 }
2175
/* clrpsw psw */
static bool trans_CLRPSW(DisasContext *ctx, arg_CLRPSW *a)
{
    clrsetpsw(ctx, a->cb, 0);
    return true;
}

/* setpsw psw */
static bool trans_SETPSW(DisasContext *ctx, arg_SETPSW *a)
{
    clrsetpsw(ctx, a->cb, 1);
    return true;
}

/* mvtipl #imm */
/* Privileged: set the interrupt priority level and end the TB. */
static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_movi_i32(cpu_psw_ipl, a->imm);
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}

/* mvtc #imm, rd */
/* Move immediate to control register; cr 0 (PSW) ends the TB. */
static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a)
{
    TCGv imm;

    imm = tcg_const_i32(a->imm);
    move_to_cr(ctx, imm, a->cr);
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    tcg_temp_free(imm);
    return true;
}

/* mvtc rs, rd */
static bool trans_MVTC_r(DisasContext *ctx, arg_MVTC_r *a)
{
    move_to_cr(ctx, cpu_regs[a->rs], a->cr);
    if (a->cr == 0 && is_privileged(ctx, 0)) {
        ctx->base.is_jmp = DISAS_UPDATE;
    }
    return true;
}

/* mvfc rs, rd */
/* ctx->pc is passed so a read of the PC control register can resolve. */
static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a)
{
    move_from_cr(cpu_regs[a->rd], a->cr, ctx->pc);
    return true;
}

/* rtfi */
/* Privileged fast-interrupt return: restore PC/PSW from BPC/BPSW. */
static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        tcg_gen_mov_i32(cpu_pc, cpu_bpc);
        tcg_gen_mov_i32(psw, cpu_bpsw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}

/* rte */
/* Privileged interrupt return: pop PC then PSW from the stack. */
static bool trans_RTE(DisasContext *ctx, arg_RTE *a)
{
    TCGv psw;
    if (is_privileged(ctx, 1)) {
        psw = tcg_temp_new();
        pop(cpu_pc);
        pop(psw);
        gen_helper_set_psw_rte(cpu_env, psw);
        ctx->base.is_jmp = DISAS_EXIT;
        tcg_temp_free(psw);
    }
    return true;
}

/* brk */
/* Sync PC, then raise the break exception in the helper. */
static bool trans_BRK(DisasContext *ctx, arg_BRK *a)
{
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxbrk(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* int #imm */
/* Software interrupt through vector a->imm (must be < 0x100). */
static bool trans_INT(DisasContext *ctx, arg_INT *a)
{
    TCGv vec;

    tcg_debug_assert(a->imm < 0x100);
    vec = tcg_const_i32(a->imm);
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_rxint(cpu_env, vec);
    tcg_temp_free(vec);
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* wait */
/* Privileged: advance PC by 2 (presumably past this insn), then halt. */
static bool trans_WAIT(DisasContext *ctx, arg_WAIT *a)
{
    if (is_privileged(ctx, 1)) {
        tcg_gen_addi_i32(cpu_pc, cpu_pc, 2);
        gen_helper_wait(cpu_env);
    }
    return true;
}
2293
static void rx_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    /* Stash the CPU state pointer for use while decoding. */
    CPURXState *env = cs->env_ptr;
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    ctx->env = env;
}

static void rx_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}

static void rx_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Record the insn address; restore_state_to_opc() reads it back. */
    tcg_gen_insn_start(ctx->base.pc_next);
}

static bool rx_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                   const CPUBreakpoint *bp)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* We have hit a breakpoint - make sure PC is up-to-date */
    tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
    gen_helper_debug(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    /* Report one byte consumed so the TB covers the breakpoint address. */
    ctx->base.pc_next += 1;
    return true;
}

static void rx_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint32_t insn;

    /* ctx->pc is the start of this insn, used for pc-relative operands. */
    ctx->pc = ctx->base.pc_next;
    insn = decode_load(ctx);
    if (!decode(ctx, insn)) {
        /* Undecodable: raise an illegal-instruction exception. */
        gen_helper_raise_illegal_instruction(cpu_env);
    }
}

static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        /* Fall through to the next insn via a chained goto_tb. */
        gen_goto_tb(ctx, 0, dcbase->pc_next);
        break;
    case DISAS_JUMP:
        /* cpu_pc already holds the target. */
        if (ctx->base.singlestep_enabled) {
            gen_helper_debug(cpu_env);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    case DISAS_UPDATE:
        /* CPU state changed (e.g. PSW); sync PC and leave the TB. */
        tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next);
        /* fall through */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    case DISAS_NORETURN:
        break;
    default:
        g_assert_not_reached();
    }
}
2365
static void rx_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */
    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}

/* Hooks consumed by the generic translator_loop(). */
static const TranslatorOps rx_tr_ops = {
    .init_disas_context = rx_tr_init_disas_context,
    .tb_start           = rx_tr_tb_start,
    .insn_start         = rx_tr_insn_start,
    .breakpoint_check   = rx_tr_breakpoint_check,
    .translate_insn     = rx_tr_translate_insn,
    .tb_stop            = rx_tr_tb_stop,
    .disas_log          = rx_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    /* translator_loop() initializes dc.base and drives the hooks above. */
    translator_loop(&rx_tr_ops, &dc.base, cs, tb, max_insns);
}

void restore_state_to_opc(CPURXState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    /* data[0] is the pc recorded by tcg_gen_insn_start(). */
    env->pc = data[0];
}

/* Create one TCG global backed by the CPURXState field of the same name. */
#define ALLOC_REGISTER(sym, name) \
    cpu_##sym = tcg_global_mem_new_i32(cpu_env, \
                                       offsetof(CPURXState, sym), name)

/* Called once at startup to create all TCG globals for the RX CPU. */
void rx_translate_init(void)
{
    static const char * const regnames[NUM_REGS] = {
        "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7",
        "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15"
    };
    int i;

    for (i = 0; i < NUM_REGS; i++) {
        cpu_regs[i] = tcg_global_mem_new_i32(cpu_env,
                                              offsetof(CPURXState, regs[i]),
                                              regnames[i]);
    }
    ALLOC_REGISTER(pc, "PC");
    ALLOC_REGISTER(psw_o, "PSW(O)");
    ALLOC_REGISTER(psw_s, "PSW(S)");
    ALLOC_REGISTER(psw_z, "PSW(Z)");
    ALLOC_REGISTER(psw_c, "PSW(C)");
    ALLOC_REGISTER(psw_u, "PSW(U)");
    ALLOC_REGISTER(psw_i, "PSW(I)");
    ALLOC_REGISTER(psw_pm, "PSW(PM)");
    ALLOC_REGISTER(psw_ipl, "PSW(IPL)");
    ALLOC_REGISTER(usp, "USP");
    ALLOC_REGISTER(fpsw, "FPSW");
    ALLOC_REGISTER(bpsw, "BPSW");
    ALLOC_REGISTER(bpc, "BPC");
    ALLOC_REGISTER(isp, "ISP");
    ALLOC_REGISTER(fintv, "FINTV");
    ALLOC_REGISTER(intb, "INTB");
    /* 64-bit multiply-accumulate register. */
    cpu_acc = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPURXState, acc), "ACC");
}