/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec-all.h"
#include "tcg-op.h"
#include "host-utils.h"
#include "qemu-common.h"

#undef ALPHA_DEBUG_DISAS
#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;
};

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];
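/* Sizing note: "ir0".."ir9" take 4 bytes each including the NUL (10*4)
   and "ir10".."ir30" take 5 (21*5); likewise "fir0".."fir9" take 5
   (10*5) and "fir10".."fir30" take 6 (21*6), matching the pointer
   increments in alpha_translate_init() below.  */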
#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
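/* Note on the load-locked scheme: ldl_l/ldq_l record the locked address
   in cpu_lock before loading; the matching gen_qemu_stl_c/stq_c
   generators below perform the store only when cpu_lock still equals
   the store address, write the 0/1 success flag back into t0, and
   reset cpu_lock to -1 in either case.  */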
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;

    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static inline void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp, int mask)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
}
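/* The "disp << 2" above scales the word displacement to bytes: Alpha
   branch targets are PC_of_next_insn + 4*disp, and ctx->pc already
   points past the branch, so disp == -1 branches back to the branch
   instruction itself.  */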
static inline void gen_fbcond(DisasContext *ctx, int opc, int ra, int32_t disp)
{
    int l1, l2;
    TCGv tmp;
    TCGv src;

    l1 = gen_new_label();
    l2 = gen_new_label();
    if (ra != 31) {
        tmp = tcg_temp_new();
        src = cpu_fir[ra];
    } else {
        tmp = tcg_const_i64(0);
        src = tmp;
    }
    switch (opc) {
    case 0x31: /* FBEQ */
        gen_helper_cmpfeq(tmp, src);
        break;
    case 0x32: /* FBLT */
        gen_helper_cmpflt(tmp, src);
        break;
    case 0x33: /* FBLE */
        gen_helper_cmpfle(tmp, src);
        break;
    case 0x35: /* FBNE */
        gen_helper_cmpfne(tmp, src);
        break;
    case 0x36: /* FBGE */
        gen_helper_cmpfge(tmp, src);
        break;
    case 0x37: /* FBGT */
        gen_helper_cmpfgt(tmp, src);
        break;
    default:
        abort();
    }
    tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0, l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(l2);
    tcg_temp_free(tmp);
}
static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (rb != 31)                                       \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    else {                                              \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}
#define FARITH3(name)                                                    \
static inline void glue(gen_f, name)(int ra, int rb, int rc)             \
{                                                                        \
    if (unlikely(rc == 31))                                              \
        return;                                                          \
    if (ra != 31) {                                                      \
        if (rb != 31)                                                    \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], cpu_fir[rb]); \
        else {                                                           \
            TCGv tmp = tcg_const_i64(0);                                 \
            gen_helper_ ## name (cpu_fir[rc], cpu_fir[ra], tmp);         \
            tcg_temp_free(tmp);                                          \
        }                                                                \
    } else {                                                             \
        TCGv tmp = tcg_const_i64(0);                                     \
        if (rb != 31)                                                    \
            gen_helper_ ## name (cpu_fir[rc], tmp, cpu_fir[rb]);         \
        else                                                             \
            gen_helper_ ## name (cpu_fir[rc], tmp, tmp);                 \
        tcg_temp_free(tmp);                                              \
    }                                                                    \
}
#define FCMOV(name)                                          \
static inline void glue(gen_f, name)(int ra, int rb, int rc) \
{                                                            \
    int l1;                                                  \
    TCGv tmp;                                                \
    if (unlikely(rc == 31))                                  \
        return;                                              \
    l1 = gen_new_label();                                    \
    if (ra != 31) {                                          \
        tmp = tcg_temp_new();                                \
        gen_helper_ ## name (tmp, cpu_fir[ra]);              \
    } else {                                                 \
        tmp = tcg_const_i64(0);                              \
        gen_helper_ ## name (tmp, tmp);                      \
    }                                                        \
    tcg_gen_brcondi_i64(TCG_COND_EQ, tmp, 0, l1);            \
    if (rb != 31)                                            \
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);           \
    else                                                     \
        tcg_gen_movi_i64(cpu_fir[rc], 0);                    \
    gen_set_label(l1);                                       \
    tcg_temp_free(tmp);                                      \
}
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
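/* Example: lit == 0x0f selects the low four bytes, expanding to
   mask == 0x00000000ffffffffull; lit == 0xff keeps all eight bytes.  */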
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
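/* Worked example of the split shift above: for (B & 7) == 3 the byte
   shift is 24, ~24 & 63 == 39, and the extra 1-bit shift completes the
   full 40 == 64 - 24.  For (B & 7) == 0, ~0 & 63 == 63 plus the 1-bit
   shift acts as a 64-bit shift and correctly leaves zero.  */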
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                   \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit, \
                                    uint8_t lit)                       \
{                                                                      \
    if (unlikely(rc == 31))                                            \
        return;                                                        \
    if (ra != 31) {                                                    \
        if (islit) {                                                   \
            TCGv tmp = tcg_const_i64(lit);                             \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);          \
            tcg_temp_free(tmp);                                        \
        } else                                                         \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);  \
    } else {                                                           \
        TCGv tmp1 = tcg_const_i64(0);                                  \
        if (islit) {                                                   \
            TCGv tmp2 = tcg_const_i64(lit);                            \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);              \
            tcg_temp_free(tmp2);                                       \
        } else                                                         \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);        \
        tcg_temp_free(tmp1);                                           \
    }                                                                  \
}
#define MVIOP2(name)                                  \
static inline void glue(gen_, name)(int rb, int rc)   \
{                                                     \
    if (unlikely(rc == 31))                           \
        return;                                       \
    if (unlikely(rb == 31))                           \
        tcg_gen_movi_i64(cpu_ir[rc], 0);              \
    else                                              \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
}
static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
    tcg_temp_free(tmp);
}
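/* gen_cmp materializes the comparison result as 0 or 1 in cpu_ir[rc]
   by branching over the two movi's above; the TCG used here exposes no
   set-condition primitive, so this label-based sequence is the
   portable idiom.  */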
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;
    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    ret = 0;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
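    /* Field layout recap (Alpha instruction formats): opcode in
       insn<31:26>, ra in insn<25:21>, rb in insn<20:16>, rc in
       insn<4:0>; memory format carries disp16 in insn<15:0>, branch
       format disp21 in insn<20:0>, and operate format the 8-bit
       literal in insn<20:13> with bit 12 (islit) selecting literal
       versus register rb.  */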
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            ret = 3;
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            ret = 3;
            break;
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
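        /* PAL entry points are spaced 64 bytes apart, hence the
           "(palcode & 0x3F) << 6" offset added to the EXCP_CALL_PAL /
           EXCP_CALL_PALP bases above.  */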
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* f11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(rb, rc);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x2B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        /* XXX: rounding mode and traps are ignored (!) */
        switch (fpfn) { /* f11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ra, rb, rc);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ra, rb, rc);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ra, rb, rc);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ra, rb, rc);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ra, rb, rc);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ra, rb, rc);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ra, rb, rc);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ra, rb, rc);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ra, rb, rc);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ra, rb, rc);
            break;
        case 0x2C:
            /* XXX: incorrect */
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(rb, rc);
            } else {
                /* CVTTS */
                gen_fcvtts(rb, rc);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmpfeq(ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmpfne(ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmpflt(ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmpfge(ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmpfle(ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmpfgt(ra, rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
            gen_fcvtqlv(rb, rc);
            break;
        case 0x530:
            /* CVTQL/SV */
            gen_fcvtqlsv(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000: /* TRAPB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400: /* EXCB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0xC000: /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000: /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xF000: /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* Those four jumps only differ by the branch prediction hint */
        ret = 1;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a)*/
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_perr(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbw(rb, rc);
            break;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbl(rb, rc);
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pkwb(rb, rc);
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pklb(rb, rc);
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsb8(ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsw4(ra, rb, rc, islit, lit);
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minub8(ra, rb, rc, islit, lit);
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minuw4(ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxub8(ra, rb, rc, islit, lit);
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxuw4(ra, rb, rc, islit, lit);
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsb8(ra, rb, rc, islit, lit);
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsw4(ra, rb, rc, islit, lit);
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = 2;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            default:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x30:
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
    case 0x32: /* FBLT */
    case 0x33: /* FBLE */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
    case 0x36: /* FBGE */
    case 0x37: /* FBGT */
        gen_fbcond(ctx, opc, ra, disp21);
        ret = 1;
        break;
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = 1;
        break;
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;
    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;
        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* if we reach a page boundary or are single stepping, stop
         * generation
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }
        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;
        if (gen_opc_ptr >= gen_opc_end)
            break;
        if (num_insns >= max_insns)
            break;
        if (singlestep)
            break;
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#if defined ALPHA_DEBUG_DISAS
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, 0);
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",     IMPLVER_2106x, 0 },
    { "ev5",     IMPLVER_21164, 0 },
    { "ev56",    IMPLVER_21164, AMASK_BWX },
    { "pca56",   IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",     IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
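/* This table is searched by name in cpu_alpha_init() below; an unknown
   model name falls back to the ev67 defaults.  Each entry pairs the
   IMPLVER value a guest can query with the AMASK bits advertising the
   optional extensions (BWX byte/word, FIX float-integer moves and sqrt,
   CIX count instructions, MVI multimedia).  */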
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default. */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;
#if !defined (CONFIG_USER_ONLY)
    /* Initialize IPR */
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    //    env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    //    env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    //    env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    //    env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    //    env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;
#endif

    qemu_init_vcpu(env);
    return env;
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}