/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"
#include "qemu-common.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
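/* Illustrative note (not in the original source): the size expression
   above counts the NUL-terminated names "ir0".."ir9" (4 bytes each) and
   "ir10".."ir30" (5 bytes each), plus "fir0".."fir9" (5 bytes) and
   "fir10".."fir30" (6 bytes): 10*4 + 21*5 + 10*5 + 21*6.  */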
#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
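/* Illustrative note (added commentary, not in the original source):
   the LDx_L helpers above record the locked address and the loaded value
   in cpu_lock_addr/cpu_lock_value; gen_store_conditional() later succeeds
   only if both still match, approximating Alpha's load-locked /
   store-conditional semantics for a single-threaded guest.  */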
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
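/* Sketch of the emitted sequence for STx_C in system mode (illustrative
   summary of the code above, not additional generated code):

       if (addr != lock_addr)   goto fail;
       val = mem[addr];
       if (val != lock_value)   goto fail;
       mem[addr] = ra;  ra = 1;  goto done;
     fail:
       ra = 0;
     done:
       lock_addr = -1;
 */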
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or
             setcond tmp, cond, 0
             movi pc, next
             neg tmp, tmp
             andi tmp, tmp, disp
             add pc, pc, tmp
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
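/* Illustrative note (not in the original source): tcg_gen_exit_tb() is
   handed the TB pointer with a jump-slot index in the low bits -- (tb | 0)
   for the fall-through goto_tb above and (tb | 1) for the taken branch --
   which is how the translator identifies which jump to chain later.  */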
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
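/* Worked example (added commentary): for src == -0.0, bit pattern
   0x8000000000000000, the GE/LT path computes setcond(NE, src, mzero) = 0,
   neg(0) = 0, and 0 & src = +0.0; any other value yields an all-ones mask
   and passes through unchanged.  */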
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
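/* Example decode (illustrative note, not from the original source): an
   IEEE operation carrying the /SUI qualifier combination has QUAL_S,
   QUAL_U and QUAL_I all set in fn11, while fn11 & QUAL_RM_MASK
   independently selects one of the four rounding modes above.  */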
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}

static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

/* CPYS */
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

/* CPYSN */
static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

/* CPYSE */
static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
#define FARITH3(name)                                                   \
static inline void glue(gen_f, name)(int ra, int rb, int rc)            \
{                                                                       \
    TCGv va, vb;                                                        \
                                                                        \
    if (unlikely(rc == 31)) {                                           \
        return;                                                         \
    }                                                                   \
    if (ra == 31) {                                                     \
        va = tcg_const_i64(0);                                          \
    } else {                                                            \
        va = cpu_fir[ra];                                               \
    }                                                                   \
    if (rb == 31) {                                                     \
        vb = tcg_const_i64(0);                                          \
    } else {                                                            \
        vb = cpu_fir[rb];                                               \
    }                                                                   \
                                                                        \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                          \
                                                                        \
    if (ra == 31) {                                                     \
        tcg_temp_free(va);                                              \
    }                                                                   \
    if (rb == 31) {                                                     \
        tcg_temp_free(vb);                                              \
    }                                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
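/* Worked example (added commentary): zapnot_mask(0x0f) keeps the low four
   bytes and yields 0x00000000ffffffffull, which is why gen_zapnoti() below
   can special-case lit == 0x0f as a plain 32-bit zero extension.  */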
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
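/* Worked example of the shift-splitting trick above (added commentary):
   for (B & 7) == 3 the byte offset is 24 and ~24 & 63 == 39, so the two
   shifts move the data right by 39 + 1 == 40 == 64 - 24 bits; for
   (B & 7) == 0 they shift by 63 + 1 == 64, producing the required zero.  */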
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
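    /* Illustrative note (not in the original source): the shift pair used
       for disp21 and disp12 above is a portable sign extension; e.g. the
       21-bit all-ones field 0x1FFFFF becomes 0xFFFFF800 after "<< 11" and
       -1 after the arithmetic ">> 11" of the int32_t.  */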
    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            ret = gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx != MMU_KERNEL_IDX) {
                goto invalid_opc;
            }
            ret = gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            break;
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x2B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_perr(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbw (rb, rc);
            break;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbl (rb, rc);
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pkwb (rb, rc);
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pklb (rb, rc);
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
            ret = EXIT_PC_STALE;
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = EXIT_PC_UPDATED;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(addr, val);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(addr, val);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(val, addr, val);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(val, addr, val);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            default:
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = MMU_USER_IDX;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
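/* Usage sketch (illustrative, not from the original source): callers pass
   one of the model names above to cpu_alpha_init(), e.g.

       CPUAlphaState *env = cpu_alpha_init("ev67");

   unrecognized names fall back to the ev67-class defaults chosen in
   cpu_alpha_init() below.  */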
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    env->ps = PS_USER_MODE;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#endif
    env->lock_addr = -1;

    /* Initialize IPR */
#if defined (CONFIG_USER_ONLY)
    env->ipr[IPR_EXC_ADDR] = 0;
    env->ipr[IPR_EXC_SUM] = 0;
    env->ipr[IPR_EXC_MASK] = 0;
#else
    {
        // uint64_t hwpcb;
        // hwpcb = env->ipr[IPR_PCBB];
        env->ipr[IPR_ASN] = 0;
        env->ipr[IPR_ASTEN] = 0;
        env->ipr[IPR_ASTSR] = 0;
        env->ipr[IPR_DATFX] = 0;
        /* XXX: fix this */
        // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
        // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
        // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
        // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
        env->ipr[IPR_FEN] = 0;
        env->ipr[IPR_IPL] = 31;
        env->ipr[IPR_MCES] = 0;
        env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
        // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
        env->ipr[IPR_SISR] = 0;
        env->ipr[IPR_VIRBND] = -1ULL;
    }
#endif

    qemu_init_vcpu(env);
    return env;
}
*env
, TranslationBlock
*tb
, int pc_pos
)
3345 env
->pc
= gen_opc_pc
[pc_pos
];