/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "disas.h"
#include "host-utils.h"
#include "tcg-op.h"
#include "qemu-common.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
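/* Sizing note for the buffer above: it holds the NUL-terminated names
   "ir0".."ir30" and "fir0".."fir30" back to back.  "ir0".."ir9" take
   4 bytes each ("ir" + digit + NUL) and "ir10".."ir30" take 5, hence
   10*4 + 21*5; each "fir" name is one byte longer, hence 10*5 + 21*6.  */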
#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
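/* Note on the final store above: resetting cpu_lock_addr to -1 on both
   the success and failure paths means a second store-conditional without
   an intervening load-locked always takes the fail branch, since no
   valid lock address compares equal to -1.  */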
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return NO_EXIT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either a conditional move on the PC
             (movcond pc, cond, 0, tmp, pc)
           or a branchless setcond/mask sequence here.
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
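/* Note on the exit_tb values above: the argument to tcg_gen_exit_tb is
   the TB pointer with the goto_tb slot index (0 or 1) in the low bits,
   so "(tcg_target_long)ctx->tb + 1" identifies jump slot 1 of this TB
   for later chaining, while a plain 0 means "no chaining".  */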
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
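/* Worked example for the >= / < case above: -0.0 is the single bit
   pattern 0x8000000000000000.  setcond(NE, src, mzero) yields 1 for
   every value except -0.0; negating turns that into an all-ones mask
   (or zero for -0.0 itself), so the final AND maps -0.0 to +0.0 and
   leaves every other bit pattern unchanged.  */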
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
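/* Example of how these combine: an IEEE operation with the /SUI suffix
   sets QUAL_S | QUAL_U | QUAL_I = 0x400 | 0x100 | 0x200 = 0x700 in fn11,
   on top of whatever rounding-mode bits sit under QUAL_RM_MASK.  */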
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
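/* Bit mapping performed above: result bits <31:30> come from source
   bits <63:62> (via the arithmetic shift and the sign-extended
   0xc0000000 mask) and result bits <29:0> come from source bits <58:29>
   (via the shift by 29 and the 0x3fffffff mask), i.e. the layout used
   for a longword held in a floating-point register.  */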
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
        tcg_temp_free(tmp);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
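/* The masks used above: CPYS and CPYSN operate on just the IEEE sign
   bit (0x8000000000000000), while CPYSE also copies the 11-bit exponent
   field (0xFFF0000000000000), leaving only the fraction to come from
   the second operand.  */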
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
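/* Example: zapnot_mask(0x0f) sets the low four byte lanes and returns
   0x00000000ffffffff, which is why gen_zapnoti below can special-case
   lit == 0x0f as a plain 32-bit zero extension.  */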
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
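/* Worked example of the ones-complement trick above: for (B & 7) == 2
   the desired right shift is 64 - 16 = 48; ~(2 * 8) & 63 = 47, and the
   extra constant shift by 1 completes the 48.  For (B & 7) == 0 it
   yields 63 + 1 = 64, producing the required zero without an
   out-of-range 64-bit shift count.  */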
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);
    }
    return 0;
}
static void gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return;
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
}
static void gen_mtpr(int rb, int regno)
{
    TCGv tmp;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    /* These two register numbers perform a TLB cache flush.  Thankfully we
       can only do this inside PALmode, which means that the current basic
       block cannot be affected by the change in mappings.  */
    if (regno == 255) {
        /* TBIA */
        gen_helper_tbia();
    } else if (regno == 254) {
        /* TBIS */
        gen_helper_tbis(tmp);
    } else {
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        int data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }
}
#endif /* !USER_ONLY */
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
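    /* The shift pair above is a portable sign extension of the 21-bit
       branch displacement: e.g. insn bits <20:0> = 0x1FFFFF becomes
       ((0x1FFFFF << 11) >> 11) = -1 once the arithmetic right shift on
       the signed int32_t operand replicates the sign bit.  */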
    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x01 ... 0x07:
        /* OPC01 .. OPC07 */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1744 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1750 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1754 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1755 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1761 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1765 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1769 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x2B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x1E:
            /* CVTDG -- TODO */
            goto invalid_opc;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2D:
            /* CVTGD -- TODO */
            goto invalid_opc;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31) {
                gen_helper_load_pcc(cpu_ir[ra]);
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            gen_mfpr(ra, insn & 0xffff);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    else
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
                break;
            }
            goto invalid_opc;
        case 0x30:
            /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    else
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                }
                break;
            }
            goto invalid_opc;
        case 0x31:
            /* PERR */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_perr(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x32:
            /* CTLZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                    else
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
                }
                break;
            }
            goto invalid_opc;
        case 0x33:
            /* CTTZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                    else
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
                }
                break;
            }
            goto invalid_opc;
        case 0x34:
            /* UNPKBW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbw(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x35:
            /* UNPKBL */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbl(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x36:
            /* PKWB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pkwb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x37:
            /* PKLB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pklb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x38:
            /* MINSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x39:
            /* MINSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3A:
            /* MINUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3B:
            /* MINUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3C:
            /* MAXUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3D:
            /* MAXUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3E:
            /* MAXSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3F:
            /* MAXSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x70:
            /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            gen_mtpr(rb, insn & 0xffff);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            if (rb == 31) {
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
                   address from EXC_ADDR.  This turns out to be useful for our
                   emulation PALcode, so continue to accept it.  */
                TCGv tmp = tcg_temp_new();
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
                gen_helper_hw_ret(tmp);
                tcg_temp_free(tmp);
            } else {
                gen_helper_hw_ret(cpu_ir[rb]);
            }
            ret = EXIT_PC_UPDATED;
            break;
        }
#endif
        goto invalid_opc;
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr, val;

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);

            if (ra != 31) {
                val = cpu_ir[ra];
            } else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }

            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(addr, val);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(addr, val);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(val, addr, val);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(val, addr, val);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6 ... 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }

            if (ra == 31) {
                tcg_temp_free(val);
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",      IMPLVER_2106x, 0 },
    { "ev5",      IMPLVER_21164, 0 },
    { "ev56",     IMPLVER_21164, AMASK_BWX },
    { "pca56",    IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",      IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",     IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                  | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",     IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                  | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",    IMPLVER_2106x, 0 },
    { "21164",    IMPLVER_21164, 0 },
    { "21164a",   IMPLVER_21164, AMASK_BWX },
    { "21164pc",  IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",    IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",   IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                  | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
* cpu_alpha_init (const char *cpu_model
)
3451 int implver
, amask
, i
, max
;
3453 env
= qemu_mallocz(sizeof(CPUAlphaState
));
3455 alpha_translate_init();
3458 /* Default to ev67; no reason not to emulate insns by default. */
3459 implver
= IMPLVER_21264
;
3460 amask
= (AMASK_BWX
| AMASK_FIX
| AMASK_CIX
| AMASK_MVI
3461 | AMASK_TRAP
| AMASK_PREFETCH
);
3463 max
= ARRAY_SIZE(cpu_defs
);
3464 for (i
= 0; i
< max
; i
++) {
3465 if (strcmp (cpu_model
, cpu_defs
[i
].name
) == 0) {
3466 implver
= cpu_defs
[i
].implver
;
3467 amask
= cpu_defs
[i
].amask
;
3471 env
->implver
= implver
;
3474 #if defined (CONFIG_USER_ONLY)
3475 env
->ps
= PS_USER_MODE
;
3476 cpu_alpha_store_fpcr(env
, (FPCR_INVD
| FPCR_DZED
| FPCR_OVFD
3477 | FPCR_UNFD
| FPCR_INED
| FPCR_DNOD
));
3479 env
->lock_addr
= -1;
3482 qemu_init_vcpu(env
);
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}