/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#include "qemu/host-utils.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
    /* Temporary for immediate constants.  */
    TCGv lit;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_st_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };

    static bool done_init = 0;
    int i;

    if (done_init) {
        return;
    }
    done_init = 1;
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]),
                                           greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(TCG_AREG0, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->zero)) {
        ctx->zero = tcg_const_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (TCGV_IS_UNUSED_I64(ctx->sink)) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        ctx->lit = tcg_const_i64(lit);
        return ctx->lit;
    } else if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
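
/* Note: the LDx_L helpers above record both the locked address and the
   value just loaded in cpu_lock_addr/cpu_lock_value; gen_store_conditional
   below replays that pair to decide whether the matching STx_C wins.  */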
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, bool fp,
                                bool clear)
{
    TCGv tmp, addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, bool fp,
                                 bool clear)
{
    TCGv tmp, addr, va;

    tmp = tcg_temp_new();
    addr = load_gpr(ctx, rb);

    if (disp16) {
        tcg_gen_addi_i64(tmp, addr, disp16);
        addr = tmp;
    }
    if (clear) {
        tcg_gen_andi_i64(tmp, addr, ~0x7);
        addr = tmp;
    }

    va = (fp ? load_fpr(ctx, ra) : load_gpr(ctx, ra));
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(tmp);
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, quad ? MO_LEQ : MO_LESL);
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        tcg_gen_qemu_st_i64(cpu_ir[ra], addr, ctx->mem_idx,
                            quad ? MO_LEQ : MO_LEUL);
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
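
/* RA receives the architected STx_C result: 0 on the fail path, 1 on
   success.  Resetting cpu_lock_addr to -1 afterward (presumably never a
   valid locked address) guarantees that a later STx_C without a fresh
   LDx_L takes the fail path.  */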
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && addr < 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
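
/* This matches kernel-mode addresses in the Alpha superpage (KSEG),
   e.g. 0xfffffc0000000000: a canonical negative address whose bits
   <42:41> equal 2.  The segment is direct-mapped, so its translation
   and permissions can never change -- use_goto_tb relies on that.  */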
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if ((ctx->tb->cflags & CF_LAST_IO)
        || ctx->singlestep_enabled || singlestep) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (mask) {
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, load_gpr(ctx, ra), 1);
    } else {
        cmp_tmp = load_gpr(ctx, ra);
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
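
/* Worked example: -0.0 is 0x8000000000000000, i.e. only the sign bit
   set.  For ==/!=, anding with mzero - 1 maps both -0.0 and +0.0 to the
   same zero pattern.  For >=/<, the setcond/neg pair builds an all-ones
   mask for every value except exactly -0.0, so only -0.0 is rewritten
   to +0.0 before the signed comparison.  */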
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
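
/* The 11-bit function field of an FP operate instruction thus splits as
   qualifier bits <10:6> above the 6-bit operation code; the low six bits
   are extracted separately as fpfn in translate_one, while the QUAL_*
   masks test the /s, /u (/v), /i and rounding-mode qualifiers.  */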
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        }
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_sari_i64(tmp, vb, 32);
    tcg_gen_shri_i64(vc, vb, 29);
    tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vc, 0x3fffffff);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}

static void gen_fcvtql(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_i64(tmp, vb, (int32_t)0xc0000000);
    tcg_gen_andi_i64(vc, vb, 0x3FFFFFFF);
    tcg_gen_shli_i64(tmp, tmp, 32);
    tcg_gen_shli_i64(vc, vc, 29);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
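
/* The register layout being shuffled here: a longword held in an FP
   register keeps memory bits <31:30> in register bits <63:62> and memory
   bits <29:0> in register bits <58:29>.  gen_fcvtlq unpacks that into a
   sign-extended 32-bit integer; gen_fcvtql packs it back.  */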
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;
    int ignore = 0;

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(vc, cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(vc, cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_const_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(vmask);
    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_fp_exc_clear();

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
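
/* For example, zapnot_mask(0x0f) selects the low four byte lanes and
   yields 0x00000000ffffffff, which is why gen_zapnoti below can special
   case lit values 0x01/0x03/0x0f/0xff as the 8/16/32/64-bit zero
   extensions.  */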
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shli_i64(vc, va, (64 - lit * 8) & 0x3f);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        tcg_gen_shri_i64(vc, va, (lit & 7) * 8);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask and extract
       bits <15:8> and apply that zap at the end.  This is equivalent to simply
       performing the zap first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        lit &= 7;
        if (unlikely(lit == 0)) {
            tcg_gen_movi_i64(vc, 0);
        } else {
            tcg_gen_shri_i64(vc, tmp, 64 - lit * 8);
        }
    } else {
        TCGv shift = tcg_temp_new();

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
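
/* Example of the split shift: for B & 7 == 2 the required right shift is
   64 - 16 = 48, and ~(2 * 8) & 63 = 47 followed by the extra shift of 1
   gives exactly 48; for B & 7 == 0 it gives 63 + 1 = 64, producing the
   required zero without relying on an out-of-range TCG shift count.  */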
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    TCGv tmp = tcg_temp_new();

    /* The instruction description has us left-shift the byte mask
       the same number of byte slots as the data and apply the zap
       at the end.  This is equivalent to simply performing the zap
       first and shifting afterward.  */
    gen_zapnoti(tmp, va, byte_mask);

    if (islit) {
        tcg_gen_shli_i64(vc, tmp, (lit & 7) * 8);
    } else {
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
    }
    tcg_temp_free(tmp);
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
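
/* Used for the RS/RC miscellaneous ops: the previous intr_flag value is
   returned in RA (unless RA is $31) and the flag is then unconditionally
   written with SET -- 1 to set it, 0 to clear it.  */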
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env,
                             offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(cpu_ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);
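
        /* For example, unprivileged palcode 0x80 enters at offset 0x2000
           and 0x81 at 0x2040: each vector is 64 bytes, with the privileged
           entry points starting at offset 0x1000 from the PAL base.  */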
        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
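
/* cpu_pr_data packs an access width into bits that can never be part of
   a real env offset: gen_mfpr/gen_mtpr below strip PR_BYTE/PR_LONG again
   to recover the offset and pick the 8-, 32- or 64-bit load/store.  */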
static ExitStatus gen_mfpr(TCGv va, int regno)
{
    int data = cpu_pr_data(regno);

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(va);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(va);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(va, 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(va, cpu_env, data);
    }
    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    TCGv tmp;
    int data;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                         offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */
#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tb->flags & (FLAG)) == 0) {   \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit;
    TCGv va, vb, vc, tmp;
    TCGv_i32 t32;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
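
    /* All Alpha instructions are 32 bits with the major opcode in
       <31:26>.  The memory format has RA in <25:21>, RB in <20:16> and a
       16-bit displacement in <15:0>; the branch format a 21-bit
       displacement in <20:0>; the operate format RC in <4:0>, a literal
       flag in bit <12> with the 8-bit literal in <20:13>, and a function
       code in <11:5> (<15:5> for FP operates).  Every field is decoded
       up front whether or not this opcode uses it.  */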
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;
    case 0x0A:
        /* LDBU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX);
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            gen_helper_cmpbge(vc, va, vb);
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            gen_helper_addlv(vc, cpu_env, va, vb);
            break;
        case 0x49:
            /* SUBL/V */
            gen_helper_sublv(vc, cpu_env, va, vb);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_helper_addqv(vc, cpu_env, va, vb);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_helper_subqv(vc, cpu_env, va, vb);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
                tcg_gen_andi_i64(vc, vb, ~amask);
            }
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            gen_helper_mullv(vc, cpu_env, va, vb);
            break;
        case 0x60:
            /* MULQ/V */
            gen_helper_mulqv(vc, cpu_env, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_trunc_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_fcvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            break;
        case 0x025:
            /* MF_FPCR */
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            REQUIRE_REG_31(ra);
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_fcvtql(vc, vb);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_fcvtql_v_input(cpu_env, vb);
            gen_fcvtql(vc, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (use_icount) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                gen_io_end();
                ret = EXIT_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(va, cpu_env, addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(va, cpu_env, addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(va, cpu_env, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(va, cpu_env, addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
= dest_gpr(ctx
, rc
);
2415 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2417 va
= load_fpr(ctx
, ra
);
2418 tcg_gen_mov_i64(vc
, va
);
2420 } else if (fn7
== 0x78) {
2422 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_FIX
);
2424 t32
= tcg_temp_new_i32();
2425 va
= load_fpr(ctx
, ra
);
2426 gen_helper_s_to_memory(t32
, va
);
2427 tcg_gen_ext_i32_i64(vc
, t32
);
2428 tcg_temp_free_i32(t32
);
2432 vb
= load_gpr_lit(ctx
, rb
, lit
, islit
);
2436 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2438 tcg_gen_ext8s_i64(vc
, vb
);
2442 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_BWX
);
2444 tcg_gen_ext16s_i64(vc
, vb
);
2448 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2450 gen_helper_ctpop(vc
, vb
);
2454 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2455 va
= load_gpr(ctx
, ra
);
2456 gen_helper_perr(vc
, va
, vb
);
2460 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2462 gen_helper_ctlz(vc
, vb
);
2466 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_CIX
);
2468 gen_helper_cttz(vc
, vb
);
2472 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2474 gen_helper_unpkbw(vc
, vb
);
2478 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2480 gen_helper_unpkbl(vc
, vb
);
2484 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2486 gen_helper_pkwb(vc
, vb
);
2490 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2492 gen_helper_pklb(vc
, vb
);
2496 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2497 va
= load_gpr(ctx
, ra
);
2498 gen_helper_minsb8(vc
, va
, vb
);
2502 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2503 va
= load_gpr(ctx
, ra
);
2504 gen_helper_minsw4(vc
, va
, vb
);
2508 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2509 va
= load_gpr(ctx
, ra
);
2510 gen_helper_minub8(vc
, va
, vb
);
2514 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2515 va
= load_gpr(ctx
, ra
);
2516 gen_helper_minuw4(vc
, va
, vb
);
2520 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2521 va
= load_gpr(ctx
, ra
);
2522 gen_helper_maxub8(vc
, va
, vb
);
2526 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2527 va
= load_gpr(ctx
, ra
);
2528 gen_helper_maxuw4(vc
, va
, vb
);
2532 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2533 va
= load_gpr(ctx
, ra
);
2534 gen_helper_maxsb8(vc
, va
, vb
);
2538 REQUIRE_TB_FLAG(TB_FLAGS_AMASK_MVI
);
2539 va
= load_gpr(ctx
, ra
);
2540 gen_helper_maxsw4(vc
, va
, vb
);
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            tmp = tcg_temp_new();
            tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
            gen_helper_hw_ret(cpu_env, tmp);
            tcg_temp_free(tmp);
        } else {
            gen_helper_hw_ret(cpu_env, load_gpr(ctx, rb));
        }
        ret = EXIT_PC_UPDATED;
        break;
#else
        goto invalid_opc;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            va = load_gpr(ctx, ra);
            vb = load_gpr(ctx, rb);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(cpu_env, addr, va);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(cpu_env, addr, va);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(dest_gpr(ctx, ra), cpu_env, addr, va);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            default:
                goto invalid_opc;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUAlphaState *env = &cpu->env;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }
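
    /* Within the superpage there are no page permissions to re-check, so
       the TB may run until the 2**41 byte region boundary rather than
       being cut at the next target-page boundary.  */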
    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
            QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        TCGV_UNUSED_I64(ctx.zero);
        TCGV_UNUSED_I64(ctx.sink);
        TCGV_UNUSED_I64(ctx.lit);

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        if (!TCGV_IS_UNUSED_I64(ctx.sink)) {
            tcg_gen_discard_i64(ctx.sink);
            tcg_temp_free(ctx.sink);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.zero)) {
            tcg_temp_free(ctx.zero);
        }
        if (!TCGV_IS_UNUSED_I64(ctx.lit)) {
            tcg_temp_free(ctx.lit);
        }

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);
    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}