/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-timers.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifndef CONFIG_USER_ONLY
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};

/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_constant_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        tcg_temp_free(ctx->sink);
        ctx->sink = NULL;
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        return tcg_constant_i64(lit);
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#ifdef HOST_WORDS_BIGENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}
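/* Illustrative example (assuming ENV_FLAG_PS_SHIFT == 8 as defined in
   target/alpha/cpu.h): the PS byte occupies flags<15:8>, i.e. byte 1 of
   the 32-bit flags word on a little-endian host, and byte 3 - 1 = 2 on a
   big-endian host -- exactly the adjustment get_flag_ofs() computes.  */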
static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_constant_i32(exception);
    tmp2 = tcg_constant_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
    gen_helper_memory_to_f(dest, tmp32);
    tcg_temp_free_i32(tmp32);
}

static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
    gen_helper_memory_to_g(dest, tmp);
    tcg_temp_free(tmp);
}

static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
    gen_helper_memory_to_s(dest, tmp32);
    tcg_temp_free_i32(tmp32);
}

static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ);
}
static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                        void (*func)(DisasContext *, TCGv, TCGv))
{
    /* Loads to $f31 are prefetches, which we can treat as nops.  */
    if (likely(ra != 31)) {
        TCGv addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
        func(ctx, cpu_fir[ra], addr);
        tcg_temp_free(addr);
    }
}

static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
    tcg_temp_free(addr);
}
static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, src);
    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ);
    tcg_temp_free(tmp);
}

static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL);
    tcg_temp_free_i32(tmp32);
}

static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ);
}
static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         void (*func)(DisasContext *, TCGv, TCGv))
{
    TCGv addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    func(ctx, load_fpr(ctx, ra), addr);
    tcg_temp_free(addr);
}

static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                          MemOp op, bool clear)
{
    TCGv addr, src;

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    }

    src = load_gpr(ctx, ra);
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);

    tcg_temp_free(addr);
}
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);
    tcg_temp_free_i64(addr);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_temp_free_i64(val);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
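/* Illustrative note (not part of the original source): a guest LDx_L/STx_C
   pair is emulated by recording the locked address and loaded value, then
   implementing the conditional store as the atomic compare-and-swap above.
   The store can therefore succeed in an ABA case where real hardware would
   fail, which is an accepted relaxation of load-locked semantics.  */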
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = load_zero(ctx);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
        return DISAS_PC_UPDATED;
    }
}
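/* Illustrative note: in the non-goto_tb path above the branch is lowered
   without a branch.  For BNE, cond is TCG_COND_NE and the movcond selects
   d (the taken target) into cpu_pc when cmp != 0, or p (the fall-through
   PC) otherwise, so cpu_pc is correct on either outcome.  */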
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        DisasJumpType ret;
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        tcg_temp_free(tmp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
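/* Worked example (illustrative): for cond == TCG_COND_GE and
   src == 0x8000000000000000 (-0.0), setcondi(NE) produces 0, the negation
   produces an all-zero mask, and the AND maps -0.0 to +0.0.  Any other
   src compares not-equal to mzero, giving an all-ones mask that passes
   the value through unchanged.  */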
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    DisasJumpType ret;
    TCGv cmp_tmp = tcg_temp_new();

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    tcg_temp_free(cmp_tmp);
    return ret;
}

static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));

    tcg_temp_free(va);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
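/* Illustrative decode (assuming the standard Alpha fn11 encodings):
   ADDS/SU carries fn11 = 0x580 = QUAL_S | QUAL_U | QUAL_RM_N, i.e.
   software completion plus underflow trapping with round-to-nearest,
   and ADDS/SUI adds QUAL_I for 0x780.  */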
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);

    tcg_temp_free(tmp);
}
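/* Illustrative layout note: a longword held in an F register keeps memory
   bits <31:30> at register bits <63:62> and bits <29:0> at <58:29>.  The
   logical shift by 29 recovers the low 30 bits, the arithmetic shift by
   32 recovers the sign-extended top two bits, and the deposit stitches
   them into an ordinary sign-extended 32-bit integer.  */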
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);

    tcg_temp_free(tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
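/* Worked example (illustrative): zapnot_mask(0x0f) expands each of the
   low four mask bits into a 0xff byte, yielding 0x00000000ffffffffull,
   which is why gen_zapnoti() below can treat lit == 0x0f as a plain
   32-bit zero-extension.  */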
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
        tcg_temp_free(tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        tcg_temp_free(tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
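/* Worked example (illustrative): EXTBL with rb<2:0> == 5 shifts va right
   by 5 * 8 = 40 bits and then applies byte_mask 0x01 (an 8-bit zero
   extension), leaving byte 5 of the original value in the low byte.  */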
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
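/* Worked example (illustrative): for rb<2:0> == 0 the required right
   shift is 64, which TCG shift operations cannot encode.  The code above
   computes shift = ~(0 * 8) & 63 = 63, shifts by 63 and then by a
   constant 1, for the required total of 64 -- producing zero.  */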
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
        tcg_temp_free(shift);
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */

        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
                tcg_temp_free(tmp);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
        tcg_temp_free(tmp);

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}
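/* Worked example (illustrative, using the standard OSF/1 numbering): an
   unprivileged callsys (CALL_PAL 0x83) reaches do_call_pal and vectors to
   palbr + 0x2000 + (0x83 - 0x80) * 64, while a privileged entry such as
   CALL_PAL 0x0A would land at palbr + 0x1000 + 0x0A * 64.  */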
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !CONFIG_USER_ONLY */

#define REQUIRE_NO_LIT \
    do { if (real_islit) { goto invalid_opc; } } while (0)

#define REQUIRE_AMASK(FLAG) \
    do { if ((ctx->amask & AMASK_##FLAG) == 0) { goto invalid_opc; } } while (0)

#define REQUIRE_TB_FLAG(FLAG) \
    do { if ((ctx->tbflags & (FLAG)) == 0) { goto invalid_opc; } } while (0)

#define REQUIRE_REG_31(WHICH) \
    do { if (WHICH != 31) { goto invalid_opc; } } while (0)

#define REQUIRE_FEN \
    do { if (!(ctx->tbflags & ENV_FLAG_FEN)) { goto raise_fen; } } while (0)
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);

    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
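    /* Illustrative decode (assuming the standard Alpha operate format):
       ADDQ $1,$2,$3 assembles to 0x40220403, which splits into opc = 0x10,
       ra = 1, rb = 2, islit = 0, fn7 = 0x20 and rc = 3 under the
       extract32() calls above.  */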
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01 ... 0x07:
        /* OPC01 ... OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 1, 0);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 1);
        break;
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            tcg_temp_free(tmp);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            tcg_temp_free(tmp);
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            tcg_temp_free(tmp);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            tcg_temp_free(tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            tcg_temp_free(tmp);
            tcg_temp_free(tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        REQUIRE_AMASK(FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x2B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            REQUIRE_FEN;
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            REQUIRE_FEN;
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            REQUIRE_FEN;
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            REQUIRE_FEN;
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x20:
            /* ADDG */
            REQUIRE_FEN;
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            REQUIRE_FEN;
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            REQUIRE_FEN;
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            REQUIRE_FEN;
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            REQUIRE_FEN;
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            REQUIRE_FEN;
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            REQUIRE_FEN;
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            REQUIRE_FEN;
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            REQUIRE_FEN;
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            REQUIRE_FEN;
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            REQUIRE_FEN;
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            REQUIRE_FEN;
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            REQUIRE_FEN;
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            REQUIRE_FEN;
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            REQUIRE_FEN;
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            REQUIRE_FEN;
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            REQUIRE_FEN;
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            REQUIRE_FEN;
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            REQUIRE_FEN;
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            REQUIRE_FEN;
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            REQUIRE_FEN;
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
        case 0xA000:
            /* FETCH_M */
        case 0xE800:
            /* ECB */
        case 0xF800:
            /* WH64 */
        case 0xFC00:
            /* WH64EN */
            /* No-op.  */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
                gen_helper_load_pcc(va, cpu_env);
                ret = DISAS_PC_STALE;
            } else {
                gen_helper_load_pcc(va, cpu_env);
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
        }
        ret = DISAS_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LESL);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX, MO_LEQ);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LESL);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX, MO_LEQ);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            tcg_temp_free_i32(t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            vb = dest_sink(ctx);
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_temp_free(tmp);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL);
                tcg_temp_free(tmp);
                break;
            case 0x1:
                /* Quadword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEQ);
                tcg_temp_free(tmp);
                break;
            case 0x2:
                /* Longword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LESL);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LEQ);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6 ... 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE:
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            break;
        }
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
        break;
    case 0x21:
        /* LDG */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
        break;
    case 0x22:
        /* LDS */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_lds);
        break;
    case 0x23:
        /* LDT */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
        break;
    case 0x24:
        /* STF */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stf);
        break;
    case 0x25:
        /* STG */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stg);
        break;
    case 0x26:
        /* STS */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_sts);
        break;
    case 0x27:
        /* STT */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stt);
        break;
    case 0x28:
        /* LDL */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 1);
        break;
    case 0x2C:
        /* STL */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEQ);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    raise_fen:
        ret = gen_excp(ctx, EXCP_FEN, 0);
        break;
    }

    return ret;
}
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    int64_t bound;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = cpu_mmu_index(env, false);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;

    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
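/* Worked arithmetic (illustrative, assuming Alpha's 8 KiB pages): with
   TARGET_PAGE_MASK == ~0x1fff, pc_first | TARGET_PAGE_MASK equals
   -(0x2000 - offset_in_page) as a signed value, so the negation yields
   the bytes left on the page and the division by 4 converts that to a
   maximum instruction count.  */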
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}

static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
    translator_loop_temp_check(&ctx->base);
}
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        if (!ctx->base.singlestep_enabled) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_PC_UPDATED_NOCHAIN:
        if (ctx->base.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
static void alpha_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;
    translator_loop(&alpha_tr_ops, &dc.base, cpu, tb, max_insns);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}