/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/cpus.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    DisasContextBase base;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#else
    uint64_t palbr;
#endif
    uint32_t tbflags;
    int mem_idx;

    /* implver and amask values for this CPU.  */
    int implver;
    int amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* The set of registers active in the current context.  */
    TCGv *ir;

    /* Temporaries for $31 and $f31 as source and destination.  */
    TCGv zero;
    TCGv sink;
};

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif
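
/*
 * Note on the UNALIGN() policy above: in user-only mode the required
 * alignment is a per-TB property (TB_FLAG_UNALIGN), so unaligned guest
 * accesses can simply be allowed to succeed; in system mode every access
 * is marked MO_ALIGN so that a misaligned address raises the unaligned
 * access trap, matching real hardware.
 */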
/* Target-specific return values from translate_one, indicating the
   state of the TB.  Note that DISAS_NEXT indicates that we are not
   exiting the TB.  */
#define DISAS_PC_UPDATED_NOCHAIN  DISAS_TARGET_0
#define DISAS_PC_UPDATED          DISAS_TARGET_1
#define DISAS_PC_STALE            DISAS_TARGET_2
/* global register indexes */
static TCGv cpu_std_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_value;

#ifndef CONFIG_USER_ONLY
static TCGv cpu_pal_ir[31];
#endif
void alpha_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUAlphaState, V) }

    typedef struct { TCGv *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        DEF_VAR(pc),
        DEF_VAR(lock_addr),
        DEF_VAR(lock_value),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char greg_names[31][4] = {
        "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
        "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
        "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
        "t10", "t11", "ra", "t12", "at", "gp", "sp"
    };
    static const char freg_names[31][4] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30"
    };
#ifndef CONFIG_USER_ONLY
    static const char shadow_names[8][8] = {
        "pal_t7", "pal_s0", "pal_s1", "pal_s2",
        "pal_s3", "pal_s4", "pal_s5", "pal_t11"
    };
#endif

    int i;

    for (i = 0; i < 31; i++) {
        cpu_std_ir[i] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState, ir[i]),
                                               greg_names[i]);
    }

    for (i = 0; i < 31; i++) {
        cpu_fir[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUAlphaState, fir[i]),
                                            freg_names[i]);
    }

#ifndef CONFIG_USER_ONLY
    memcpy(cpu_pal_ir, cpu_std_ir, sizeof(cpu_pal_ir));
    for (i = 0; i < 8; i++) {
        int r = (i == 7 ? 25 : i + 8);
        cpu_pal_ir[r] = tcg_global_mem_new_i64(cpu_env,
                                               offsetof(CPUAlphaState,
                                                        shadow[i]),
                                               shadow_names[i]);
    }
#endif

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name);
    }
}
static TCGv load_zero(DisasContext *ctx)
{
    if (!ctx->zero) {
        ctx->zero = tcg_constant_i64(0);
    }
    return ctx->zero;
}

static TCGv dest_sink(DisasContext *ctx)
{
    if (!ctx->sink) {
        ctx->sink = tcg_temp_new();
    }
    return ctx->sink;
}

static void free_context_temps(DisasContext *ctx)
{
    if (ctx->sink) {
        tcg_gen_discard_i64(ctx->sink);
        ctx->sink = NULL;
    }
}
static TCGv load_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv load_gpr_lit(DisasContext *ctx, unsigned reg,
                         uint8_t lit, bool islit)
{
    if (islit) {
        return tcg_constant_i64(lit);
    } else if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return ctx->ir[reg];
    } else {
        return dest_sink(ctx);
    }
}

static TCGv load_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return load_zero(ctx);
    }
}

static TCGv dest_fpr(DisasContext *ctx, unsigned reg)
{
    if (likely(reg < 31)) {
        return cpu_fir[reg];
    } else {
        return dest_sink(ctx);
    }
}
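
/*
 * Reminder on the $31/$f31 convention implemented above: reads of
 * register 31 always yield zero (via load_zero), and writes to it are
 * routed to a discarded "sink" temporary, so e.g. an operation with
 * destination $31 computes nothing architecturally visible.
 */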
static int get_flag_ofs(unsigned shift)
{
    int ofs = offsetof(CPUAlphaState, flags);
#if HOST_BIG_ENDIAN
    ofs += 3 - (shift / 8);
#else
    ofs += shift / 8;
#endif
    return ofs;
}

static void ld_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift));
}

static void st_flag_byte(TCGv val, unsigned shift)
{
    tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift));
}
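
/*
 * The RX/PS/FEN/PAL flags live packed in the 32-bit env->flags word;
 * get_flag_ofs() turns a flag's bit position into the byte offset of the
 * byte that holds it, so a single flag can be read or written with one
 * 8-bit load or store.  The "3 - shift / 8" adjustment accounts for byte
 * order on big-endian hosts.
 */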
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_constant_i32(exception);
    tmp2 = tcg_constant_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
}

static DisasJumpType gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
    gen_excp_1(exception, error_code);
    return DISAS_NORETURN;
}

static inline DisasJumpType gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_f(dest, tmp32);
}

static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
    gen_helper_memory_to_g(dest, tmp);
}

static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
    gen_helper_memory_to_s(dest, tmp32);
}

static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr)
{
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                        void (*func)(DisasContext *, TCGv, TCGv))
{
    /* Loads to $f31 are prefetches, which we can treat as nops.  */
    if (likely(ra != 31)) {
        TCGv addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
        func(ctx, cpu_fir[ra], addr);
    }
}
static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         MemOp op, bool clear, bool locked)
{
    TCGv addr, dest;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else if (!locked) {
        op |= UNALIGN(ctx);
    }

    dest = ctx->ir[ra];
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op);

    if (locked) {
        tcg_gen_mov_i64(cpu_lock_addr, addr);
        tcg_gen_mov_i64(cpu_lock_value, dest);
    }
}
static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_f_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, src);
    tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}

static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    gen_helper_s_to_memory(tmp32, src);
    tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL | UNALIGN(ctx));
}

static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr)
{
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEUQ | UNALIGN(ctx));
}
static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16,
                         void (*func)(DisasContext *, TCGv, TCGv))
{
    TCGv addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    func(ctx, load_fpr(ctx, ra), addr);
}
static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16,
                          MemOp op, bool clear)
{
    TCGv addr, src;

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    if (clear) {
        tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        op |= UNALIGN(ctx);
    }

    src = load_gpr(ctx, ra);
    tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op);
}
static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                           int32_t disp16, int mem_idx,
                                           MemOp op)
{
    TCGLabel *lab_fail, *lab_done;
    TCGv addr, val;

    addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16);
    free_context_temps(ctx);

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    val = tcg_temp_new_i64();
    tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value,
                               load_gpr(ctx, ra), mem_idx, op);
    free_context_temps(ctx);

    if (ra != 31) {
        tcg_gen_setcond_i64(TCG_COND_EQ, ctx->ir[ra], val, cpu_lock_value);
    }
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], 0);
    }

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
    return DISAS_NEXT;
}
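
/*
 * Note that this models LDx_L/STx_C with a compare-and-swap against the
 * recorded lock_value rather than a true lock flag: the store succeeds if
 * memory still contains the value observed at the load-locked.  That
 * admits ABA cases a real Alpha could fail, but is the standard TCG
 * approximation.  Resetting cpu_lock_addr to -1 guarantees that a stray
 * STx_C without a preceding LDx_L fails.
 */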
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return DISAS_NEXT;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 0);
        return DISAS_NORETURN;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return DISAS_PC_UPDATED;
    }
}
static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                        TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->base.pc_next + (disp << 2);
    TCGLabel *lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        tcg_gen_exit_tb(ctx->base.tb, 0);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb(ctx->base.tb, 1);

        return DISAS_NORETURN;
    } else {
        TCGv_i64 z = load_zero(ctx);
        TCGv_i64 d = tcg_constant_i64(dest);
        TCGv_i64 p = tcg_constant_i64(ctx->base.pc_next);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
        return DISAS_PC_UPDATED;
    }
}
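
/*
 * When the branch target stays chainable we use both goto_tb slots
 * (indexes 0 and 1) so that each direction of the conditional branch can
 * be chained to its successor TB; otherwise the condition is folded into
 * a movcond on cpu_pc and the main loop looks the target up.
 */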
static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                               int32_t disp, int mask)
{
    if (mask) {
        TCGv tmp = tcg_temp_new();
        DisasJumpType ret;

        tcg_gen_andi_i64(tmp, load_gpr(ctx, ra), 1);
        ret = gen_bcond_internal(ctx, cond, tmp, disp);
        return ret;
    }
    return gen_bcond_internal(ctx, cond, load_gpr(ctx, ra), disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
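
/*
 * Worked example for the >= / < case above: setcondi(NE, dest, src,
 * mzero) yields 0 only when src is exactly -0.0 (sign bit set, all other
 * bits clear); negating turns that into an all-zero/all-ones mask, so
 * the final AND maps -0.0 to +0.0 while passing every other bit pattern
 * through unchanged.
 */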
static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                int32_t disp)
{
    TCGv cmp_tmp = tcg_temp_new();
    DisasJumpType ret;

    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
    ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp);
    return ret;
}
static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
{
    TCGv va, vb, z;

    z = load_zero(ctx);
    vb = load_fpr(ctx, rb);
    va = tcg_temp_new();
    gen_fold_mzero(cond, va, load_fpr(ctx, ra));

    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif
}
static TCGv gen_ieee_input(DisasContext *ctx, int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (unlikely(reg == 31)) {
        val = load_zero(ctx);
    } else {
        val = cpu_fir[reg];
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, val);
            } else {
                gen_helper_ieee_input(cpu_env, val);
            }
        } else {
#ifndef CONFIG_USER_ONLY
            /* In system mode, raise exceptions for denormals like real
               hardware.  In user mode, proceed as if the OS completion
               handler is handling the denormal as per spec.  */
            gen_helper_ieee_input_s(cpu_env, val);
#endif
        }
    }
    return val;
}
static void gen_fp_exc_raise(int rc, int fn11)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */

    TCGv_i32 reg, ign;
    uint32_t ignore = 0;

    if (!(fn11 & QUAL_U)) {
        /* Note that QUAL_U == QUAL_V, so ignore either.  */
        ignore |= FPCR_UNF | FPCR_IOV;
    }
    if (!(fn11 & QUAL_I)) {
        ignore |= FPCR_INE;
    }
    ign = tcg_constant_i32(ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_constant_i32(rc + 32);
    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, ign, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, ign, reg);
    }
}
static void gen_cvtlq(TCGv vc, TCGv vb)
{
    TCGv tmp = tcg_temp_new();

    /* The arithmetic right shift here, plus the sign-extended mask below
       yields a sign-extended result without an explicit ext32s_i64.  */
    tcg_gen_shri_i64(tmp, vb, 29);
    tcg_gen_sari_i64(vc, vb, 32);
    tcg_gen_deposit_i64(vc, vc, tmp, 0, 30);
}
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(ctx, rb, fn11, 0);
    helper(dest_fpr(ctx, rc), cpu_env, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_cvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb, vc;

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);

    /* Almost all integer conversions use cropped rounding;
       special case that.  */
    if ((fn11 & QUAL_RM_MASK) == QUAL_RM_C) {
        gen_helper_cvttq_c(vc, cpu_env, vb);
    } else {
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(vc, cpu_env, vb);
    }
    gen_fp_exc_raise(rc, fn11);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb, vc;

    gen_qual_roundmode(ctx, fn11);
    vb = load_fpr(ctx, rb);
    vc = dest_fpr(ctx, rc);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        helper(vc, cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(vc, cpu_env, vb);
    }
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_, name)(DisasContext *ctx,          \
                                    int rb, int rc, int fn11)   \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpy_mask(TCGv vc, TCGv va, TCGv vb, bool inv_a, uint64_t mask)
{
    TCGv vmask = tcg_constant_i64(mask);
    TCGv tmp = tcg_temp_new_i64();

    if (inv_a) {
        tcg_gen_andc_i64(tmp, vmask, va);
    } else {
        tcg_gen_and_i64(tmp, va, vmask);
    }

    tcg_gen_andc_i64(vc, vb, vmask);
    tcg_gen_or_i64(vc, vc, tmp);
}
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ctx, ra, fn11, 0);
    vb = gen_ieee_input(ctx, rb, fn11, 0);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb, vc;

    va = gen_ieee_input(ctx, ra, fn11, 1);
    vb = gen_ieee_input(ctx, rb, fn11, 1);
    vc = dest_fpr(ctx, rc);
    helper(vc, cpu_env, va, vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_, name)(DisasContext *ctx,                  \
                                    int ra, int rb, int rc, int fn11)   \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
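
/*
 * For example: zapnot_mask(0x01) == 0x00000000000000ffull,
 * zapnot_mask(0x0f) == 0x00000000ffffffffull, and
 * zapnot_mask(0xff) == ~0ull -- each set bit in LIT keeps the
 * corresponding byte of the operand.
 */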
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (64 - lit * 8) & 0x3f;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_deposit_z_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, rb), 3);
        tcg_gen_neg_i64(tmp, tmp);
        tcg_gen_andi_i64(tmp, tmp, 0x3f);
        tcg_gen_shl_i64(vc, va, tmp);
    }
    gen_zapnoti(vc, vc, byte_mask);
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len >= 64) {
            len = 64 - pos;
        }
        tcg_gen_extract_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(tmp, tmp, 3);
        tcg_gen_shr_i64(vc, va, tmp);
        gen_zapnoti(vc, vc, byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = 64 - (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos < len) {
            tcg_gen_extract_i64(vc, va, pos, len - pos);
        } else {
            tcg_gen_movi_i64(vc, 0);
        }
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        /* If (B & 7) == 0, we need to shift by 64 and leave a zero.  Do this
           portably by splitting the shift into two parts: shift_count-1 and 1.
           Arrange for the -1 by using ones-complement instead of
           twos-complement in the negation: ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);

        tcg_gen_shr_i64(vc, tmp, shift);
        tcg_gen_shri_i64(vc, vc, 1);
    }
}
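
/*
 * Example of the split shift above: for (B & 7) == 0 we need a right
 * shift of 64, which TCG leaves undefined.  ~(0 * 8) & 63 == 63, so the
 * variable shift moves 63 bits and the constant shri of 1 supplies the
 * 64th, producing the required zero portably.
 */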
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        int pos = (lit & 7) * 8;
        int len = cto32(byte_mask) * 8;
        if (pos + len > 64) {
            len = 64 - pos;
        }
        tcg_gen_deposit_z_i64(vc, va, pos, len);
    } else {
        TCGv tmp = tcg_temp_new();
        TCGv shift = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, va, byte_mask);

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_shl_i64(vc, tmp, shift);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~(B * 8) & 63.  */
        tcg_gen_shli_i64(shift, load_gpr(ctx, rb), 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(vc, va, mask);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(DisasContext *ctx, TCGv vc, TCGv va, int rb, bool islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (islit) {
        gen_zapnoti(vc, va, ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, load_gpr(ctx, rb), 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(vc, va, mask);
    }
}
static void gen_rx(DisasContext *ctx, int ra, int set)
{
    if (ra != 31) {
        ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT);
    }

    st_flag_byte(tcg_constant_i64(set), ENV_FLAG_RX_SHIFT);
}
static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, unique));
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, sysval));
            break;

        case 0x35:
            /* SWPIPL */
            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);

            /* But make sure and store only the 3 IPL bits from the user.  */
            {
                TCGv tmp = tcg_temp_new();
                tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK);
                st_flag_byte(tmp, ENV_FLAG_PS_SHIFT);
            }

            /* Allow interrupts to be recognized right away.  */
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            return DISAS_PC_UPDATED_NOCHAIN;

        case 0x36:
            /* RDPS */
            ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT);
            break;

        case 0x38:
            /* WRUSP */
            tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_ld_i64(ctx->ir[IR_V0], cpu_env,
                           offsetof(CPUAlphaState, usp));
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(ctx->ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        case 0x3E:
            /* WTINT */
            tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                           -offsetof(AlphaCPU, env) +
                           offsetof(CPUState, halted));
            tcg_gen_movi_i64(ctx->ir[IR_V0], 0);
            return gen_excp(ctx, EXCP_HALTED, 0);

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return DISAS_NEXT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv tmp = tcg_temp_new();
        uint64_t exc_addr = ctx->base.pc_next;
        uint64_t entry = ctx->palbr;

        if (ctx->tbflags & ENV_FLAG_PAL_MODE) {
            exc_addr |= 1;
        } else {
            tcg_gen_movi_i64(tmp, 1);
            st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        }

        tcg_gen_movi_i64(tmp, exc_addr);
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));

        entry += (palcode & 0x80
                  ? 0x2000 + (palcode - 0x80) * 64
                  : 0x1000 + palcode * 64);

        tcg_gen_movi_i64(cpu_pc, entry);
        return DISAS_PC_UPDATED;
    }
#endif
}
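
/*
 * Dispatch example for the entry computation above: privileged entry
 * points start at palbr + 0x1000 and unprivileged ones at palbr + 0x2000,
 * each 64 bytes apart.  So CALL_PAL 0x83 (the OSF/1 callsys code) vectors
 * to palbr + 0x2000 + 3 * 64.
 */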
#ifndef CONFIG_USER_ONLY

#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
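
/*
 * PR_LONG tags an env offset to mean "this processor register is 32 bits
 * wide"; gen_mfpr/gen_mtpr below strip the tag and use 32-bit loads and
 * stores for such registers.  The tag bit sits far above any real
 * offsetof() value within CPUAlphaState, so the encoding is unambiguous.
 */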
static DisasJumpType gen_mfpr(DisasContext *ctx, TCGv va, int regno)
{
    void (*helper)(TCGv);
    int data;

    switch (regno) {
    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(va, cpu_std_ir[regno]);
        break;

    case 250: /* WALLTIME */
        helper = gen_helper_get_walltime;
        goto do_helper;
    case 249: /* VMTIME */
        helper = gen_helper_get_vmtime;
    do_helper:
        if (translator_io_start(&ctx->base)) {
            helper(va);
            return DISAS_PC_STALE;
        } else {
            helper(va);
        }
        break;

    case 0: /* PS */
        ld_flag_byte(va, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        ld_flag_byte(va, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data == 0) {
            tcg_gen_movi_i64(va, 0);
        } else if (data & PR_LONG) {
            tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG);
        } else {
            tcg_gen_ld_i64(va, cpu_env, data);
        }
        break;
    }

    return DISAS_NEXT;
}
static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno)
{
    int data;
    DisasJumpType ret = DISAS_NEXT;

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, vb);
        break;

    case 253:
        /* WAIT */
        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       -offsetof(AlphaCPU, env) + offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HALTED, 0);

    case 252:
        /* HALT */
        gen_helper_halt(vb);
        return DISAS_PC_STALE;

    case 251:
        /* ALARM */
        if (translator_io_start(&ctx->base)) {
            ret = DISAS_PC_STALE;
        }
        gen_helper_set_alarm(cpu_env, vb);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(vb, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return DISAS_PC_STALE;

    case 32 ... 39:
        /* Accessing the "non-shadow" general registers.  */
        regno = regno == 39 ? 25 : regno - 32 + 8;
        tcg_gen_mov_i64(cpu_std_ir[regno], vb);
        break;

    case 0: /* PS */
        st_flag_byte(vb, ENV_FLAG_PS_SHIFT);
        break;
    case 1: /* FEN */
        st_flag_byte(vb, ENV_FLAG_FEN_SHIFT);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_LONG) {
                tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(vb, cpu_env, data);
            }
        }
        break;
    }

    return ret;
}
#endif /* !CONFIG_USER_ONLY */
#define REQUIRE_NO_LIT                          \
    do {                                        \
        if (real_islit) {                       \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_AMASK(FLAG)                     \
    do {                                        \
        if ((ctx->amask & AMASK_##FLAG) == 0) { \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_TB_FLAG(FLAG)                   \
    do {                                        \
        if ((ctx->tbflags & (FLAG)) == 0) {     \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_REG_31(WHICH)                   \
    do {                                        \
        if (WHICH != 31) {                      \
            goto invalid_opc;                   \
        }                                       \
    } while (0)

#define REQUIRE_FEN                             \
    do {                                        \
        if (!(ctx->tbflags & ENV_FLAG_FEN)) {   \
            goto raise_fen;                     \
        }                                       \
    } while (0)
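
/*
 * These REQUIRE_* macros expand to a guarded "goto invalid_opc" (or
 * "goto raise_fen"), so they are only usable inside translate_one(),
 * which defines those labels.
 */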
static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn)
{
    int32_t disp21, disp16, disp12 __attribute__((unused));
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, lit;
    bool islit, real_islit;
    TCGv va, vb, vc, tmp, tmp2;
    TCGv_i32 t32;
    DisasJumpType ret;

    /* Decode all instruction fields */
    opc = extract32(insn, 26, 6);
    ra = extract32(insn, 21, 5);
    rb = extract32(insn, 16, 5);
    rc = extract32(insn, 0, 5);
    real_islit = islit = extract32(insn, 12, 1);
    lit = extract32(insn, 13, 8);

    disp21 = sextract32(insn, 0, 21);
    disp16 = sextract32(insn, 0, 16);
    disp12 = sextract32(insn, 0, 12);

    fn11 = extract32(insn, 5, 11);
    fpfn = extract32(insn, 5, 6);
    fn7 = extract32(insn, 5, 7);
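
    /*
     * For reference, the Alpha instruction formats being decoded are:
     *   memory:  opc[31:26] ra[25:21] rb[20:16] disp16[15:0]
     *   branch:  opc[31:26] ra[25:21] disp21[20:0]
     *   operate: opc[31:26] ra[25:21] rb[20:16] sbz[15:13] 0[12]
     *            fn7[11:5] rc[4:0], or with bit 12 set, lit[20:13]
     *            replacing rb
     *   fp op:   opc[31:26] fa[25:21] fb[20:16] fn11[15:5] fc[4:0]
     */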
    if (rb == 31 && !islit) {
        islit = true;
        lit = 0;
    }

    ret = DISAS_NEXT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, insn & 0x03ffffff);
        break;
    case 0x01:
        /* OPC01 */
        goto invalid_opc;
    case 0x02:
        /* OPC02 */
        goto invalid_opc;
    case 0x03:
        /* OPC03 */
        goto invalid_opc;
    case 0x04:
        /* OPC04 */
        goto invalid_opc;
    case 0x05:
        /* OPC05 */
        goto invalid_opc;
    case 0x06:
        /* OPC06 */
        goto invalid_opc;
    case 0x07:
        /* OPC07 */
        goto invalid_opc;

    case 0x09:
        /* LDAH */
        disp16 = (uint32_t)disp16 << 16;
        /* fall through */
    case 0x08:
        /* LDA */
        va = dest_gpr(ctx, ra);
        /* It's worth special-casing immediate loads.  */
        if (rb == 31) {
            tcg_gen_movi_i64(va, disp16);
        } else {
            tcg_gen_addi_i64(va, load_gpr(ctx, rb), disp16);
        }
        break;

    case 0x0A:
        /* LDBU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 1, 0);
        break;
    case 0x0C:
        /* LDWU */
        REQUIRE_AMASK(BWX);
        gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0);
        break;
    case 0x0D:
        /* STW */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0);
        break;
    case 0x0E:
        /* STB */
        REQUIRE_AMASK(BWX);
        gen_store_int(ctx, ra, rb, disp16, MO_UB, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 1);
        break;
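
    /*
     * Note on the "clear" forms above: LDQ_U and STQ_U pass clear=1, which
     * makes gen_load_int/gen_store_int mask the low three address bits
     * (addr & ~7), giving the architected "unaligned" quadword access that
     * always targets the containing aligned quadword.
     */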
    case 0x10:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (ra == 31) {
            if (fn7 == 0x00) {
                /* Special case ADDL as SEXTL.  */
                tcg_gen_ext32s_i64(vc, vb);
                break;
            }
            if (fn7 == 0x29) {
                /* Special case SUBQ as NEGQ.  */
                tcg_gen_neg_i64(vc, vb);
                break;
            }
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* ADDL */
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x02:
            /* S4ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x09:
            /* SUBL */
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x0B:
            /* S4SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x0F:
            /* CMPBGE */
            if (ra == 31) {
                /* Special case 0 >= X as X == 0.  */
                gen_helper_cmpbe0(vc, vb);
            } else {
                gen_helper_cmpbge(vc, va, vb);
            }
            break;
        case 0x12:
            /* S8ADDL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1B:
            /* S8SUBL */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(tmp, tmp, vb);
            tcg_gen_ext32s_i64(vc, tmp);
            break;
        case 0x1D:
            /* CMPULT */
            tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb);
            break;
        case 0x20:
            /* ADDQ */
            tcg_gen_add_i64(vc, va, vb);
            break;
        case 0x22:
            /* S4ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x29:
            /* SUBQ */
            tcg_gen_sub_i64(vc, va, vb);
            break;
        case 0x2B:
            /* S4SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 2);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x2D:
            /* CMPEQ */
            tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb);
            break;
        case 0x32:
            /* S8ADDQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_add_i64(vc, tmp, vb);
            break;
        case 0x3B:
            /* S8SUBQ */
            tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, va, 3);
            tcg_gen_sub_i64(vc, tmp, vb);
            break;
        case 0x3D:
            /* CMPULE */
            tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb);
            break;
        case 0x40:
            /* ADDL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_add_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            break;
        case 0x49:
            /* SUBL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_sub_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            break;
        case 0x4D:
            /* CMPLT */
            tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb);
            break;
        case 0x60:
            /* ADDQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_eqv_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_add_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            break;
        case 0x69:
            /* SUBQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_xor_i64(tmp, va, vb);
            tcg_gen_mov_i64(tmp2, va);
            tcg_gen_sub_i64(vc, va, vb);
            tcg_gen_xor_i64(tmp2, tmp2, vc);
            tcg_gen_and_i64(tmp, tmp, tmp2);
            tcg_gen_shri_i64(tmp, tmp, 63);
            tcg_gen_movi_i64(tmp2, 0);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            break;
        case 0x6D:
            /* CMPLE */
            tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
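
    /*
     * On the ADDQ/V and SUBQ/V sequences above: signed overflow occurred
     * iff the operands had suitable signs (equal for add, differing for
     * sub) while the result sign differs from the first operand; the
     * eqv/xor, xor and and operations distill exactly that predicate into
     * bit 63, which the shri exposes for check_overflow.
     */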
    case 0x11:
        if (fn7 == 0x20) {
            if (rc == 31) {
                /* Special case BIS as NOP.  */
                break;
            }
            if (ra == 31) {
                /* Special case BIS as MOV.  */
                vc = dest_gpr(ctx, rc);
                if (islit) {
                    tcg_gen_movi_i64(vc, lit);
                } else {
                    tcg_gen_mov_i64(vc, load_gpr(ctx, rb));
                }
                break;
            }
        }

        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);

        if (fn7 == 0x28 && ra == 31) {
            /* Special case ORNOT as NOT.  */
            tcg_gen_not_i64(vc, vb);
            break;
        }

        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* AND */
            tcg_gen_and_i64(vc, va, vb);
            break;
        case 0x08:
            /* BIC */
            tcg_gen_andc_i64(vc, va, vb);
            break;
        case 0x14:
            /* CMOVLBS */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_NE, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x16:
            /* CMOVLBC */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, va, 1);
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, tmp, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x20:
            /* BIS */
            tcg_gen_or_i64(vc, va, vb);
            break;
        case 0x24:
            /* CMOVEQ */
            tcg_gen_movcond_i64(TCG_COND_EQ, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x26:
            /* CMOVNE */
            tcg_gen_movcond_i64(TCG_COND_NE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x28:
            /* ORNOT */
            tcg_gen_orc_i64(vc, va, vb);
            break;
        case 0x40:
            /* XOR */
            tcg_gen_xor_i64(vc, va, vb);
            break;
        case 0x44:
            /* CMOVLT */
            tcg_gen_movcond_i64(TCG_COND_LT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x46:
            /* CMOVGE */
            tcg_gen_movcond_i64(TCG_COND_GE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x48:
            /* EQV */
            tcg_gen_eqv_i64(vc, va, vb);
            break;
        case 0x61:
            /* AMASK */
            REQUIRE_REG_31(ra);
            tcg_gen_andi_i64(vc, vb, ~ctx->amask);
            break;
        case 0x64:
            /* CMOVLE */
            tcg_gen_movcond_i64(TCG_COND_LE, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x66:
            /* CMOVGT */
            tcg_gen_movcond_i64(TCG_COND_GT, vc, va, load_zero(ctx),
                                vb, load_gpr(ctx, rc));
            break;
        case 0x6C:
            /* IMPLVER */
            REQUIRE_REG_31(ra);
            tcg_gen_movi_i64(vc, ctx->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        vc = dest_gpr(ctx, rc);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            if (islit) {
                gen_zapnoti(vc, va, ~lit);
            } else {
                gen_helper_zap(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x31:
            /* ZAPNOT */
            if (islit) {
                gen_zapnoti(vc, va, lit);
            } else {
                gen_helper_zapnot(vc, va, load_gpr(ctx, rb));
            }
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (islit) {
                tcg_gen_shri_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shr_i64(vc, va, tmp);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (islit) {
                tcg_gen_shli_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_shl_i64(vc, va, tmp);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (islit) {
                tcg_gen_sari_i64(vc, va, lit & 0x3f);
            } else {
                tmp = tcg_temp_new();
                vb = load_gpr(ctx, rb);
                tcg_gen_andi_i64(tmp, vb, 0x3f);
                tcg_gen_sar_i64(vc, va, tmp);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ctx, vc, va, rb, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        vc = dest_gpr(ctx, rc);
        vb = load_gpr_lit(ctx, rb, lit, islit);
        va = load_gpr(ctx, ra);
        switch (fn7) {
        case 0x00:
            /* MULL */
            tcg_gen_mul_i64(vc, va, vb);
            tcg_gen_ext32s_i64(vc, vc);
            break;
        case 0x20:
            /* MULQ */
            tcg_gen_mul_i64(vc, va, vb);
            break;
        case 0x30:
            /* UMULH */
            tmp = tcg_temp_new();
            tcg_gen_mulu2_i64(tmp, vc, va, vb);
            break;
        case 0x40:
            /* MULL/V */
            tmp = tcg_temp_new();
            tcg_gen_ext32s_i64(tmp, va);
            tcg_gen_ext32s_i64(vc, vb);
            tcg_gen_mul_i64(tmp, tmp, vc);
            tcg_gen_ext32s_i64(vc, tmp);
            gen_helper_check_overflow(cpu_env, vc, tmp);
            break;
        case 0x60:
            /* MULQ/V */
            tmp = tcg_temp_new();
            tmp2 = tcg_temp_new();
            tcg_gen_muls2_i64(vc, tmp, va, vb);
            tcg_gen_sari_i64(tmp2, vc, 63);
            gen_helper_check_overflow(cpu_env, tmp, tmp2);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        REQUIRE_AMASK(FIX);
        vc = dest_fpr(ctx, rc);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_s(vc, t32);
            break;
        case 0x0A:
            /* SQRTF */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtf(vc, cpu_env, vb);
            break;
        case 0x0B:
            /* SQRTS */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_gpr(ctx, ra);
            tcg_gen_extrl_i64_i32(t32, va);
            gen_helper_memory_to_f(vc, t32);
            break;
        case 0x24:
            /* ITOFT */
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_gpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        case 0x2A:
            /* SQRTG */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vb = load_fpr(ctx, rb);
            gen_helper_sqrtg(vc, cpu_env, vb);
            break;
        case 0x02B:
            /* SQRTT */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            gen_sqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        REQUIRE_FEN;
        vc = dest_fpr(ctx, rc);
        vb = load_fpr(ctx, rb);
        va = load_fpr(ctx, ra);
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_helper_addf(vc, cpu_env, va, vb);
            break;
        case 0x01:
            /* SUBF */
            gen_helper_subf(vc, cpu_env, va, vb);
            break;
        case 0x02:
            /* MULF */
            gen_helper_mulf(vc, cpu_env, va, vb);
            break;
        case 0x03:
            /* DIVF */
            gen_helper_divf(vc, cpu_env, va, vb);
            break;
        case 0x20:
            /* ADDG */
            gen_helper_addg(vc, cpu_env, va, vb);
            break;
        case 0x21:
            /* SUBG */
            gen_helper_subg(vc, cpu_env, va, vb);
            break;
        case 0x22:
            /* MULG */
            gen_helper_mulg(vc, cpu_env, va, vb);
            break;
        case 0x23:
            /* DIVG */
            gen_helper_divg(vc, cpu_env, va, vb);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_helper_cmpgeq(vc, cpu_env, va, vb);
            break;
        case 0x26:
            /* CMPGLT */
            gen_helper_cmpglt(vc, cpu_env, va, vb);
            break;
        case 0x27:
            /* CMPGLE */
            gen_helper_cmpgle(vc, cpu_env, va, vb);
            break;
        case 0x2C:
            /* CVTGF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgf(vc, cpu_env, vb);
            break;
        case 0x2F:
            /* CVTGQ */
            REQUIRE_REG_31(ra);
            gen_helper_cvtgq(vc, cpu_env, vb);
            break;
        case 0x3C:
            /* CVTQF */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqf(vc, cpu_env, vb);
            break;
        case 0x3E:
            /* CVTQG */
            REQUIRE_REG_31(ra);
            gen_helper_cvtqg(vc, cpu_env, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        REQUIRE_FEN;
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_adds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_subs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_muls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_divs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_addt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_subt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_mult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_divt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_cmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_cmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_cmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_cmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            REQUIRE_REG_31(ra);
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_cvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_cvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            REQUIRE_REG_31(ra);
            gen_cvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            REQUIRE_REG_31(ra);
            gen_cvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            REQUIRE_REG_31(ra);
            gen_cvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_cvtlq(vc, vb);
            break;
        case 0x020:
            /* CPYS */
            REQUIRE_FEN;
            if (rc == 31) {
                /* Special case CPYS as FNOP.  */
            } else {
                vc = dest_fpr(ctx, rc);
                va = load_fpr(ctx, ra);
                if (ra == rb) {
                    /* Special case CPYS as FMOV.  */
                    tcg_gen_mov_i64(vc, va);
                } else {
                    vb = load_fpr(ctx, rb);
                    gen_cpy_mask(vc, va, vb, 0, 0x8000000000000000ULL);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 1, 0x8000000000000000ULL);
            break;
        case 0x022:
            /* CPYSE */
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            va = load_fpr(ctx, ra);
            gen_cpy_mask(vc, va, vb, 0, 0xFFF0000000000000ULL);
            break;
        case 0x024:
            /* MT_FPCR */
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            gen_helper_store_fpcr(cpu_env, va);
            if (ctx->tb_rm == QUAL_RM_D) {
                /* Re-do the copy of the rounding mode to fp_status
                   the next time we use dynamic rounding.  */
                ctx->tb_rm = -1;
            }
            break;
        case 0x025:
            /* MF_FPCR */
            REQUIRE_FEN;
            va = dest_fpr(ctx, ra);
            gen_helper_load_fpcr(va, cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            REQUIRE_FEN;
            gen_fcmov(ctx, TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030: /* CVTQL */
        case 0x130: /* CVTQL/V */
        case 0x530: /* CVTQL/SV */
            REQUIRE_REG_31(ra);
            REQUIRE_FEN;
            vc = dest_fpr(ctx, rc);
            vb = load_fpr(ctx, rb);
            gen_helper_cvtql(vc, cpu_env, vb);
            gen_fp_exc_raise(rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;
        case 0x4400:
            /* WMB */
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            va = dest_gpr(ctx, ra);
            if (translator_io_start(&ctx->base)) {
                ret = DISAS_PC_STALE;
            }
            gen_helper_load_pcc(va, cpu_env);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ctx, ra, 0);
            break;
        case 0xE800:
            /* ECB */
            break;
        case 0xF000:
            /* RS */
            gen_rx(ctx, ra, 1);
            break;
        case 0xF800:
            /* WH64 */
            /* No-op */
            break;
        case 0xFC00:
            /* WH64EN */
            /* No-op */
            break;
        default:
            goto invalid_opc;
        }
        break;
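
    /*
     * Note for the RPCC (0xC000) case above: when icount is active,
     * translator_io_start() forces the TB to end (DISAS_PC_STALE) so
     * the cycle-counter read stays precise.
     */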
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        va = dest_gpr(ctx, ra);
        ret = gen_mfpr(ctx, va, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        vb = load_gpr(ctx, rb);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        if (ra != 31) {
            tcg_gen_movi_i64(ctx->ir[ra], ctx->base.pc_next);
        }
        ret = DISAS_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            TCGv addr = tcg_temp_new();
            vb = load_gpr(ctx, rb);
            va = dest_gpr(ctx, ra);

            tcg_gen_addi_i64(addr, vb, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                tcg_gen_mov_i64(cpu_lock_addr, addr);
                tcg_gen_mov_i64(cpu_lock_value, va);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6 ... 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_KERNEL_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LESL | MO_ALIGN);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld_i64(va, addr, MMU_USER_IDX,
                                    MO_LEUQ | MO_ALIGN);
                break;
            }
        }
        break;
#else
        goto invalid_opc;
#endif
    case 0x1C:
        vc = dest_gpr(ctx, rc);
        if (fn7 == 0x70) {
            /* FTOIT */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            va = load_fpr(ctx, ra);
            tcg_gen_mov_i64(vc, va);
            break;
        } else if (fn7 == 0x78) {
            /* FTOIS */
            REQUIRE_AMASK(FIX);
            REQUIRE_REG_31(rb);
            REQUIRE_FEN;
            t32 = tcg_temp_new_i32();
            va = load_fpr(ctx, ra);
            gen_helper_s_to_memory(t32, va);
            tcg_gen_ext_i32_i64(vc, t32);
            break;
        }

        vb = load_gpr_lit(ctx, rb, lit, islit);
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext8s_i64(vc, vb);
            break;
        case 0x01:
            /* SEXTW */
            REQUIRE_AMASK(BWX);
            REQUIRE_REG_31(ra);
            tcg_gen_ext16s_i64(vc, vb);
            break;
        case 0x30:
            /* CTPOP */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctpop_i64(vc, vb);
            break;
        case 0x31:
            /* PERR */
            REQUIRE_AMASK(MVI);
            REQUIRE_NO_LIT;
            va = load_gpr(ctx, ra);
            gen_helper_perr(vc, va, vb);
            break;
        case 0x32:
            /* CTLZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_clzi_i64(vc, vb, 64);
            break;
        case 0x33:
            /* CTTZ */
            REQUIRE_AMASK(CIX);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            tcg_gen_ctzi_i64(vc, vb, 64);
            break;
        case 0x34:
            /* UNPKBW */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbw(vc, vb);
            break;
        case 0x35:
            /* UNPKBL */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_unpkbl(vc, vb);
            break;
        case 0x36:
            /* PKWB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pkwb(vc, vb);
            break;
        case 0x37:
            /* PKLB */
            REQUIRE_AMASK(MVI);
            REQUIRE_REG_31(ra);
            REQUIRE_NO_LIT;
            gen_helper_pklb(vc, vb);
            break;
        case 0x38:
            /* MINSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsb8(vc, va, vb);
            break;
        case 0x39:
            /* MINSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minsw4(vc, va, vb);
            break;
        case 0x3A:
            /* MINUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minub8(vc, va, vb);
            break;
        case 0x3B:
            /* MINUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_minuw4(vc, va, vb);
            break;
        case 0x3C:
            /* MAXUB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxub8(vc, va, vb);
            break;
        case 0x3D:
            /* MAXUW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxuw4(vc, va, vb);
            break;
        case 0x3E:
            /* MAXSB8 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsb8(vc, va, vb);
            break;
        case 0x3F:
            /* MAXSW4 */
            REQUIRE_AMASK(MVI);
            va = load_gpr(ctx, ra);
            gen_helper_maxsw4(vc, va, vb);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        vb = load_gpr(ctx, rb);
        ret = gen_mtpr(ctx, vb, insn & 0xffff);
        break;
#else
        goto invalid_opc;
#endif

    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        if (rb == 31) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            vb = dest_sink(ctx);
            tcg_gen_ld_i64(vb, cpu_env, offsetof(CPUAlphaState, exc_addr));
        } else {
            vb = load_gpr(ctx, rb);
        }
        tcg_gen_movi_i64(cpu_lock_addr, -1);
        st_flag_byte(load_zero(ctx), ENV_FLAG_RX_SHIFT);
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, vb, 1);
        st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT);
        tcg_gen_andi_i64(cpu_pc, vb, ~3);
        /* Allow interrupts to be recognized right away.  */
        ret = DISAS_PC_UPDATED_NOCHAIN;
        break;
#else
        goto invalid_opc;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE);
        {
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x1:
                /* Quadword physical access */
                va = load_gpr(ctx, ra);
                vb = load_gpr(ctx, rb);
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, vb, disp12);
                tcg_gen_qemu_st_i64(va, tmp, MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x2:
                /* Longword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LESL | MO_ALIGN);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                ret = gen_store_conditional(ctx, ra, rb, disp12,
                                            MMU_PHYS_IDX, MO_LEUQ | MO_ALIGN);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0x6 ... 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xE ... 0xF:
                /* Invalid */
                goto invalid_opc;
            }
        }
        break;
#else
        goto invalid_opc;
#endif
    case 0x20:
        /* LDF */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldf);
        break;
    case 0x21:
        /* LDG */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldg);
        break;
    case 0x22:
        /* LDS */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_lds);
        break;
    case 0x23:
        /* LDT */
        REQUIRE_FEN;
        gen_load_fp(ctx, ra, rb, disp16, gen_ldt);
        break;
    case 0x24:
        /* STF */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stf);
        break;
    case 0x25:
        /* STG */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stg);
        break;
    case 0x26:
        /* STS */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_sts);
        break;
    case 0x27:
        /* STT */
        REQUIRE_FEN;
        gen_store_fp(ctx, ra, rb, disp16, gen_stt);
        break;
    case 0x28:
        /* LDL */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LESL | MO_ALIGN, 0, 1);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_int(ctx, ra, rb, disp16, MO_LEUQ | MO_ALIGN, 0, 1);
        break;
    case 0x2C:
        /* STL */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_int(ctx, ra, rb, disp16, MO_LEUQ, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LESL | MO_ALIGN);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16,
                                    ctx->mem_idx, MO_LEUQ | MO_ALIGN);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        REQUIRE_FEN;
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    raise_fen:
        ret = gen_excp(ctx, EXCP_FEN, 0);
        break;
    }

    return ret;
}
static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    int64_t bound;

    ctx->tbflags = ctx->base.tb->flags;
    ctx->mem_idx = cpu_mmu_index(env, false);
    ctx->implver = env->implver;
    ctx->amask = env->amask;

#ifdef CONFIG_USER_ONLY
    ctx->ir = cpu_std_ir;
    ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->palbr = env->palbr;
    ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir);
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx->tb_rm = -1;

    /* Similarly for flush-to-zero.  */
    ctx->tb_ftz = -1;

    ctx->zero = NULL;
    ctx->sink = NULL;

    /* Bound the number of insns to execute to those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
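
/*
 * Example of the page-bound computation above: Alpha uses 8KiB target
 * pages (TARGET_PAGE_BITS == 13), so for pc_first == page_base + 0x1f00,
 * -(pc_first | TARGET_PAGE_MASK) == 0x2000 - 0x1f00 == 0x100 bytes,
 * i.e. a bound of 64 instructions to the end of the page.
 */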
static void alpha_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void alpha_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}

static void alpha_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUAlphaState *env = cpu->env_ptr;
    uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

    ctx->base.pc_next += 4;
    ctx->base.is_jmp = translate_one(ctx, insn);

    free_context_temps(ctx);
}
static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    switch (ctx->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        if (use_goto_tb(ctx, ctx->base.pc_next)) {
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
            tcg_gen_exit_tb(ctx->base.tb, 0);
        }
        /* FALLTHRU */
    case DISAS_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_PC_UPDATED_NOCHAIN:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
static void alpha_tr_disas_log(const DisasContextBase *dcbase,
                               CPUState *cpu, FILE *logfile)
{
    fprintf(logfile, "IN: %s\n", lookup_symbol(dcbase->pc_first));
    target_disas(logfile, cpu, dcbase->pc_first, dcbase->tb->size);
}
static const TranslatorOps alpha_tr_ops = {
    .init_disas_context = alpha_tr_init_disas_context,
    .tb_start           = alpha_tr_tb_start,
    .insn_start         = alpha_tr_insn_start,
    .translate_insn     = alpha_tr_translate_insn,
    .tb_stop            = alpha_tr_tb_stop,
    .disas_log          = alpha_tr_disas_log,
};

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;
    translator_loop(cpu, tb, max_insns, pc, host_pc, &alpha_tr_ops, &dc.base);
}