/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];

#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
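
/* Illustrative numbers for the page check above (assuming the 8K Alpha
   target page): a TB starting at 0x10004 may chain directly to dest
   0x11ff8, since the XOR of the two addresses stays below the page size,
   but not to 0x12000, which lies on the next page and so fails the
   TARGET_PAGE_MASK test.  */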
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using a movcond (e.g. "movcond pc, cond, 0, tmp, pc")
           instead of branches here; the current diamond subgraph surely
           isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
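
/* Note on the two exit_tb values above (descriptive, not from the original
   source): by QEMU convention the value passed to tcg_gen_exit_tb() is the
   TB pointer with the goto_tb jump-slot index in its low bits, so "tb" and
   "tb + 1" let the execution loop patch the not-taken and taken slots of
   this TB respectively when chaining to the next one.  */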
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
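
/* Illustrative bit patterns for the folding above: -0.0 is stored as
   0x8000000000000000.  For EQ/NE the (mzero - 1) mask clears the sign bit,
   so +0.0 and -0.0 both become 0 and compare equal to zero.  For GE/LT the
   setcond/neg pair produces all-ones for any value other than -0.0 and zero
   for -0.0 itself, so the final AND turns -0.0 into +0.0 and leaves every
   other value unchanged.  */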
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit) {
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    } else {
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    }
    gen_set_label(l1);
}
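
/* Example of the generated pattern (illustrative): for CMOVNE r1, r2, r3 the
   inverted condition is EQ, so the emitted code branches over the move when
   r1 == 0 and otherwise copies r2 (or the literal operand) into r3.  */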
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31) {
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    } else {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    }
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
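
/* As a concrete (illustrative) decode: an fn11 field of 0x580 carries
   QUAL_S | QUAL_U | QUAL_RM_N, i.e. software completion, underflow enable,
   and round-to-nearest-even, while fn11 & QUAL_RM_MASK extracts just the
   two rounding-mode bits.  */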
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
736 static void gen_fcvtlq(int rb
, int rc
)
738 if (unlikely(rc
== 31)) {
741 if (unlikely(rb
== 31)) {
742 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
744 TCGv tmp
= tcg_temp_new();
746 /* The arithmetic right shift here, plus the sign-extended mask below
747 yields a sign-extended result without an explicit ext32s_i64. */
748 tcg_gen_sari_i64(tmp
, cpu_fir
[rb
], 32);
749 tcg_gen_shri_i64(cpu_fir
[rc
], cpu_fir
[rb
], 29);
750 tcg_gen_andi_i64(tmp
, tmp
, (int32_t)0xc0000000);
751 tcg_gen_andi_i64(cpu_fir
[rc
], cpu_fir
[rc
], 0x3fffffff);
752 tcg_gen_or_i64(cpu_fir
[rc
], cpu_fir
[rc
], tmp
);
758 static void gen_fcvtql(int rb
, int rc
)
760 if (unlikely(rc
== 31)) {
763 if (unlikely(rb
== 31)) {
764 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
766 TCGv tmp
= tcg_temp_new();
768 tcg_gen_andi_i64(tmp
, cpu_fir
[rb
], 0xC0000000);
769 tcg_gen_andi_i64(cpu_fir
[rc
], cpu_fir
[rb
], 0x3FFFFFFF);
770 tcg_gen_shli_i64(tmp
, tmp
, 32);
771 tcg_gen_shli_i64(cpu_fir
[rc
], cpu_fir
[rc
], 29);
772 tcg_gen_or_i64(cpu_fir
[rc
], cpu_fir
[rc
], tmp
);
778 static void gen_fcvtql_v(DisasContext
*ctx
, int rb
, int rc
)
781 int lab
= gen_new_label();
782 TCGv tmp
= tcg_temp_new();
784 tcg_gen_ext32s_i64(tmp
, cpu_fir
[rb
]);
785 tcg_gen_brcond_i64(TCG_COND_EQ
, tmp
, cpu_fir
[rb
], lab
);
786 gen_excp(ctx
, EXCP_ARITH
, EXC_M_IOV
);
793 #define FARITH2(name) \
794 static inline void glue(gen_f, name)(int rb, int rc) \
796 if (unlikely(rc == 31)) { \
800 gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]); \
802 TCGv tmp = tcg_const_i64(0); \
803 gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp); \
804 tcg_temp_free(tmp); \
808 /* ??? VAX instruction qualifiers ignored. */
816 static void gen_ieee_arith2(DisasContext
*ctx
,
817 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
818 int rb
, int rc
, int fn11
)
822 /* ??? This is wrong: the instruction is not a nop, it still may
824 if (unlikely(rc
== 31)) {
828 gen_qual_roundmode(ctx
, fn11
);
829 gen_qual_flushzero(ctx
, fn11
);
832 vb
= gen_ieee_input(rb
, fn11
, 0);
833 helper(cpu_fir
[rc
], cpu_env
, vb
);
836 gen_fp_exc_raise(rc
, fn11
);
839 #define IEEE_ARITH2(name) \
840 static inline void glue(gen_f, name)(DisasContext *ctx, \
841 int rb, int rc, int fn11) \
843 gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11); \
850 static void gen_fcvttq(DisasContext
*ctx
, int rb
, int rc
, int fn11
)
855 /* ??? This is wrong: the instruction is not a nop, it still may
857 if (unlikely(rc
== 31)) {
861 /* No need to set flushzero, since we have an integer output. */
863 vb
= gen_ieee_input(rb
, fn11
, 0);
865 /* Almost all integer conversions use cropped rounding, and most
866 also do not have integer overflow enabled. Special case that. */
869 gen_helper_cvttq_c(cpu_fir
[rc
], cpu_env
, vb
);
871 case QUAL_V
| QUAL_RM_C
:
872 case QUAL_S
| QUAL_V
| QUAL_RM_C
:
873 ignore
= float_flag_inexact
;
875 case QUAL_S
| QUAL_V
| QUAL_I
| QUAL_RM_C
:
876 gen_helper_cvttq_svic(cpu_fir
[rc
], cpu_env
, vb
);
879 gen_qual_roundmode(ctx
, fn11
);
880 gen_helper_cvttq(cpu_fir
[rc
], cpu_env
, vb
);
881 ignore
|= (fn11
& QUAL_V
? 0 : float_flag_overflow
);
882 ignore
|= (fn11
& QUAL_I
? 0 : float_flag_inexact
);
887 gen_fp_exc_raise_ignore(rc
, fn11
, ignore
);
890 static void gen_ieee_intcvt(DisasContext
*ctx
,
891 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
),
892 int rb
, int rc
, int fn11
)
896 /* ??? This is wrong: the instruction is not a nop, it still may
898 if (unlikely(rc
== 31)) {
902 gen_qual_roundmode(ctx
, fn11
);
905 vb
= tcg_const_i64(0);
910 /* The only exception that can be raised by integer conversion
911 is inexact. Thus we only need to worry about exceptions when
912 inexact handling is requested. */
915 helper(cpu_fir
[rc
], cpu_env
, vb
);
916 gen_fp_exc_raise(rc
, fn11
);
918 helper(cpu_fir
[rc
], cpu_env
, vb
);
926 #define IEEE_INTCVT(name) \
927 static inline void glue(gen_f, name)(DisasContext *ctx, \
928 int rb, int rc, int fn11) \
930 gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11); \
935 static void gen_cpys_internal(int ra
, int rb
, int rc
, int inv_a
, uint64_t mask
)
940 if (unlikely(rc
== 31)) {
944 vmask
= tcg_const_i64(mask
);
954 va
= tcg_temp_new_i64();
955 tcg_gen_mov_i64(va
, cpu_fir
[ra
]);
957 tcg_gen_andc_i64(va
, vmask
, va
);
959 tcg_gen_and_i64(va
, va
, vmask
);
967 vb
= tcg_temp_new_i64();
968 tcg_gen_andc_i64(vb
, cpu_fir
[rb
], vmask
);
971 switch (za
<< 1 | zb
) {
973 tcg_gen_or_i64(cpu_fir
[rc
], va
, vb
);
976 tcg_gen_mov_i64(cpu_fir
[rc
], va
);
979 tcg_gen_mov_i64(cpu_fir
[rc
], vb
);
982 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
986 tcg_temp_free(vmask
);
995 static inline void gen_fcpys(int ra
, int rb
, int rc
)
997 gen_cpys_internal(ra
, rb
, rc
, 0, 0x8000000000000000ULL
);
1000 static inline void gen_fcpysn(int ra
, int rb
, int rc
)
1002 gen_cpys_internal(ra
, rb
, rc
, 1, 0x8000000000000000ULL
);
1005 static inline void gen_fcpyse(int ra
, int rb
, int rc
)
1007 gen_cpys_internal(ra
, rb
, rc
, 0, 0xFFF0000000000000ULL
);
1010 #define FARITH3(name) \
1011 static inline void glue(gen_f, name)(int ra, int rb, int rc) \
1015 if (unlikely(rc == 31)) { \
1019 va = tcg_const_i64(0); \
1024 vb = tcg_const_i64(0); \
1029 gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb); \
1032 tcg_temp_free(va); \
1035 tcg_temp_free(vb); \
1039 /* ??? VAX instruction qualifiers ignored. */
1052 static void gen_ieee_arith3(DisasContext
*ctx
,
1053 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
1054 int ra
, int rb
, int rc
, int fn11
)
1058 /* ??? This is wrong: the instruction is not a nop, it still may
1059 raise exceptions. */
1060 if (unlikely(rc
== 31)) {
1064 gen_qual_roundmode(ctx
, fn11
);
1065 gen_qual_flushzero(ctx
, fn11
);
1068 va
= gen_ieee_input(ra
, fn11
, 0);
1069 vb
= gen_ieee_input(rb
, fn11
, 0);
1070 helper(cpu_fir
[rc
], cpu_env
, va
, vb
);
1074 gen_fp_exc_raise(rc
, fn11
);
1077 #define IEEE_ARITH3(name) \
1078 static inline void glue(gen_f, name)(DisasContext *ctx, \
1079 int ra, int rb, int rc, int fn11) \
1081 gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11); \
1092 static void gen_ieee_compare(DisasContext
*ctx
,
1093 void (*helper
)(TCGv
, TCGv_ptr
, TCGv
, TCGv
),
1094 int ra
, int rb
, int rc
, int fn11
)
1098 /* ??? This is wrong: the instruction is not a nop, it still may
1099 raise exceptions. */
1100 if (unlikely(rc
== 31)) {
1106 va
= gen_ieee_input(ra
, fn11
, 1);
1107 vb
= gen_ieee_input(rb
, fn11
, 1);
1108 helper(cpu_fir
[rc
], cpu_env
, va
, vb
);
1112 gen_fp_exc_raise(rc
, fn11
);
1115 #define IEEE_CMP3(name) \
1116 static inline void glue(gen_f, name)(DisasContext *ctx, \
1117 int ra, int rb, int rc, int fn11) \
1119 gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11); \
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
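
/* Worked example (illustrative): zapnot_mask(0x01) == 0x00000000000000ff and
   zapnot_mask(0x0f) == 0x00000000ffffffff; each set bit of the 8-bit literal
   keeps the corresponding byte of the 64-bit operand.  */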
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
1165 static inline void gen_zapnot(int ra
, int rb
, int rc
, int islit
, uint8_t lit
)
1167 if (unlikely(rc
== 31))
1169 else if (unlikely(ra
== 31))
1170 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1172 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1174 gen_helper_zapnot (cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1177 static inline void gen_zap(int ra
, int rb
, int rc
, int islit
, uint8_t lit
)
1179 if (unlikely(rc
== 31))
1181 else if (unlikely(ra
== 31))
1182 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1184 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
1186 gen_helper_zap (cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1190 /* EXTWH, EXTLH, EXTQH */
1191 static void gen_ext_h(int ra
, int rb
, int rc
, int islit
,
1192 uint8_t lit
, uint8_t byte_mask
)
1194 if (unlikely(rc
== 31))
1196 else if (unlikely(ra
== 31))
1197 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1200 lit
= (64 - (lit
& 7) * 8) & 0x3f;
1201 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1203 TCGv tmp1
= tcg_temp_new();
1204 tcg_gen_andi_i64(tmp1
, cpu_ir
[rb
], 7);
1205 tcg_gen_shli_i64(tmp1
, tmp1
, 3);
1206 tcg_gen_neg_i64(tmp1
, tmp1
);
1207 tcg_gen_andi_i64(tmp1
, tmp1
, 0x3f);
1208 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], tmp1
);
1209 tcg_temp_free(tmp1
);
1211 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[rc
], byte_mask
);
1215 /* EXTBL, EXTWL, EXTLL, EXTQL */
1216 static void gen_ext_l(int ra
, int rb
, int rc
, int islit
,
1217 uint8_t lit
, uint8_t byte_mask
)
1219 if (unlikely(rc
== 31))
1221 else if (unlikely(ra
== 31))
1222 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1225 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], (lit
& 7) * 8);
1227 TCGv tmp
= tcg_temp_new();
1228 tcg_gen_andi_i64(tmp
, cpu_ir
[rb
], 7);
1229 tcg_gen_shli_i64(tmp
, tmp
, 3);
1230 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], tmp
);
1233 gen_zapnoti(cpu_ir
[rc
], cpu_ir
[rc
], byte_mask
);
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
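
/* Shift-splitting example (illustrative): with (B & 7) == 3 the computed
   shift is ~(3 * 8) & 63 == 39, and the extra shri by 1 brings the total to
   40 == 64 - 24, as required.  With (B & 7) == 0 the computed shift is 63,
   and 63 + 1 == 64 correctly yields zero without relying on an undefined
   64-bit shift count.  */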
1279 /* INSBL, INSWL, INSLL, INSQL */
1280 static void gen_ins_l(int ra
, int rb
, int rc
, int islit
,
1281 uint8_t lit
, uint8_t byte_mask
)
1283 if (unlikely(rc
== 31))
1285 else if (unlikely(ra
== 31))
1286 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1288 TCGv tmp
= tcg_temp_new();
1290 /* The instruction description has us left-shift the byte mask
1291 the same number of byte slots as the data and apply the zap
1292 at the end. This is equivalent to simply performing the zap
1293 first and shifting afterward. */
1294 gen_zapnoti (tmp
, cpu_ir
[ra
], byte_mask
);
1297 tcg_gen_shli_i64(cpu_ir
[rc
], tmp
, (lit
& 7) * 8);
1299 TCGv shift
= tcg_temp_new();
1300 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1301 tcg_gen_shli_i64(shift
, shift
, 3);
1302 tcg_gen_shl_i64(cpu_ir
[rc
], tmp
, shift
);
1303 tcg_temp_free(shift
);
1309 /* MSKWH, MSKLH, MSKQH */
1310 static void gen_msk_h(int ra
, int rb
, int rc
, int islit
,
1311 uint8_t lit
, uint8_t byte_mask
)
1313 if (unlikely(rc
== 31))
1315 else if (unlikely(ra
== 31))
1316 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1318 gen_zapnoti (cpu_ir
[rc
], cpu_ir
[ra
], ~((byte_mask
<< (lit
& 7)) >> 8));
1320 TCGv shift
= tcg_temp_new();
1321 TCGv mask
= tcg_temp_new();
1323 /* The instruction description is as above, where the byte_mask
1324 is shifted left, and then we extract bits <15:8>. This can be
1325 emulated with a right-shift on the expanded byte mask. This
1326 requires extra care because for an input <2:0> == 0 we need a
1327 shift of 64 bits in order to generate a zero. This is done by
1328 splitting the shift into two parts, the variable shift - 1
1329 followed by a constant 1 shift. The code we expand below is
1330 equivalent to ~((B & 7) * 8) & 63. */
1332 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1333 tcg_gen_shli_i64(shift
, shift
, 3);
1334 tcg_gen_not_i64(shift
, shift
);
1335 tcg_gen_andi_i64(shift
, shift
, 0x3f);
1336 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1337 tcg_gen_shr_i64(mask
, mask
, shift
);
1338 tcg_gen_shri_i64(mask
, mask
, 1);
1340 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], mask
);
1342 tcg_temp_free(mask
);
1343 tcg_temp_free(shift
);
1347 /* MSKBL, MSKWL, MSKLL, MSKQL */
1348 static void gen_msk_l(int ra
, int rb
, int rc
, int islit
,
1349 uint8_t lit
, uint8_t byte_mask
)
1351 if (unlikely(rc
== 31))
1353 else if (unlikely(ra
== 31))
1354 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1356 gen_zapnoti (cpu_ir
[rc
], cpu_ir
[ra
], ~(byte_mask
<< (lit
& 7)));
1358 TCGv shift
= tcg_temp_new();
1359 TCGv mask
= tcg_temp_new();
1361 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1362 tcg_gen_shli_i64(shift
, shift
, 3);
1363 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1364 tcg_gen_shl_i64(mask
, mask
, shift
);
1366 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], mask
);
1368 tcg_temp_free(mask
);
1369 tcg_temp_free(shift
);
1373 /* Code to call arith3 helpers */
1374 #define ARITH3(name) \
1375 static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
1378 if (unlikely(rc == 31)) \
1383 TCGv tmp = tcg_const_i64(lit); \
1384 gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp); \
1385 tcg_temp_free(tmp); \
1387 gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
1389 TCGv tmp1 = tcg_const_i64(0); \
1391 TCGv tmp2 = tcg_const_i64(lit); \
1392 gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2); \
1393 tcg_temp_free(tmp2); \
1395 gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]); \
1396 tcg_temp_free(tmp1); \
1411 /* Code to call arith3 helpers */
1412 #define ARITH3_EX(name) \
1413 static inline void glue(gen_, name)(int ra, int rb, int rc, \
1414 int islit, uint8_t lit) \
1416 if (unlikely(rc == 31)) { \
1421 TCGv tmp = tcg_const_i64(lit); \
1422 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1424 tcg_temp_free(tmp); \
1426 gen_helper_ ## name(cpu_ir[rc], cpu_env, \
1427 cpu_ir[ra], cpu_ir[rb]); \
1430 TCGv tmp1 = tcg_const_i64(0); \
1432 TCGv tmp2 = tcg_const_i64(lit); \
1433 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2); \
1434 tcg_temp_free(tmp2); \
1436 gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
1438 tcg_temp_free(tmp1); \
1448 #define MVIOP2(name) \
1449 static inline void glue(gen_, name)(int rb, int rc) \
1451 if (unlikely(rc == 31)) \
1453 if (unlikely(rb == 31)) \
1454 tcg_gen_movi_i64(cpu_ir[rc], 0); \
1456 gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]); \
1463 static void gen_cmp(TCGCond cond
, int ra
, int rb
, int rc
,
1464 int islit
, uint8_t lit
)
1468 if (unlikely(rc
== 31)) {
1473 va
= tcg_const_i64(0);
1478 vb
= tcg_const_i64(lit
);
1483 tcg_gen_setcond_i64(cond
, cpu_ir
[rc
], va
, vb
);
1493 static void gen_rx(int ra
, int set
)
1498 tcg_gen_ld8u_i64(cpu_ir
[ra
], cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
1501 tmp
= tcg_const_i32(set
);
1502 tcg_gen_st8_i32(tmp
, cpu_env
, offsetof(CPUAlphaState
, intr_flag
));
1503 tcg_temp_free_i32(tmp
);
1506 static ExitStatus
gen_call_pal(DisasContext
*ctx
, int palcode
)
1508 /* We're emulating OSF/1 PALcode. Many of these are trivial access
1509 to internal cpu registers. */
1511 /* Unprivileged PAL call */
1512 if (palcode
>= 0x80 && palcode
< 0xC0) {
1516 /* No-op inside QEMU. */
1520 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_unique
);
1524 tcg_gen_mov_i64(cpu_unique
, cpu_ir
[IR_A0
]);
1527 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
& 0xbf);
1532 #ifndef CONFIG_USER_ONLY
1533 /* Privileged PAL code */
1534 if (palcode
< 0x40 && (ctx
->tb
->flags
& TB_FLAGS_USER_MODE
) == 0) {
1538 /* No-op inside QEMU. */
1542 /* No-op inside QEMU. */
1546 tcg_gen_st_i64(cpu_ir
[IR_A0
], cpu_env
, offsetof(CPUAlphaState
, vptptr
));
1550 tcg_gen_mov_i64(cpu_sysval
, cpu_ir
[IR_A0
]);
1554 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_sysval
);
1561 /* Note that we already know we're in kernel mode, so we know
1562 that PS only contains the 3 IPL bits. */
1563 tcg_gen_ld8u_i64(cpu_ir
[IR_V0
], cpu_env
, offsetof(CPUAlphaState
, ps
));
1565 /* But make sure and store only the 3 IPL bits from the user. */
1566 tmp
= tcg_temp_new();
1567 tcg_gen_andi_i64(tmp
, cpu_ir
[IR_A0
], PS_INT_MASK
);
1568 tcg_gen_st8_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, ps
));
1575 tcg_gen_ld8u_i64(cpu_ir
[IR_V0
], cpu_env
, offsetof(CPUAlphaState
, ps
));
1579 tcg_gen_mov_i64(cpu_usp
, cpu_ir
[IR_A0
]);
1583 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_usp
);
1587 tcg_gen_ld32s_i64(cpu_ir
[IR_V0
], cpu_env
,
1588 offsetof(CPUAlphaState
, cpu_index
));
1592 return gen_excp(ctx
, EXCP_CALL_PAL
, palcode
& 0x3f);
1598 return gen_invalid(ctx
);
1601 #ifndef CONFIG_USER_ONLY
1603 #define PR_BYTE 0x100000
1604 #define PR_LONG 0x200000
1606 static int cpu_pr_data(int pr
)
1609 case 0: return offsetof(CPUAlphaState
, ps
) | PR_BYTE
;
1610 case 1: return offsetof(CPUAlphaState
, fen
) | PR_BYTE
;
1611 case 2: return offsetof(CPUAlphaState
, pcc_ofs
) | PR_LONG
;
1612 case 3: return offsetof(CPUAlphaState
, trap_arg0
);
1613 case 4: return offsetof(CPUAlphaState
, trap_arg1
);
1614 case 5: return offsetof(CPUAlphaState
, trap_arg2
);
1615 case 6: return offsetof(CPUAlphaState
, exc_addr
);
1616 case 7: return offsetof(CPUAlphaState
, palbr
);
1617 case 8: return offsetof(CPUAlphaState
, ptbr
);
1618 case 9: return offsetof(CPUAlphaState
, vptptr
);
1619 case 10: return offsetof(CPUAlphaState
, unique
);
1620 case 11: return offsetof(CPUAlphaState
, sysval
);
1621 case 12: return offsetof(CPUAlphaState
, usp
);
1624 return offsetof(CPUAlphaState
, shadow
[pr
- 32]);
1626 return offsetof(CPUAlphaState
, scratch
[pr
- 40]);
1629 return offsetof(CPUAlphaState
, alarm_expire
);
1634 static ExitStatus
gen_mfpr(int ra
, int regno
)
1636 int data
= cpu_pr_data(regno
);
1638 /* In our emulated PALcode, these processor registers have no
1639 side effects from reading. */
1648 gen_helper_get_time(cpu_ir
[ra
]);
1650 return EXIT_PC_STALE
;
1652 gen_helper_get_time(cpu_ir
[ra
]);
1657 /* The basic registers are data only, and unknown registers
1658 are read-zero, write-ignore. */
1660 tcg_gen_movi_i64(cpu_ir
[ra
], 0);
1661 } else if (data
& PR_BYTE
) {
1662 tcg_gen_ld8u_i64(cpu_ir
[ra
], cpu_env
, data
& ~PR_BYTE
);
1663 } else if (data
& PR_LONG
) {
1664 tcg_gen_ld32s_i64(cpu_ir
[ra
], cpu_env
, data
& ~PR_LONG
);
1666 tcg_gen_ld_i64(cpu_ir
[ra
], cpu_env
, data
);
1671 static ExitStatus
gen_mtpr(DisasContext
*ctx
, int rb
, int regno
)
1677 tmp
= tcg_const_i64(0);
1685 gen_helper_tbia(cpu_env
);
1690 gen_helper_tbis(cpu_env
, tmp
);
1695 tmp
= tcg_const_i64(1);
1696 tcg_gen_st32_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, halted
));
1697 return gen_excp(ctx
, EXCP_HLT
, 0);
1701 gen_helper_halt(tmp
);
1702 return EXIT_PC_STALE
;
1706 gen_helper_set_alarm(cpu_env
, tmp
);
1710 /* The basic registers are data only, and unknown registers
1711 are read-zero, write-ignore. */
1712 data
= cpu_pr_data(regno
);
1714 if (data
& PR_BYTE
) {
1715 tcg_gen_st8_i64(tmp
, cpu_env
, data
& ~PR_BYTE
);
1716 } else if (data
& PR_LONG
) {
1717 tcg_gen_st32_i64(tmp
, cpu_env
, data
& ~PR_LONG
);
1719 tcg_gen_st_i64(tmp
, cpu_env
, data
);
1731 #endif /* !USER_ONLY*/
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else {
        lit = (insn >> 13) & 0xFF;
    }
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
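
    /* Displacement decoding example (illustrative): disp21 extracts the low
       21 bits of the instruction and sign-extends them by shifting left 11
       and arithmetically right 11 within an int32_t, so a raw field of
       0x100000 (bit 20 set) decodes to -0x100000 before being scaled by 4
       at branch time.  */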
1772 ret
= gen_call_pal(ctx
, palcode
);
1797 if (likely(ra
!= 31)) {
1799 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
);
1801 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
);
1806 if (likely(ra
!= 31)) {
1808 tcg_gen_addi_i64(cpu_ir
[ra
], cpu_ir
[rb
], disp16
<< 16);
1810 tcg_gen_movi_i64(cpu_ir
[ra
], disp16
<< 16);
1815 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1816 gen_load_mem(ctx
, &tcg_gen_qemu_ld8u
, ra
, rb
, disp16
, 0, 0);
1822 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 1);
1826 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
1827 gen_load_mem(ctx
, &tcg_gen_qemu_ld16u
, ra
, rb
, disp16
, 0, 0);
1833 gen_store_mem(ctx
, &tcg_gen_qemu_st16
, ra
, rb
, disp16
, 0, 0);
1837 gen_store_mem(ctx
, &tcg_gen_qemu_st8
, ra
, rb
, disp16
, 0, 0);
1841 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 1);
1847 if (likely(rc
!= 31)) {
1850 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1851 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1853 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1854 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1858 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1860 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1866 if (likely(rc
!= 31)) {
1868 TCGv tmp
= tcg_temp_new();
1869 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1871 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1873 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1874 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1878 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1880 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1886 if (likely(rc
!= 31)) {
1889 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1891 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1892 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1895 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1897 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1898 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1904 if (likely(rc
!= 31)) {
1906 TCGv tmp
= tcg_temp_new();
1907 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1909 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1911 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1912 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1916 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1918 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1919 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1926 gen_cmpbge(ra
, rb
, rc
, islit
, lit
);
1930 if (likely(rc
!= 31)) {
1932 TCGv tmp
= tcg_temp_new();
1933 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1935 tcg_gen_addi_i64(tmp
, tmp
, lit
);
1937 tcg_gen_add_i64(tmp
, tmp
, cpu_ir
[rb
]);
1938 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1942 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1944 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1950 if (likely(rc
!= 31)) {
1952 TCGv tmp
= tcg_temp_new();
1953 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
1955 tcg_gen_subi_i64(tmp
, tmp
, lit
);
1957 tcg_gen_sub_i64(tmp
, tmp
, cpu_ir
[rb
]);
1958 tcg_gen_ext32s_i64(cpu_ir
[rc
], tmp
);
1962 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
1964 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1965 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
1972 gen_cmp(TCG_COND_LTU
, ra
, rb
, rc
, islit
, lit
);
1976 if (likely(rc
!= 31)) {
1979 tcg_gen_addi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
1981 tcg_gen_add_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
1984 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
1986 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
1992 if (likely(rc
!= 31)) {
1994 TCGv tmp
= tcg_temp_new();
1995 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
1997 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
1999 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2003 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2005 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2011 if (likely(rc
!= 31)) {
2014 tcg_gen_subi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2016 tcg_gen_sub_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2019 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
2021 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2027 if (likely(rc
!= 31)) {
2029 TCGv tmp
= tcg_temp_new();
2030 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 2);
2032 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
2034 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2038 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
2040 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2046 gen_cmp(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
);
2050 if (likely(rc
!= 31)) {
2052 TCGv tmp
= tcg_temp_new();
2053 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
2055 tcg_gen_addi_i64(cpu_ir
[rc
], tmp
, lit
);
2057 tcg_gen_add_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2061 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2063 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2069 if (likely(rc
!= 31)) {
2071 TCGv tmp
= tcg_temp_new();
2072 tcg_gen_shli_i64(tmp
, cpu_ir
[ra
], 3);
2074 tcg_gen_subi_i64(cpu_ir
[rc
], tmp
, lit
);
2076 tcg_gen_sub_i64(cpu_ir
[rc
], tmp
, cpu_ir
[rb
]);
2080 tcg_gen_movi_i64(cpu_ir
[rc
], -lit
);
2082 tcg_gen_neg_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2088 gen_cmp(TCG_COND_LEU
, ra
, rb
, rc
, islit
, lit
);
2092 gen_addlv(ra
, rb
, rc
, islit
, lit
);
2096 gen_sublv(ra
, rb
, rc
, islit
, lit
);
2100 gen_cmp(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
);
2104 gen_addqv(ra
, rb
, rc
, islit
, lit
);
2108 gen_subqv(ra
, rb
, rc
, islit
, lit
);
2112 gen_cmp(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
);
2122 if (likely(rc
!= 31)) {
2124 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2126 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2128 tcg_gen_and_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2133 if (likely(rc
!= 31)) {
2136 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2138 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2140 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2145 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 1);
2149 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 1);
2153 if (likely(rc
!= 31)) {
2156 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2158 tcg_gen_or_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2161 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2163 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2169 gen_cmov(TCG_COND_EQ
, ra
, rb
, rc
, islit
, lit
, 0);
2173 gen_cmov(TCG_COND_NE
, ra
, rb
, rc
, islit
, lit
, 0);
2177 if (likely(rc
!= 31)) {
2180 tcg_gen_ori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2182 tcg_gen_orc_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2185 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
2187 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2193 if (likely(rc
!= 31)) {
2196 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2198 tcg_gen_xor_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2201 tcg_gen_movi_i64(cpu_ir
[rc
], lit
);
2203 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2209 gen_cmov(TCG_COND_LT
, ra
, rb
, rc
, islit
, lit
, 0);
2213 gen_cmov(TCG_COND_GE
, ra
, rb
, rc
, islit
, lit
, 0);
2217 if (likely(rc
!= 31)) {
2220 tcg_gen_xori_i64(cpu_ir
[rc
], cpu_ir
[ra
], ~lit
);
2222 tcg_gen_eqv_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2225 tcg_gen_movi_i64(cpu_ir
[rc
], ~lit
);
2227 tcg_gen_not_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2233 if (likely(rc
!= 31)) {
2234 uint64_t amask
= ctx
->tb
->flags
>> TB_FLAGS_AMASK_SHIFT
;
2237 tcg_gen_movi_i64(cpu_ir
[rc
], lit
& ~amask
);
2239 tcg_gen_andi_i64(cpu_ir
[rc
], cpu_ir
[rb
], ~amask
);
2245 gen_cmov(TCG_COND_LE
, ra
, rb
, rc
, islit
, lit
, 0);
2249 gen_cmov(TCG_COND_GT
, ra
, rb
, rc
, islit
, lit
, 0);
2254 tcg_gen_movi_i64(cpu_ir
[rc
], ctx
->env
->implver
);
2264 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2268 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2272 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x01);
2276 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2280 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2284 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x03);
2288 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2292 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2296 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0x0f);
2300 gen_zap(ra
, rb
, rc
, islit
, lit
);
2304 gen_zapnot(ra
, rb
, rc
, islit
, lit
);
2308 gen_msk_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2312 if (likely(rc
!= 31)) {
2315 tcg_gen_shri_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2317 TCGv shift
= tcg_temp_new();
2318 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2319 tcg_gen_shr_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2320 tcg_temp_free(shift
);
2323 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2328 gen_ext_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2332 if (likely(rc
!= 31)) {
2335 tcg_gen_shli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2337 TCGv shift
= tcg_temp_new();
2338 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2339 tcg_gen_shl_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2340 tcg_temp_free(shift
);
2343 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2348 gen_ins_l(ra
, rb
, rc
, islit
, lit
, 0xff);
2352 if (likely(rc
!= 31)) {
2355 tcg_gen_sari_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
& 0x3f);
2357 TCGv shift
= tcg_temp_new();
2358 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 0x3f);
2359 tcg_gen_sar_i64(cpu_ir
[rc
], cpu_ir
[ra
], shift
);
2360 tcg_temp_free(shift
);
2363 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2368 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2372 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2376 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x03);
2380 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2384 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2388 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0x0f);
2392 gen_msk_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2396 gen_ins_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2400 gen_ext_h(ra
, rb
, rc
, islit
, lit
, 0xff);
2410 if (likely(rc
!= 31)) {
2412 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2415 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2417 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2418 tcg_gen_ext32s_i64(cpu_ir
[rc
], cpu_ir
[rc
]);
2424 if (likely(rc
!= 31)) {
2426 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
2428 tcg_gen_muli_i64(cpu_ir
[rc
], cpu_ir
[ra
], lit
);
2430 tcg_gen_mul_i64(cpu_ir
[rc
], cpu_ir
[ra
], cpu_ir
[rb
]);
2435 gen_umulh(ra
, rb
, rc
, islit
, lit
);
2439 gen_mullv(ra
, rb
, rc
, islit
, lit
);
2443 gen_mulqv(ra
, rb
, rc
, islit
, lit
);
2450 switch (fpfn
) { /* fn11 & 0x3F */
2453 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2456 if (likely(rc
!= 31)) {
2458 TCGv_i32 tmp
= tcg_temp_new_i32();
2459 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
2460 gen_helper_memory_to_s(cpu_fir
[rc
], tmp
);
2461 tcg_temp_free_i32(tmp
);
2463 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2468 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2475 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2476 gen_fsqrts(ctx
, rb
, rc
, fn11
);
2482 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2485 if (likely(rc
!= 31)) {
2487 TCGv_i32 tmp
= tcg_temp_new_i32();
2488 tcg_gen_trunc_i64_i32(tmp
, cpu_ir
[ra
]);
2489 gen_helper_memory_to_f(cpu_fir
[rc
], tmp
);
2490 tcg_temp_free_i32(tmp
);
2492 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2497 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
2500 if (likely(rc
!= 31)) {
2502 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_ir
[ra
]);
2504 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2509 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2516 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) {
2517 gen_fsqrtt(ctx
, rb
, rc
, fn11
);
2526 /* VAX floating point */
2527 /* XXX: rounding mode and trap are ignored (!) */
2528 switch (fpfn
) { /* fn11 & 0x3F */
2531 gen_faddf(ra
, rb
, rc
);
2535 gen_fsubf(ra
, rb
, rc
);
2539 gen_fmulf(ra
, rb
, rc
);
2543 gen_fdivf(ra
, rb
, rc
);
2555 gen_faddg(ra
, rb
, rc
);
2559 gen_fsubg(ra
, rb
, rc
);
2563 gen_fmulg(ra
, rb
, rc
);
2567 gen_fdivg(ra
, rb
, rc
);
2571 gen_fcmpgeq(ra
, rb
, rc
);
2575 gen_fcmpglt(ra
, rb
, rc
);
2579 gen_fcmpgle(ra
, rb
, rc
);
2610 /* IEEE floating-point */
2611 switch (fpfn
) { /* fn11 & 0x3F */
2614 gen_fadds(ctx
, ra
, rb
, rc
, fn11
);
2618 gen_fsubs(ctx
, ra
, rb
, rc
, fn11
);
2622 gen_fmuls(ctx
, ra
, rb
, rc
, fn11
);
2626 gen_fdivs(ctx
, ra
, rb
, rc
, fn11
);
2630 gen_faddt(ctx
, ra
, rb
, rc
, fn11
);
2634 gen_fsubt(ctx
, ra
, rb
, rc
, fn11
);
2638 gen_fmult(ctx
, ra
, rb
, rc
, fn11
);
2642 gen_fdivt(ctx
, ra
, rb
, rc
, fn11
);
2646 gen_fcmptun(ctx
, ra
, rb
, rc
, fn11
);
2650 gen_fcmpteq(ctx
, ra
, rb
, rc
, fn11
);
2654 gen_fcmptlt(ctx
, ra
, rb
, rc
, fn11
);
2658 gen_fcmptle(ctx
, ra
, rb
, rc
, fn11
);
2661 if (fn11
== 0x2AC || fn11
== 0x6AC) {
2663 gen_fcvtst(ctx
, rb
, rc
, fn11
);
2666 gen_fcvtts(ctx
, rb
, rc
, fn11
);
2671 gen_fcvttq(ctx
, rb
, rc
, fn11
);
2675 gen_fcvtqs(ctx
, rb
, rc
, fn11
);
2679 gen_fcvtqt(ctx
, rb
, rc
, fn11
);
2692 if (likely(rc
!= 31)) {
2696 tcg_gen_movi_i64(cpu_fir
[rc
], 0);
2698 tcg_gen_mov_i64(cpu_fir
[rc
], cpu_fir
[ra
]);
2701 gen_fcpys(ra
, rb
, rc
);
2707 gen_fcpysn(ra
, rb
, rc
);
2711 gen_fcpyse(ra
, rb
, rc
);
2715 if (likely(ra
!= 31))
2716 gen_helper_store_fpcr(cpu_env
, cpu_fir
[ra
]);
2718 TCGv tmp
= tcg_const_i64(0);
2719 gen_helper_store_fpcr(cpu_env
, tmp
);
2725 if (likely(ra
!= 31))
2726 gen_helper_load_fpcr(cpu_fir
[ra
], cpu_env
);
2730 gen_fcmov(TCG_COND_EQ
, ra
, rb
, rc
);
2734 gen_fcmov(TCG_COND_NE
, ra
, rb
, rc
);
2738 gen_fcmov(TCG_COND_LT
, ra
, rb
, rc
);
2742 gen_fcmov(TCG_COND_GE
, ra
, rb
, rc
);
2746 gen_fcmov(TCG_COND_LE
, ra
, rb
, rc
);
2750 gen_fcmov(TCG_COND_GT
, ra
, rb
, rc
);
2760 /* ??? I'm pretty sure there's nothing that /sv needs to do that
2761 /v doesn't do. The only thing I can think is that /sv is a
2762 valid instruction merely for completeness in the ISA. */
2763 gen_fcvtql_v(ctx
, rb
, rc
);
2770 switch ((uint16_t)disp16
) {
2800 gen_helper_load_pcc(cpu_ir
[ra
], cpu_env
);
2802 ret
= EXIT_PC_STALE
;
2804 gen_helper_load_pcc(cpu_ir
[ra
], cpu_env
);
2828 /* HW_MFPR (PALcode) */
2829 #ifndef CONFIG_USER_ONLY
2830 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
2831 return gen_mfpr(ra
, insn
& 0xffff);
2836 /* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
2837 prediction stack action, which of course we don't implement. */
2839 tcg_gen_andi_i64(cpu_pc
, cpu_ir
[rb
], ~3);
2841 tcg_gen_movi_i64(cpu_pc
, 0);
2844 tcg_gen_movi_i64(cpu_ir
[ra
], ctx
->pc
);
2846 ret
= EXIT_PC_UPDATED
;
2849 /* HW_LD (PALcode) */
2850 #ifndef CONFIG_USER_ONLY
2851 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
2858 addr
= tcg_temp_new();
2860 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
2862 tcg_gen_movi_i64(addr
, disp12
);
2863 switch ((insn
>> 12) & 0xF) {
2865 /* Longword physical access (hw_ldl/p) */
2866 gen_helper_ldl_phys(cpu_ir
[ra
], addr
);
2869 /* Quadword physical access (hw_ldq/p) */
2870 gen_helper_ldq_phys(cpu_ir
[ra
], addr
);
2873 /* Longword physical access with lock (hw_ldl_l/p) */
2874 gen_helper_ldl_l_phys(cpu_ir
[ra
], cpu_env
, addr
);
2877 /* Quadword physical access with lock (hw_ldq_l/p) */
2878 gen_helper_ldq_l_phys(cpu_ir
[ra
], cpu_env
, addr
);
2881 /* Longword virtual PTE fetch (hw_ldl/v) */
2884 /* Quadword virtual PTE fetch (hw_ldq/v) */
2888 /* Incpu_ir[ra]id */
2891 /* Incpu_ir[ra]id */
2894 /* Longword virtual access (hw_ldl) */
2897 /* Quadword virtual access (hw_ldq) */
2900 /* Longword virtual access with protection check (hw_ldl/w) */
2901 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, MMU_KERNEL_IDX
);
2904 /* Quadword virtual access with protection check (hw_ldq/w) */
2905 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, MMU_KERNEL_IDX
);
2908 /* Longword virtual access with alt access mode (hw_ldl/a)*/
2911 /* Quadword virtual access with alt access mode (hw_ldq/a) */
2914 /* Longword virtual access with alternate access mode and
2915 protection checks (hw_ldl/wa) */
2916 tcg_gen_qemu_ld32s(cpu_ir
[ra
], addr
, MMU_USER_IDX
);
2919 /* Quadword virtual access with alternate access mode and
2920 protection checks (hw_ldq/wa) */
2921 tcg_gen_qemu_ld64(cpu_ir
[ra
], addr
, MMU_USER_IDX
);
2924 tcg_temp_free(addr
);
2933 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) == 0) {
2936 if (likely(rc
!= 31)) {
2938 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int8_t)lit
));
2940 tcg_gen_ext8s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2945 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_BWX
) {
2946 if (likely(rc
!= 31)) {
2948 tcg_gen_movi_i64(cpu_ir
[rc
], (int64_t)((int16_t)lit
));
2950 tcg_gen_ext16s_i64(cpu_ir
[rc
], cpu_ir
[rb
]);
2958 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
2959 if (likely(rc
!= 31)) {
2961 tcg_gen_movi_i64(cpu_ir
[rc
], ctpop64(lit
));
2963 gen_helper_ctpop(cpu_ir
[rc
], cpu_ir
[rb
]);
2971 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
2972 gen_perr(ra
, rb
, rc
, islit
, lit
);
2978 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
2979 if (likely(rc
!= 31)) {
2981 tcg_gen_movi_i64(cpu_ir
[rc
], clz64(lit
));
2983 gen_helper_ctlz(cpu_ir
[rc
], cpu_ir
[rb
]);
2991 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_CIX
) {
2992 if (likely(rc
!= 31)) {
2994 tcg_gen_movi_i64(cpu_ir
[rc
], ctz64(lit
));
2996 gen_helper_cttz(cpu_ir
[rc
], cpu_ir
[rb
]);
3004 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3005 if (real_islit
|| ra
!= 31) {
3014 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3015 if (real_islit
|| ra
!= 31) {
3024 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3025 if (real_islit
|| ra
!= 31) {
3034 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3035 if (real_islit
|| ra
!= 31) {
3044 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3045 gen_minsb8(ra
, rb
, rc
, islit
, lit
);
3051 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3052 gen_minsw4(ra
, rb
, rc
, islit
, lit
);
3058 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3059 gen_minub8(ra
, rb
, rc
, islit
, lit
);
3065 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3066 gen_minuw4(ra
, rb
, rc
, islit
, lit
);
3072 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3073 gen_maxub8(ra
, rb
, rc
, islit
, lit
);
3079 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3080 gen_maxuw4(ra
, rb
, rc
, islit
, lit
);
3086 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3087 gen_maxsb8(ra
, rb
, rc
, islit
, lit
);
3093 if (ctx
->tb
->flags
& TB_FLAGS_AMASK_MVI
) {
3094 gen_maxsw4(ra
, rb
, rc
, islit
, lit
);
3100 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
3103 if (likely(rc
!= 31)) {
3105 tcg_gen_mov_i64(cpu_ir
[rc
], cpu_fir
[ra
]);
3107 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
3112 if ((ctx
->tb
->flags
& TB_FLAGS_AMASK_FIX
) == 0) {
3116 TCGv_i32 tmp1
= tcg_temp_new_i32();
3118 gen_helper_s_to_memory(tmp1
, cpu_fir
[ra
]);
3120 TCGv tmp2
= tcg_const_i64(0);
3121 gen_helper_s_to_memory(tmp1
, tmp2
);
3122 tcg_temp_free(tmp2
);
3124 tcg_gen_ext_i32_i64(cpu_ir
[rc
], tmp1
);
3125 tcg_temp_free_i32(tmp1
);
3133 /* HW_MTPR (PALcode) */
3134 #ifndef CONFIG_USER_ONLY
3135 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3136 return gen_mtpr(ctx
, rb
, insn
& 0xffff);
3141 /* HW_RET (PALcode) */
3142 #ifndef CONFIG_USER_ONLY
3143 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3145 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3146 address from EXC_ADDR. This turns out to be useful for our
3147 emulation PALcode, so continue to accept it. */
3148 TCGv tmp
= tcg_temp_new();
3149 tcg_gen_ld_i64(tmp
, cpu_env
, offsetof(CPUAlphaState
, exc_addr
));
3150 gen_helper_hw_ret(cpu_env
, tmp
);
3153 gen_helper_hw_ret(cpu_env
, cpu_ir
[rb
]);
3155 ret
= EXIT_PC_UPDATED
;
3161 /* HW_ST (PALcode) */
3162 #ifndef CONFIG_USER_ONLY
3163 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3165 addr
= tcg_temp_new();
3167 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
3169 tcg_gen_movi_i64(addr
, disp12
);
3173 val
= tcg_temp_new();
3174 tcg_gen_movi_i64(val
, 0);
3176 switch ((insn
>> 12) & 0xF) {
3178 /* Longword physical access */
3179 gen_helper_stl_phys(addr
, val
);
3182 /* Quadword physical access */
3183 gen_helper_stq_phys(addr
, val
);
3186 /* Longword physical access with lock */
3187 gen_helper_stl_c_phys(val
, cpu_env
, addr
, val
);
3190 /* Quadword physical access with lock */
3191 gen_helper_stq_c_phys(val
, cpu_env
, addr
, val
);
3194 /* Longword virtual access */
3197 /* Quadword virtual access */
3218 /* Longword virtual access with alternate access mode */
3221 /* Quadword virtual access with alternate access mode */
3232 tcg_temp_free(addr
);
3239 gen_load_mem(ctx
, &gen_qemu_ldf
, ra
, rb
, disp16
, 1, 0);
3243 gen_load_mem(ctx
, &gen_qemu_ldg
, ra
, rb
, disp16
, 1, 0);
3247 gen_load_mem(ctx
, &gen_qemu_lds
, ra
, rb
, disp16
, 1, 0);
3251 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 1, 0);
3255 gen_store_mem(ctx
, &gen_qemu_stf
, ra
, rb
, disp16
, 1, 0);
3259 gen_store_mem(ctx
, &gen_qemu_stg
, ra
, rb
, disp16
, 1, 0);
3263 gen_store_mem(ctx
, &gen_qemu_sts
, ra
, rb
, disp16
, 1, 0);
3267 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 1, 0);
3271 gen_load_mem(ctx
, &tcg_gen_qemu_ld32s
, ra
, rb
, disp16
, 0, 0);
3275 gen_load_mem(ctx
, &tcg_gen_qemu_ld64
, ra
, rb
, disp16
, 0, 0);
3279 gen_load_mem(ctx
, &gen_qemu_ldl_l
, ra
, rb
, disp16
, 0, 0);
3283 gen_load_mem(ctx
, &gen_qemu_ldq_l
, ra
, rb
, disp16
, 0, 0);
3287 gen_store_mem(ctx
, &tcg_gen_qemu_st32
, ra
, rb
, disp16
, 0, 0);
3291 gen_store_mem(ctx
, &tcg_gen_qemu_st64
, ra
, rb
, disp16
, 0, 0);
3295 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 0);
3299 ret
= gen_store_conditional(ctx
, ra
, rb
, disp16
, 1);
3303 ret
= gen_bdirect(ctx
, ra
, disp21
);
3305 case 0x31: /* FBEQ */
3306 ret
= gen_fbcond(ctx
, TCG_COND_EQ
, ra
, disp21
);
3308 case 0x32: /* FBLT */
3309 ret
= gen_fbcond(ctx
, TCG_COND_LT
, ra
, disp21
);
3311 case 0x33: /* FBLE */
3312 ret
= gen_fbcond(ctx
, TCG_COND_LE
, ra
, disp21
);
3316 ret
= gen_bdirect(ctx
, ra
, disp21
);
3318 case 0x35: /* FBNE */
3319 ret
= gen_fbcond(ctx
, TCG_COND_NE
, ra
, disp21
);
3321 case 0x36: /* FBGE */
3322 ret
= gen_fbcond(ctx
, TCG_COND_GE
, ra
, disp21
);
3324 case 0x37: /* FBGT */
3325 ret
= gen_fbcond(ctx
, TCG_COND_GT
, ra
, disp21
);
3329 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 1);
3333 ret
= gen_bcond(ctx
, TCG_COND_EQ
, ra
, disp21
, 0);
3337 ret
= gen_bcond(ctx
, TCG_COND_LT
, ra
, disp21
, 0);
3341 ret
= gen_bcond(ctx
, TCG_COND_LE
, ra
, disp21
, 0);
3345 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 1);
3349 ret
= gen_bcond(ctx
, TCG_COND_NE
, ra
, disp21
, 0);
3353 ret
= gen_bcond(ctx
, TCG_COND_GE
, ra
, disp21
, 0);
3357 ret
= gen_bcond(ctx
, TCG_COND_GT
, ra
, disp21
, 0);
3360 ret
= gen_invalid(ctx
);
3367 static inline void gen_intermediate_code_internal(CPUAlphaState
*env
,
3368 TranslationBlock
*tb
,
3371 DisasContext ctx
, *ctxp
= &ctx
;
3372 target_ulong pc_start
;
3374 uint16_t *gen_opc_end
;
3382 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
3387 ctx
.mem_idx
= cpu_mmu_index(env
);
3389 /* ??? Every TB begins with unset rounding mode, to be initialized on
3390 the first fp insn of the TB. Alternately we could define a proper
3391 default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
3392 to reset the FP_STATUS to that default at the end of any TB that
3393 changes the default. We could even (gasp) dynamiclly figure out
3394 what default would be most efficient given the running program. */
3396 /* Similarly for flush-to-zero. */
3400 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
3402 max_insns
= CF_COUNT_MASK
;
3406 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
3407 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
3408 if (bp
->pc
== ctx
.pc
) {
3409 gen_excp(&ctx
, EXCP_DEBUG
, 0);
3415 j
= gen_opc_ptr
- gen_opc_buf
;
3419 gen_opc_instr_start
[lj
++] = 0;
3421 gen_opc_pc
[lj
] = ctx
.pc
;
3422 gen_opc_instr_start
[lj
] = 1;
3423 gen_opc_icount
[lj
] = num_insns
;
3425 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
3427 insn
= cpu_ldl_code(env
, ctx
.pc
);
3430 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
))) {
3431 tcg_gen_debug_insn_start(ctx
.pc
);
3435 ret
= translate_one(ctxp
, insn
);
3437 /* If we reach a page boundary, are single stepping,
3438 or exhaust instruction count, stop generation. */
3440 && ((ctx
.pc
& (TARGET_PAGE_SIZE
- 1)) == 0
3441 || gen_opc_ptr
>= gen_opc_end
3442 || num_insns
>= max_insns
3444 || env
->singlestep_enabled
)) {
3445 ret
= EXIT_PC_STALE
;
3447 } while (ret
== NO_EXIT
);
3449 if (tb
->cflags
& CF_LAST_IO
) {
3458 tcg_gen_movi_i64(cpu_pc
, ctx
.pc
);
3460 case EXIT_PC_UPDATED
:
3461 if (env
->singlestep_enabled
) {
3462 gen_excp_1(EXCP_DEBUG
, 0);
3471 gen_icount_end(tb
, num_insns
);
3472 *gen_opc_ptr
= INDEX_op_end
;
3474 j
= gen_opc_ptr
- gen_opc_buf
;
3477 gen_opc_instr_start
[lj
++] = 0;
3479 tb
->size
= ctx
.pc
- pc_start
;
3480 tb
->icount
= num_insns
;
3484 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
3485 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
3486 log_target_disas(pc_start
, ctx
.pc
- pc_start
, 1);
void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    AlphaCPU *cpu;
    CPUAlphaState *env;
    int implver, amask, i, max;

    cpu = ALPHA_CPU(object_new(TYPE_ALPHA_CPU));
    env = &cpu->env;

    alpha_translate_init();

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

    qemu_init_vcpu(env);
    return env;
}
void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}