/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];
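/* The buffer holds every name back to back: "ir0".."ir9" take 4 bytes
   each including the NUL terminator, "ir10".."ir30" take 5, and the
   "fir" names are one byte longer, giving 10*4 + 21*5 + 10*5 + 21*6.  */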
#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
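/* Clearing cpu_lock_addr to -1 above drops the reservation, so a later
   STL_C/STQ_C that is not preceded by a fresh LDL_L/LDQ_L takes the
   lab_fail path and leaves 0 in ra, as the architecture requires.  */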
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
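/* The low bits of the value passed to tcg_gen_exit_tb() name which
   goto_tb slot (0 or 1) was taken, so the execution loop can patch the
   corresponding jump and chain this TB to its successor; that is why
   gen_bcond_internal below exits with (tcg_target_long)ctx->tb + 1 on
   the taken path.  */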
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movcond pc, cond, 0, tmp, pc
           ...
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
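/* Worked example for the >= / < case: when src is -0.0
   (0x8000000000000000), setcond NE yields 0, neg keeps it 0, and the
   final AND produces +0.0; for any other src the setcond/neg pair
   yields an all-ones mask and src passes through unchanged.  */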
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else {
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
        }
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit) {
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    } else {
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    }
    gen_set_label(l1);
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31) {
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    } else {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    }
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_env, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_env, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_env, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
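/* Bit layout recap: the two masks split the register-format value so
   that source bits <63:62> land in result bits <31:30> (via the
   arithmetic shift by 32) and bits <58:29> land in <29:0> (via the
   shift by 29); the final OR stitches the halves back together.  */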
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                          \
    static inline void glue(gen_f, name)(DisasContext *ctx,        \
                                         int rb, int rc, int fn11) \
    {                                                              \
        gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);     \
    }
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                          \
    static inline void glue(gen_f, name)(DisasContext *ctx,        \
                                         int rb, int rc, int fn11) \
    {                                                              \
        gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);     \
    }
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0:        /* neither input is zero */
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 1:        /* rb is zero */
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2:        /* ra is zero */
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 3:        /* both are zero */
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                                  \
    static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                      \
        gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
    }
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                    \
    static inline void glue(gen_f, name)(DisasContext *ctx,                \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                      \
        gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);        \
    }
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
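/* For example, zapnot_mask(0x01) == 0x00000000000000ff and
   zapnot_mask(0x0f) == 0x00000000ffffffff: each set bit in the literal
   keeps the corresponding byte of the operand.  */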
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */
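            /* Concretely: (B & 7) == 1 gives ~8 & 63 == 55, and the two
               shifts below total 56 == 64 - 8; (B & 7) == 0 gives 63,
               and 63 + 1 == 64, so the result is all zeros.  */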
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
1344 static void gen_msk_l(int ra
, int rb
, int rc
, int islit
,
1345 uint8_t lit
, uint8_t byte_mask
)
1347 if (unlikely(rc
== 31))
1349 else if (unlikely(ra
== 31))
1350 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1352 gen_zapnoti (cpu_ir
[rc
], cpu_ir
[ra
], ~(byte_mask
<< (lit
& 7)));
1354 TCGv shift
= tcg_temp_new();
1355 TCGv mask
= tcg_temp_new();
1357 tcg_gen_andi_i64(shift
, cpu_ir
[rb
], 7);
1358 tcg_gen_shli_i64(shift
, shift
, 3);
1359 tcg_gen_movi_i64(mask
, zapnot_mask (byte_mask
));
1360 tcg_gen_shl_i64(mask
, mask
, shift
);
1362 tcg_gen_andc_i64(cpu_ir
[rc
], cpu_ir
[ra
], mask
);
1364 tcg_temp_free(mask
);
1365 tcg_temp_free(shift
);
/* Code to call arith3 helpers */
#define ARITH3(name)                                                         \
    static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,   \
                                        uint8_t lit)                         \
    {                                                                        \
        if (unlikely(rc == 31))                                              \
            return;                                                          \
                                                                             \
        if (ra != 31) {                                                      \
            if (islit) {                                                     \
                TCGv tmp = tcg_const_i64(lit);                               \
                gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);            \
                tcg_temp_free(tmp);                                          \
            } else                                                           \
                gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);     \
        } else {                                                             \
            TCGv tmp1 = tcg_const_i64(0);                                    \
            if (islit) {                                                     \
                TCGv tmp2 = tcg_const_i64(lit);                              \
                gen_helper_ ## name(cpu_ir[rc], tmp1, tmp2);                 \
                tcg_temp_free(tmp2);                                         \
            } else                                                           \
                gen_helper_ ## name(cpu_ir[rc], tmp1, cpu_ir[rb]);           \
            tcg_temp_free(tmp1);                                             \
        }                                                                    \
    }
ARITH3(cmpbge)
ARITH3(umulh)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
/* Code to call arith3 helpers */
#define ARITH3_EX(name)                                                     \
    static inline void glue(gen_, name)(int ra, int rb, int rc,             \
                                        int islit, uint8_t lit)             \
    {                                                                       \
        if (unlikely(rc == 31)) {                                           \
            return;                                                         \
        }                                                                   \
        if (ra != 31) {                                                     \
            if (islit) {                                                    \
                TCGv tmp = tcg_const_i64(lit);                              \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                    \
                                    cpu_ir[ra], tmp);                       \
                tcg_temp_free(tmp);                                         \
            } else {                                                        \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                    \
                                    cpu_ir[ra], cpu_ir[rb]);                \
            }                                                               \
        } else {                                                            \
            TCGv tmp1 = tcg_const_i64(0);                                   \
            if (islit) {                                                    \
                TCGv tmp2 = tcg_const_i64(lit);                             \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);       \
                tcg_temp_free(tmp2);                                        \
            } else {                                                        \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                               \
            tcg_temp_free(tmp1);                                            \
        }                                                                   \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)
#define MVIOP2(name)                                         \
    static inline void glue(gen_, name)(int rb, int rc)      \
    {                                                        \
        if (unlikely(rc == 31))                              \
            return;                                          \
        if (unlikely(rb == 31))                              \
            tcg_gen_movi_i64(cpu_ir[rc], 0);                 \
        else                                                 \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[rb]);     \
    }
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUAlphaState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
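/* Example of how the flag bits are consumed: cpu_pr_data(0) yields
   offsetof(CPUAlphaState, ps) | PR_BYTE, so gen_mfpr below strips
   PR_BYTE and emits an 8-bit load, while plain offsets such as
   trap_arg0 are accessed with full 64-bit loads and stores.  */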
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    if (regno == 250) {
        /* WALL_TIME */
        if (use_icount) {
            gen_io_start();
            gen_helper_get_time(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            gen_helper_get_time(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUAlphaState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}

#endif /* !USER_ONLY */
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else {
        lit = (insn >> 13) & 0xFF;
    }
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
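    /* The shift pairs above sign-extend the narrow displacement fields:
       e.g. a disp21 field of 0x100000 (bit 20 set) becomes 0x80000000
       after the left shift and -1048576 (0xFFF00000) after the
       arithmetic right shift; the (int16_t) cast does the same for
       disp16.  */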
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31) {
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            } else {
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
            }
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31) {
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            } else {
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
            }
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit) {
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    } else {
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit) {
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    } else {
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit) {
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    } else {
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit) {
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    } else {
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    }
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit) {
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    } else {
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    }
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    }
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit) {
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    } else {
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    }
                    tcg_temp_free(tmp);
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    } else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31) {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                } else if (islit) {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                } else {
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                }
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    } else {
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    } else {
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    } else {
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    } else {
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    } else {
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                } else {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    } else {
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31) {
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    } else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    } else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    } else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31) {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                } else {
                    if (islit) {
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    } else {
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    }
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31) {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                } else if (islit) {
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                } else {
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                }
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
                }
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
                }
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                } else {
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
                }
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x2B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            /* CPYS */
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV (or FNOP when ra is $f31) */
                    if (ra == 31) {
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    } else {
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                    }
                } else {
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(cpu_env, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0xC000:
            /* RPCC */
            if (ra != 31) {
                if (use_icount) {
                    gen_io_start();
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                    gen_io_end();
                    ret = EXIT_PC_STALE;
                } else {
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                }
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mfpr(ra, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31) {
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            } else {
                tcg_gen_movi_i64(addr, disp12);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                } else {
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x01:
            /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    } else {
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x30:
            /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    } else {
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x31:
            /* PERR */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_perr(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x32:
            /* CTLZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                    } else {
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x33:
            /* CTTZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                    } else {
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x34:
            /* UNPKBW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbw(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x35:
            /* UNPKBL */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbl(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x36:
            /* PKWB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pkwb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x37:
            /* PKLB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pklb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x38:
            /* MINSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x39:
            /* MINSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3A:
            /* MINUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3B:
            /* MINUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3C:
            /* MAXUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3D:
            /* MAXUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3E:
            /* MAXSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3F:
            /* MAXSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x70:
            /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                } else {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                }
            }
            break;
        case 0x78:
            /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31) {
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                } else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mtpr(ctx, rb, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            if (rb == 31) {
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
                   address from EXC_ADDR.  This turns out to be useful for our
                   emulation PALcode, so continue to accept it.  */
                TCGv tmp = tcg_temp_new();
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
                gen_helper_hw_ret(cpu_env, tmp);
                tcg_temp_free(tmp);
            } else {
                gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
            }
            ret = EXIT_PC_UPDATED;
            break;
        }
#endif
        goto invalid_opc;
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr, val;

            addr = tcg_temp_new();
            if (rb != 31) {
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            } else {
                tcg_gen_movi_i64(addr, disp12);
            }

            if (ra != 31) {
                val = cpu_ir[ra];
            } else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }

            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(addr, val);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(addr, val);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(val, addr, val);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(val, addr, val);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            default:
                goto invalid_opc;
            }

            if (ra == 31) {
                tcg_temp_free(val);
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    default:
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(CPUAlphaState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j) {
            gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",      IMPLVER_2106x, 0 },
    { "ev5",      IMPLVER_21164, 0 },
    { "ev56",     IMPLVER_21164, AMASK_BWX },
    { "pca56",    IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",      IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",     IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                  | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",     IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                  | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",    IMPLVER_2106x, 0 },
    { "21164",    IMPLVER_21164, 0 },
    { "21164a",   IMPLVER_21164, AMASK_BWX },
    { "21164pc",  IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",    IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",   IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                  | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = g_malloc0(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    env->ps = PS_USER_MODE;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD
                               | FPCR_DYN_NORMAL));
#endif
    env->lock_addr = -1;

    qemu_init_vcpu(env);
    return env;
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}