/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#include "qemu/host-utils.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
# define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
# define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */
typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    /* register helpers */
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
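/* The VAX F/G and IEEE S formats are kept in the FP registers in a
   rearranged 64-bit form; the helpers below convert between the memory
   representation and the register representation around the actual
   guest memory access.  */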
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
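/* Generate a load of the form ra = mem[rb + disp16].  FP selects the FP
   register file, and LDx_U-style accesses clear the low address bits.  */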
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
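/* Generate a store of ra (or zero when ra is $31) to mem[rb + disp16],
   mirroring gen_load_mem above.  */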
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
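/* STL_C/STQ_C: perform the store only if the lock established by a prior
   load-locked still matches; ra receives 1 on success and 0 on failure,
   and the lock is cleared either way.  */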
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
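/* Unconditional direct branch: write the return address into ra and jump
   to PC + disp*4, using goto_tb when the target stays on the same page.  */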
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
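/* Conditional branch on an integer register; MASK selects testing only the
   low bit (BLBC/BLBS) rather than the whole register.  */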
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
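/* Conditional branch on an FP register; -0.0 is folded first so that it
   behaves like +0.0 for the comparison.  */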
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
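/* CMOVxx: conditionally move a register or literal into rc, based on
   comparing ra against zero; MASK selects testing only the low bit of ra.  */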
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}
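/* FP operate instructions encode rounding-mode and trap qualifiers in the
   fn11 field; the masks below pick those bits apart.  */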
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;

    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}

static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
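/* CVTLQ and CVTQL below only shuffle bits between the longword-in-FPR
   layout and a 64-bit integer; no floating-point arithmetic is involved.  */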
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int rb, int rc, int fn11)      \
    {                                                                   \
        gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);          \
    }
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}

#define IEEE_INTCVT(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int rb, int rc, int fn11)      \
    {                                                                   \
        gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);          \
    }
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                   \
        gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);      \
    }
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
    static inline void glue(gen_f, name)(DisasContext *ctx,             \
                                         int ra, int rb, int rc, int fn11) \
    {                                                                   \
        gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);     \
    }
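/* Expand the 8-bit byte mask of ZAP/ZAPNOT into a 64-bit mask with one
   0xff byte per set bit, e.g. 0x0f -> 0x00000000ffffffffULL.  */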
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                    \
    static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                        uint8_t lit)                    \
    {                                                                   \
        if (unlikely(rc == 31))                                         \
            return;                                                     \
                                                                        \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);       \
                tcg_temp_free(tmp);                                     \
            } else                                                      \
                gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);           \
                tcg_temp_free(tmp2);                                    \
            } else                                                      \
                gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);     \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
/* Code to call arith3 helpers */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, cpu_ir[rb]); \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
#define MVIOP2(name)                                                    \
    static inline void glue(gen_, name)(int rb, int rc)                 \
    {                                                                   \
        if (unlikely(rc == 31))                                         \
            return;                                                     \
        if (unlikely(rb == 31))                                         \
            tcg_gen_movi_i64(cpu_ir[rc], 0);                            \
        else                                                            \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);               \
    }
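/* Integer compare: set rc to 1 or 0 according to COND applied to ra
   and rb (or a literal).  */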
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    if (regno == 250) {
        /* WALL_TIME */
        if (use_icount) {
            gen_io_start();
            gen_helper_get_time(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        }
        gen_helper_get_time(cpu_ir[ra]);
        return NO_EXIT;
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUAlphaState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY*/
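/* Decode and translate a single instruction; the return value tells the
   main loop how the TB is being ended (see ExitStatus above).  */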
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else {
        lit = (insn >> 13) & 0xFF;
    }
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);

        ret = gen_call_pal(ctx, palcode);
        if (likely(ra != 31)) {
            tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            tcg_gen_movi_i64(cpu_ir[ra], disp16);

        if (likely(ra != 31)) {
            tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);

        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);

        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        if (likely(rc != 31)) {
            tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
            tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
            tcg_gen_movi_i64(cpu_ir[rc], lit);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
            tcg_gen_addi_i64(tmp, tmp, lit);
            tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
            tcg_gen_movi_i64(cpu_ir[rc], lit);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
            tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
            tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
            tcg_gen_movi_i64(cpu_ir[rc], -lit);
            tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

        if (likely(rc != 31)) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
            tcg_gen_subi_i64(tmp, tmp, lit);
            tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
            tcg_gen_movi_i64(cpu_ir[rc], -lit);
            tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

        gen_cmpbge(ra, rb, rc, islit, lit);

        if (likely(rc != 31)) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
            tcg_gen_addi_i64(tmp, tmp, lit);
            tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
            tcg_gen_movi_i64(cpu_ir[rc], lit);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
            tcg_gen_subi_i64(tmp, tmp, lit);
            tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
            tcg_gen_movi_i64(cpu_ir[rc], -lit);
            tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

        gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);

        if (likely(rc != 31)) {
            tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
            tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], lit);
            tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
            tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
            tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], lit);
            tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
            tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
            tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], -lit);
            tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
            tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
            tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], -lit);
            tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);

        gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);

        if (likely(rc != 31)) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
            tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
            tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], lit);
            tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
            tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
            tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], -lit);
            tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);

        gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);

        gen_addlv(ra, rb, rc, islit, lit);

        gen_sublv(ra, rb, rc, islit, lit);

        gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);

        gen_addqv(ra, rb, rc, islit, lit);

        gen_subqv(ra, rb, rc, islit, lit);

        gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
        if (likely(rc != 31)) {
            tcg_gen_movi_i64(cpu_ir[rc], 0);
            tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
            tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);

        if (likely(rc != 31)) {
            tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
            tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], 0);

        gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);

        gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);

        if (likely(rc != 31)) {
            tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
            tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], lit);
            tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);

        gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);

        if (likely(rc != 31)) {
            tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
            tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], ~lit);
            tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
            tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
            tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], lit);
            tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

        gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);

        gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);

        if (likely(rc != 31)) {
            tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
            tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_movi_i64(cpu_ir[rc], ~lit);
            tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);

        if (likely(rc != 31)) {
            uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
            tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
            tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);

        gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);

        gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);

        tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
        gen_msk_l(ra, rb, rc, islit, lit, 0x01);

        gen_ext_l(ra, rb, rc, islit, lit, 0x01);

        gen_ins_l(ra, rb, rc, islit, lit, 0x01);

        gen_msk_l(ra, rb, rc, islit, lit, 0x03);

        gen_ext_l(ra, rb, rc, islit, lit, 0x03);

        gen_ins_l(ra, rb, rc, islit, lit, 0x03);

        gen_msk_l(ra, rb, rc, islit, lit, 0x0f);

        gen_ext_l(ra, rb, rc, islit, lit, 0x0f);

        gen_ins_l(ra, rb, rc, islit, lit, 0x0f);

        gen_zap(ra, rb, rc, islit, lit);

        gen_zapnot(ra, rb, rc, islit, lit);

        gen_msk_l(ra, rb, rc, islit, lit, 0xff);

        if (likely(rc != 31)) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
            tcg_temp_free(shift);
            tcg_gen_movi_i64(cpu_ir[rc], 0);

        gen_ext_l(ra, rb, rc, islit, lit, 0xff);

        if (likely(rc != 31)) {
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
            tcg_temp_free(shift);
            tcg_gen_movi_i64(cpu_ir[rc], 0);

        gen_ins_l(ra, rb, rc, islit, lit, 0xff);

        if (likely(rc != 31)) {
            tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
            tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
            tcg_temp_free(shift);
            tcg_gen_movi_i64(cpu_ir[rc], 0);

        gen_msk_h(ra, rb, rc, islit, lit, 0x03);

        gen_ins_h(ra, rb, rc, islit, lit, 0x03);

        gen_ext_h(ra, rb, rc, islit, lit, 0x03);

        gen_msk_h(ra, rb, rc, islit, lit, 0x0f);

        gen_ins_h(ra, rb, rc, islit, lit, 0x0f);

        gen_ext_h(ra, rb, rc, islit, lit, 0x0f);

        gen_msk_h(ra, rb, rc, islit, lit, 0xff);

        gen_ins_h(ra, rb, rc, islit, lit, 0xff);

        gen_ext_h(ra, rb, rc, islit, lit, 0xff);
        if (likely(rc != 31)) {
            tcg_gen_movi_i64(cpu_ir[rc], 0);
            tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
            tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

        if (likely(rc != 31)) {
            tcg_gen_movi_i64(cpu_ir[rc], 0);
            tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
            tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);

        if (unlikely(rc == 31)){
            tcg_gen_movi_i64(cpu_ir[rc], 0);
            low = tcg_temp_new();
            tcg_gen_movi_tl(low, lit);
            tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
            tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);

        gen_mullv(ra, rb, rc, islit, lit);

        gen_mulqv(ra, rb, rc, islit, lit);
        switch (fpfn) { /* fn11 & 0x3F */

            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
            if (likely(rc != 31)) {
                TCGv_i32 tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                gen_helper_memory_to_s(cpu_fir[rc], tmp);
                tcg_temp_free_i32(tmp);
                tcg_gen_movi_i64(cpu_fir[rc], 0);

            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {

            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);

            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
            if (likely(rc != 31)) {
                TCGv_i32 tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                gen_helper_memory_to_f(cpu_fir[rc], tmp);
                tcg_temp_free_i32(tmp);
                tcg_gen_movi_i64(cpu_fir[rc], 0);

            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
            if (likely(rc != 31)) {
                tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                tcg_gen_movi_i64(cpu_fir[rc], 0);

            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {

            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */

            gen_faddf(ra, rb, rc);

            gen_fsubf(ra, rb, rc);

            gen_fmulf(ra, rb, rc);

            gen_fdivf(ra, rb, rc);

            gen_faddg(ra, rb, rc);

            gen_fsubg(ra, rb, rc);

            gen_fmulg(ra, rb, rc);

            gen_fdivg(ra, rb, rc);

            gen_fcmpgeq(ra, rb, rc);

            gen_fcmpglt(ra, rb, rc);

            gen_fcmpgle(ra, rb, rc);
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */

            gen_fadds(ctx, ra, rb, rc, fn11);

            gen_fsubs(ctx, ra, rb, rc, fn11);

            gen_fmuls(ctx, ra, rb, rc, fn11);

            gen_fdivs(ctx, ra, rb, rc, fn11);

            gen_faddt(ctx, ra, rb, rc, fn11);

            gen_fsubt(ctx, ra, rb, rc, fn11);

            gen_fmult(ctx, ra, rb, rc, fn11);

            gen_fdivt(ctx, ra, rb, rc, fn11);

            gen_fcmptun(ctx, ra, rb, rc, fn11);

            gen_fcmpteq(ctx, ra, rb, rc, fn11);

            gen_fcmptlt(ctx, ra, rb, rc, fn11);

            gen_fcmptle(ctx, ra, rb, rc, fn11);

            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                gen_fcvtst(ctx, rb, rc, fn11);
                gen_fcvtts(ctx, rb, rc, fn11);

            gen_fcvttq(ctx, rb, rc, fn11);

            gen_fcvtqs(ctx, rb, rc, fn11);

            gen_fcvtqt(ctx, rb, rc, fn11);
        if (likely(rc != 31)) {
            tcg_gen_movi_i64(cpu_fir[rc], 0);
            tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
            gen_fcpys(ra, rb, rc);

        gen_fcpysn(ra, rb, rc);

        gen_fcpyse(ra, rb, rc);

        if (likely(ra != 31))
            gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
            TCGv tmp = tcg_const_i64(0);
            gen_helper_store_fpcr(cpu_env, tmp);

        if (likely(ra != 31))
            gen_helper_load_fpcr(cpu_fir[ra], cpu_env);

        gen_fcmov(TCG_COND_EQ, ra, rb, rc);

        gen_fcmov(TCG_COND_NE, ra, rb, rc);

        gen_fcmov(TCG_COND_LT, ra, rb, rc);

        gen_fcmov(TCG_COND_GE, ra, rb, rc);

        gen_fcmov(TCG_COND_LE, ra, rb, rc);

        gen_fcmov(TCG_COND_GT, ra, rb, rc);

        /* ??? I'm pretty sure there's nothing that /sv needs to do that
           /v doesn't do.  The only thing I can think is that /sv is a
           valid instruction merely for completeness in the ISA.  */
        gen_fcvtql_v(ctx, rb, rc);
        switch ((uint16_t)disp16) {

            gen_helper_load_pcc(cpu_ir[ra], cpu_env);
            ret = EXIT_PC_STALE;
            gen_helper_load_pcc(cpu_ir[ra], cpu_env);
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mfpr(ra, insn & 0xffff);

        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        tcg_gen_movi_i64(cpu_pc, 0);
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        ret = EXIT_PC_UPDATED;

        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            addr = tcg_temp_new();
            tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
                /* Longword virtual PTE fetch (hw_ldl/v) */
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                /* Invalid */
                /* Invalid */
                /* Longword virtual access (hw_ldl) */
                /* Quadword virtual access (hw_ldq) */
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                /* Longword virtual access with alt access mode (hw_ldl/a)*/
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
            tcg_temp_free(addr);
        if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
            if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);

        if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
            if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            gen_perr(ra, rb, rc, islit, lit);

        if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
            if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);

        if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
            if (likely(rc != 31)) {
                tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            if (real_islit || ra != 31) {

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            if (real_islit || ra != 31) {

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            if (real_islit || ra != 31) {

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            if (real_islit || ra != 31) {

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            gen_minsb8(ra, rb, rc, islit, lit);

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            gen_minsw4(ra, rb, rc, islit, lit);

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            gen_minub8(ra, rb, rc, islit, lit);

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            gen_minuw4(ra, rb, rc, islit, lit);

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            gen_maxub8(ra, rb, rc, islit, lit);

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            gen_maxuw4(ra, rb, rc, islit, lit);

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            gen_maxsb8(ra, rb, rc, islit, lit);

        if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
            gen_maxsw4(ra, rb, rc, islit, lit);

        if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
            if (likely(rc != 31)) {
                tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                tcg_gen_movi_i64(cpu_ir[rc], 0);

        if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
            TCGv_i32 tmp1 = tcg_temp_new_i32();
            gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
            TCGv tmp2 = tcg_const_i64(0);
            gen_helper_s_to_memory(tmp1, tmp2);
            tcg_temp_free(tmp2);
            tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
            tcg_temp_free_i32(tmp1);
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mtpr(ctx, rb, insn & 0xffff);

        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
               address from EXC_ADDR.  This turns out to be useful for our
               emulation PALcode, so continue to accept it.  */
            TCGv tmp = tcg_temp_new();
            tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
            gen_helper_hw_ret(cpu_env, tmp);
            gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
            ret = EXIT_PC_UPDATED;

        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            addr = tcg_temp_new();
            tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            tcg_gen_movi_i64(addr, disp12);
            val = tcg_temp_new();
            tcg_gen_movi_i64(val, 0);
            switch ((insn >> 12) & 0xF) {
                /* Longword physical access */
                gen_helper_stl_phys(addr, val);
                /* Quadword physical access */
                gen_helper_stq_phys(addr, val);
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(val, cpu_env, addr, val);
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(val, cpu_env, addr, val);
                /* Longword virtual access */
                /* Quadword virtual access */
                /* Longword virtual access with alternate access mode */
                /* Quadword virtual access with alternate access mode */
            tcg_temp_free(addr);
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);

        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);

        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);

        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);

        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);

        ret = gen_bdirect(ctx, ra, disp21);

    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);

    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);

    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);

        ret = gen_bdirect(ctx, ra, disp21);

    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);

    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);

    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);

        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);

        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);

        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);

        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);

        ret = gen_invalid(ctx);
static inline void gen_intermediate_code_internal(CPUAlphaState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
    }
}
void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}