/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4 + 21*5 + 10*5 + 21*6];
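/* Sizing note (editorial, not from the original source): "ir0".."ir9"
   take 10 * 4 bytes each including the NUL, "ir10".."ir30" take 21 * 5,
   and the "fir" spellings are one byte longer, 10 * 5 + 21 * 6 -- which
   is exactly the 10*4 + 21*5 + 10*5 + 21*6 total above.  */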
#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }
    done_init = 1;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}

static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
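/* Editorial note: the "+ 1" on the second tcg_gen_exit_tb encodes the
   goto_tb slot index in the low bits of the returned TB pointer, which
   the execution loop uses to patch the correct one of the two chained
   branches emitted by tcg_gen_goto_tb(0) and tcg_gen_goto_tb(1).  */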
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
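/* Worked example (editorial): for the >=/< case, src == -0.0 is the single
   bit pattern 0x8000000000000000; setcond NE then yields 0, neg keeps it 0,
   and 0 & src == +0.0.  Any other src compares NE, giving an all-ones mask
   that leaves src unchanged.  */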
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
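/* Decoding example (editorial assumption, for illustration only): the
   /SUD qualifier combination on e.g. ADDT would set QUAL_S | QUAL_U |
   QUAL_RM_D on top of the 6-bit function code, i.e. fn11 =
   0x400 | 0x100 | 0x0c0 | 0x20 == 0x5e0.  */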
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
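/* Layout note (editorial): a longword lives in an FP register with bits
   <31:30> at <63:62> and bits <29:0> at <58:29>.  The shri by 29 above
   recovers the low 30 bits, while the sari by 32 plus the sign-extended
   0xc0000000 mask recovers the top two bits already sign-extended.  */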
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                   \
static inline void glue(gen_f, name)(int rb, int rc)    \
{                                                       \
    if (unlikely(rc == 31)) {                           \
        return;                                         \
    }                                                   \
    if (rb != 31) {                                     \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]); \
    } else {                                            \
        TCGv tmp = tcg_const_i64(0);                    \
        gen_helper_ ## name (cpu_fir[rc], tmp);         \
        tcg_temp_free(tmp);                             \
    }                                                   \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0 | 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 0 | 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2 | 0:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 2 | 1:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}

static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
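/* For example (editorial): zapnot_mask(0x01) == 0xff,
   zapnot_mask(0x0f) == 0xffffffff, and zapnot_mask(0xff) == ~0ull,
   matching the ext8u/ext32u/mov special cases in gen_zapnoti below.  */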
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
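/* Worked example (editorial): for B & 7 == 2 the byte offset is 16, so
   ~16 & 63 == 47; shifting right by 47 and then by 1 gives the intended
   48-bit shift.  For B & 7 == 0 the two shifts total 64, producing the
   required zero without an undefined 64-bit shift count.  */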
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                              offsetof(CPUState, cpu_index));
            break;

        default:
            return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);
        }
        return NO_EXIT;
    }
#endif

    return gen_invalid(ctx);
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    if (regno == 250) {
        /* WALL_TIME */
        if (use_icount) {
            gen_io_start();
            gen_helper_get_time(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        }
        gen_helper_get_time(cpu_ir[ra]);
        return NO_EXIT;
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia();
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(tmp);
        break;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}

#endif /* !CONFIG_USER_ONLY */
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
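    /* Worked example (editorial): the (x << 11) >> 11 pair above
       arithmetic-shifts the 21-bit branch field into place, so insn bits
       <20:0> of 0x100000 decode to disp21 = -1048576, while 0x0FFFFF
       stays +1048575.  Likewise (x << 20) >> 20 sign-extends the 12-bit
       displacement.  */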
    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x2B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        case 0x2C:
            /* CVTGF */
            gen_fcvtgf(rb, rc);
            break;
        case 0x2F:
            /* CVTGQ */
            gen_fcvtgq(rb, rc);
            break;
        case 0x3C:
            /* CVTQF */
            gen_fcvtqf(rb, rc);
            break;
        case 0x3E:
            /* CVTQG */
            gen_fcvtqg(rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0xC000:
            /* RPCC */
            if (ra != 31) {
                if (use_icount) {
                    gen_io_start();
                    gen_helper_load_pcc(cpu_ir[ra]);
                    gen_io_end();
                    ret = EXIT_PC_STALE;
                } else {
                    gen_helper_load_pcc(cpu_ir[ra]);
                }
            }
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mfpr(ra, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    else
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
                break;
            }
            goto invalid_opc;
        case 0x30:
            /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    else
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                }
                break;
            }
            goto invalid_opc;
        case 0x31:
            /* PERR */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_perr(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x32:
            /* CTLZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                    else
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
                }
                break;
            }
            goto invalid_opc;
        case 0x33:
            /* CTTZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                    else
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
                }
                break;
            }
            goto invalid_opc;
        case 0x34:
            /* UNPKBW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbw(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x35:
            /* UNPKBL */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbl(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x36:
            /* PKWB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pkwb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x37:
            /* PKLB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pklb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x38:
            /* MINSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x39:
            /* MINSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3A:
            /* MINUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3B:
            /* MINUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3C:
            /* MAXUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3D:
            /* MAXUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3E:
            /* MAXSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3F:
            /* MAXSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x70:
            /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mtpr(ctx, rb, insn & 0xffff);
        }
#endif
        goto invalid_opc;
3103 #ifndef CONFIG_USER_ONLY
3104 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3106 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
3107 address from EXC_ADDR. This turns out to be useful for our
3108 emulation PALcode, so continue to accept it. */
3109 TCGv tmp
= tcg_temp_new();
3110 tcg_gen_ld_i64(tmp
, cpu_env
, offsetof(CPUState
, exc_addr
));
3111 gen_helper_hw_ret(tmp
);
3114 gen_helper_hw_ret(cpu_ir
[rb
]);
3116 ret
= EXIT_PC_UPDATED
;
3122 /* HW_ST (PALcode) */
3123 #ifndef CONFIG_USER_ONLY
3124 if (ctx
->tb
->flags
& TB_FLAGS_PAL_MODE
) {
3126 addr
= tcg_temp_new();
3128 tcg_gen_addi_i64(addr
, cpu_ir
[rb
], disp12
);
3130 tcg_gen_movi_i64(addr
, disp12
);
3134 val
= tcg_temp_new();
3135 tcg_gen_movi_i64(val
, 0);
3137 switch ((insn
>> 12) & 0xF) {
3139 /* Longword physical access */
3140 gen_helper_stl_phys(addr
, val
);
3143 /* Quadword physical access */
3144 gen_helper_stq_phys(addr
, val
);
3147 /* Longword physical access with lock */
3148 gen_helper_stl_c_phys(val
, addr
, val
);
3151 /* Quadword physical access with lock */
3152 gen_helper_stq_c_phys(val
, addr
, val
);
3155 /* Longword virtual access */
3158 /* Quadword virtual access */
3179 /* Longword virtual access with alternate access mode */
3182 /* Quadword virtual access with alternate access mode */
3193 tcg_temp_free(addr
);
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    gen_opc_instr_start[lj++] = 0;
                }
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j) {
            gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",   IMPLVER_2106x, 0 },
    { "ev5",   IMPLVER_21164, 0 },
    { "ev56",  IMPLVER_21164, AMASK_BWX },
    { "pca56", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                               | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064", IMPLVER_2106x, 0 },
    { "21164", IMPLVER_21164, 0 },
    { "21164a", IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264", IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a", IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
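/* Usage note (editorial): these names are matched against the -cpu
   command line option below; e.g. "-cpu ev56" selects IMPLVER_21164
   plus the BWX byte/word extension, while unknown names fall back to
   the ev67-class default chosen in cpu_alpha_init().  */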
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = g_malloc0(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    env->ps = PS_USER_MODE;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#endif
    env->lock_addr = -1;
    env->fen = 1;

    qemu_init_vcpu(env);
    return env;
}
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = gen_opc_pc[pc_pos];
}