/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "host-utils.h"
#include "qemu-common.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;

struct TranslationBlock *tb;

/* Current rounding mode for this TB.  */
/* Current flush-to-zero setting for this TB.  */

/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

/* We have emitted one or more goto_tb.  No fixup required.  */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB.  */

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed.  */

/* We are ending the TB with a noreturn function call, e.g. longjmp.
   No following code will be executed.  */

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;

static char cpu_reg_names[10*4+21*5 + 10*5+21*6];

#include "gen-icount.h"
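/* Note: cpu_ir[] and cpu_fir[] only cover registers 0-30.  On Alpha,
   integer register $31 and FP register $f31 always read as zero and
   discard writes, so instead of backing them with TCG globals the
   translator special-cases ra/rb/rc == 31 throughout this file.  */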
static void alpha_translate_init(void)
    static int done_init = 0;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, lock_addr),
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUState, lock_st_addr),
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, lock_value),

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUState, usp), "usp");

    /* register helpers */
static void gen_excp_1(int exception, int error_code)
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);

static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;

static inline ExitStatus gen_invalid(DisasContext *ctx)
    return gen_excp(ctx, EXCP_OPCDEC, 0);
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
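/* Note: the load-locked helpers above record both the locked address and the
   value that was loaded in cpu_lock_addr/cpu_lock_value; the matching
   store-conditional (gen_store_conditional below) compares both before
   committing the store.  */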
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                int ra, int rb, int32_t disp16, int fp,
    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {

    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    tcg_gen_andi_i64(addr, addr, ~0x7);
    tcg_gen_movi_i64(addr, disp16);

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free_i32(tmp32);

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free_i32(tmp32);
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                 int ra, int rb, int32_t disp16, int fp,
    addr = tcg_temp_new();
    tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    tcg_gen_andi_i64(addr, addr, ~0x7);
    tcg_gen_movi_i64(addr, disp16);

    va = tcg_const_i64(0);
    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
    /* ??? Don't bother storing anything.  The user can't tell
       the difference, since the zero register always reads zero.  */
#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
    addr = tcg_temp_local_new();

    tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    tcg_gen_movi_i64(addr, disp16);

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    int lab_fail, lab_done;

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

    val = tcg_temp_new();
    tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
    tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
    tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

    tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_gen_movi_i64(cpu_ir[ra], 1);
    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_i64(cpu_ir[ra], 0);

    gen_set_label(lab_done);
    tcg_gen_movi_i64(cpu_lock_addr, -1);
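/* Note: writing -1 to cpu_lock_addr drops the reservation, so a second
   STx_C without an intervening LDx_L compares against a lock address that
   can never match and takes the failure path (ra is set to 0).  */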
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));

static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
    uint64_t dest = ctx->pc + (disp << 2);

    tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);

        int lab_over = gen_new_label();

        /* ??? Consider using either
             movcond pc, cond, 0, tmp, pc
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
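/* Note: the two tcg_gen_exit_tb() values above differ only in their low
   bits ((tcg_target_long)ctx->tb vs. ctx->tb + 1); those low bits identify
   which of the two goto_tb slots was taken, so the TB can later be chained
   to the corresponding successor.  */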
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
        cmp_tmp = tcg_temp_new();
        tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
    uint64_t mzero = 1ull << 63;

    /* For <= or >, the -0.0 value directly compares the way we want.  */
    tcg_gen_mov_i64(dest, src);

    /* For == or !=, we can simply mask off the sign bit and compare.  */
    tcg_gen_andi_i64(dest, src, mzero - 1);

    /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
    tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
    tcg_gen_neg_i64(dest, dest);
    tcg_gen_and_i64(dest, dest, src);
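/* Example: src = 0x8000000000000000 (-0.0).  For EQ/NE the AND with
   mzero - 1 clears the sign bit and yields 0, so -0.0 compares equal to
   +0.0; for GE/LT the setcond/neg/and sequence maps -0.0 to +0.0 while
   leaving every other value unchanged.  */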
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
    TCGCond inv_cond = tcg_invert_cond(cond);

    if (unlikely(rc == 31))

    l1 = gen_new_label();

        TCGv tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);

        tcg_gen_movi_i64(cpu_ir[rc], lit);
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
    if (unlikely(rc == 31)) {

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
        tcg_gen_movi_i64(cpu_fir[rc], 0);
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
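/* These qualifier bits come from the 11-bit function field (fn11) of the FP
   operate format.  gen_qual_roundmode() and gen_qual_flushzero() below mask
   out the relevant bits and only re-emit the softfloat state updates when
   the qualifier differs from the one already in effect for this TB
   (ctx->tb_rm / ctx->tb_ftz).  */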
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {

    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, float_round_nearest_even);
    tcg_gen_movi_i32(tmp, float_round_to_zero);
    tcg_gen_movi_i32(tmp, float_round_down);
    tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
    gen_helper_setroundmode(tmp);

    tcg_temp_free_i32(tmp);
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
    if (fn11 == ctx->tb_ftz) {

    tmp = tcg_temp_new_i32();
    /* Underflow is enabled, use the FPCR setting.  */
    tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    /* Underflow is disabled, force flush-to-zero.  */
    tcg_gen_movi_i32(tmp, 1);

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
    gen_helper_setflushzero(tmp);

    tcg_temp_free_i32(tmp);
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
    TCGv val = tcg_temp_new();
    tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
        gen_helper_ieee_input(val, cpu_fir[reg]);
static void gen_fp_exc_clear(void)
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
    gen_helper_fp_exc_clear();
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
    gen_helper_fp_exc_get(exc);

    tcg_gen_andi_i32(exc, exc, ~ignore);

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    gen_helper_fp_exc_raise_s(exc, reg);
    gen_helper_fp_exc_raise(exc, reg);

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);

static inline void gen_fp_exc_raise(int rc, int fn11)
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
static void gen_fcvtlq(int rb, int rc)
    if (unlikely(rc == 31)) {
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

static void gen_fcvtql(int rb, int rc)
    if (unlikely(rc == 31)) {
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
    int lab = gen_new_label();
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
    tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
    gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
    if (unlikely(rc == 31)) {                               \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \

/* ??? VAX instruction qualifiers ignored.  */
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);

    gen_fp_exc_raise(rc, fn11);

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {

    /* No need to set flushzero, since we have an integer output.  */
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    gen_helper_cvttq_c(cpu_fir[rc], vb);
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);

        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {

    gen_qual_roundmode(ctx, fn11);

    vb = tcg_const_i64(0);

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    helper(cpu_fir[rc], vb);
    gen_fp_exc_raise(rc, fn11);
    helper(cpu_fir[rc], vb);

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
    if (unlikely(rc == 31)) {

    vmask = tcg_const_i64(mask);

    va = tcg_temp_new_i64();
    tcg_gen_mov_i64(va, cpu_fir[ra]);
    tcg_gen_andc_i64(va, vmask, va);
    tcg_gen_and_i64(va, va, vmask);

    vb = tcg_temp_new_i64();
    tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);

    switch (za << 1 | zb) {
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        tcg_gen_mov_i64(cpu_fir[rc], va);
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        tcg_gen_movi_i64(cpu_fir[rc], 0);

    tcg_temp_free(vmask);

static inline void gen_fcpys(int ra, int rb, int rc)
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);

static inline void gen_fcpysn(int ra, int rb, int rc)
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);

static inline void gen_fcpyse(int ra, int rb, int rc)
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
    if (unlikely(rc == 31)) {                                   \
        va = tcg_const_i64(0);                                  \
        vb = tcg_const_i64(0);                                  \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
        tcg_temp_free(va);                                      \
        tcg_temp_free(vb);                                      \

/* ??? VAX instruction qualifiers ignored.  */
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);

    gen_fp_exc_raise(rc, fn11);

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);

    gen_fp_exc_raise(rc, fn11);

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
static inline uint64_t zapnot_mask(uint8_t lit)
    for (i = 0; i < 8; ++i) {
        mask |= 0xffull << (i * 8);

/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
        tcg_gen_movi_i64(dest, 0);
        tcg_gen_ext8u_i64(dest, src);
        tcg_gen_ext16u_i64(dest, src);
        tcg_gen_ext32u_i64(dest, src);
        tcg_gen_mov_i64(dest, src);
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
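/* Example: lit = 0x0f keeps the low four bytes, i.e. a mask of
   0x00000000ffffffff, which is why the 0x01/0x03/0x0f/0xff cases above can
   be lowered to ext8u/ext16u/ext32u/mov instead of a full immediate AND.  */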
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
    if (unlikely(rc == 31))
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
    if (unlikely(rc == 31))
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
    if (unlikely(rc == 31))
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
    if (unlikely(rc == 31))
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
    if (unlikely(rc == 31))
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
    if (unlikely(rc == 31))
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
    if (unlikely(rc == 31))
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
    if (unlikely(rc == 31))
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
/* Code to call arith3 helpers */
#define ARITH3(name)                                                    \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,  \
    if (unlikely(rc == 31))                                             \
        TCGv tmp = tcg_const_i64(lit);                                  \
        gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);               \
        tcg_temp_free(tmp);                                             \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);       \
        TCGv tmp1 = tcg_const_i64(0);                                   \
        TCGv tmp2 = tcg_const_i64(lit);                                 \
        gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);                   \
        tcg_temp_free(tmp2);                                            \
        gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);             \
    tcg_temp_free(tmp1);                                                \

#define MVIOP2(name)                                            \
static inline void glue(gen_, name)(int rb, int rc)             \
    if (unlikely(rc == 31))                                     \
        if (unlikely(rb == 31))                                 \
            tcg_gen_movi_i64(cpu_ir[rc], 0);                    \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);       \
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
    if (unlikely(rc == 31)) {

    va = tcg_const_i64(0);
    vb = tcg_const_i64(lit);

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);
static void gen_rx(int ra, int set)
    tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        /* No-op inside QEMU.  */
        tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
        tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
        return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0xbf);

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        /* No-op inside QEMU.  */
        /* No-op inside QEMU.  */
        tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUState, vptptr));
        tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
        tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);

        /* Note that we already know we're in kernel mode, so we know
           that PS only contains the 3 IPL bits.  */
        tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));

        /* But make sure and store only the 3 IPL bits from the user.  */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
        tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUState, ps));

        tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUState, ps));
        tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
        tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
        tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                          offsetof(CPUState, cpu_index));

        return gen_excp(ctx, EXCP_CALL_PAL, palcode & 0x3f);

    return gen_invalid(ctx);
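/* PAL calls that are not special-cased inline above are forwarded to the
   emulated PALcode image by raising a real EXCP_CALL_PAL exception; palcode
   values outside the handled ranges decode as an illegal operand via
   gen_invalid().  */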
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

        return offsetof(CPUAlphaState, shadow[pr - 32]);
        return offsetof(CPUAlphaState, scratch[pr - 40]);
        return offsetof(CPUAlphaState, alarm_expire);
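/* Note: cpu_pr_data() tags the returned env offset with PR_BYTE or PR_LONG
   to encode the access width; gen_mfpr()/gen_mtpr() below strip the tag
   (data & ~PR_BYTE / ~PR_LONG) and choose the matching load/store width.  */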
static ExitStatus gen_mfpr(int ra, int regno)
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    gen_helper_get_time(cpu_ir[ra]);
    return EXIT_PC_STALE;
    gen_helper_get_time(cpu_ir[ra]);

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
    tmp = tcg_const_i64(0);

    gen_helper_tbis(tmp);

    tmp = tcg_const_i64(1);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUState, halted));
    return gen_excp(ctx, EXCP_HLT, 0);

    gen_helper_halt(tmp);
    return EXIT_PC_STALE;

    gen_helper_set_alarm(tmp);

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    data = cpu_pr_data(regno);

    if (data & PR_BYTE) {
        tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
        tcg_gen_st_i64(tmp, cpu_env, data);

#endif /* !USER_ONLY*/
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;

    /* Decode all instruction fields */
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
    lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn11 = (insn >> 5) & 0x000007FF;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
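    /* Field layout recap: ra/rb live in bits <25:21>/<20:16>, bit 12 selects
       the literal form whose 8-bit literal occupies <20:13>, the memory
       format carries a signed 16-bit displacement and the branch format a
       signed 21-bit displacement; the shift-left-then-arithmetic-shift-right
       pairs above simply sign-extend those bitfields.  */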
    ret = gen_call_pal(ctx, palcode);

    if (likely(ra != 31)) {
        tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
        tcg_gen_movi_i64(cpu_ir[ra], disp16);

    if (likely(ra != 31)) {
        tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
        tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);

    if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
    gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
    if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
    gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
    gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
    gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
    if (likely(rc != 31)) {
        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
        tcg_gen_movi_i64(cpu_ir[rc], lit);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);

    if (likely(rc != 31)) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
        tcg_gen_addi_i64(tmp, tmp, lit);
        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
        tcg_gen_movi_i64(cpu_ir[rc], lit);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);

    if (likely(rc != 31)) {
        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
        tcg_gen_movi_i64(cpu_ir[rc], -lit);
        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

    if (likely(rc != 31)) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
        tcg_gen_subi_i64(tmp, tmp, lit);
        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
        tcg_gen_movi_i64(cpu_ir[rc], -lit);
        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

    gen_cmpbge(ra, rb, rc, islit, lit);

    if (likely(rc != 31)) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
        tcg_gen_addi_i64(tmp, tmp, lit);
        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
        tcg_gen_movi_i64(cpu_ir[rc], lit);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);

    if (likely(rc != 31)) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
        tcg_gen_subi_i64(tmp, tmp, lit);
        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
        tcg_gen_movi_i64(cpu_ir[rc], -lit);
        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

    gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);

    if (likely(rc != 31)) {
        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], lit);
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

    if (likely(rc != 31)) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], lit);
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

    if (likely(rc != 31)) {
        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], -lit);
        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);

    if (likely(rc != 31)) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], -lit);
        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);

    gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);

    if (likely(rc != 31)) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], lit);
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

    if (likely(rc != 31)) {
        TCGv tmp = tcg_temp_new();
        tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], -lit);
        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);

    gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
    gen_addlv(ra, rb, rc, islit, lit);
    gen_sublv(ra, rb, rc, islit, lit);
    gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
    gen_addqv(ra, rb, rc, islit, lit);
    gen_subqv(ra, rb, rc, islit, lit);
    gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
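    /* The scaled S4ADD/S8ADD/S4SUB/S8SUB variants above shift ra left by 2
       or 3 (scale by 4 or 8) before the add or subtract; the longword
       (32-bit) forms then sign-extend the result with ext32s_i64, matching
       Alpha's canonical representation of longword values in 64-bit
       registers.  */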
    if (likely(rc != 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
        tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);

    if (likely(rc != 31)) {
        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], 0);

    gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
    gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);

    if (likely(rc != 31)) {
        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], lit);
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

    gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
    gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);

    if (likely(rc != 31)) {
        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);

    if (likely(rc != 31)) {
        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], lit);
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);

    gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
    gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);

    if (likely(rc != 31)) {
        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);

    if (likely(rc != 31)) {
        uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
        tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);

    gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
    gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);

    tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
    gen_msk_l(ra, rb, rc, islit, lit, 0x01);
    gen_ext_l(ra, rb, rc, islit, lit, 0x01);
    gen_ins_l(ra, rb, rc, islit, lit, 0x01);
    gen_msk_l(ra, rb, rc, islit, lit, 0x03);
    gen_ext_l(ra, rb, rc, islit, lit, 0x03);
    gen_ins_l(ra, rb, rc, islit, lit, 0x03);
    gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
    gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
    gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
    gen_zap(ra, rb, rc, islit, lit);
    gen_zapnot(ra, rb, rc, islit, lit);
    gen_msk_l(ra, rb, rc, islit, lit, 0xff);

    if (likely(rc != 31)) {
        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
        tcg_temp_free(shift);
        tcg_gen_movi_i64(cpu_ir[rc], 0);

    gen_ext_l(ra, rb, rc, islit, lit, 0xff);

    if (likely(rc != 31)) {
        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
        tcg_temp_free(shift);
        tcg_gen_movi_i64(cpu_ir[rc], 0);

    gen_ins_l(ra, rb, rc, islit, lit, 0xff);

    if (likely(rc != 31)) {
        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
        TCGv shift = tcg_temp_new();
        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
        tcg_temp_free(shift);
        tcg_gen_movi_i64(cpu_ir[rc], 0);

    gen_msk_h(ra, rb, rc, islit, lit, 0x03);
    gen_ins_h(ra, rb, rc, islit, lit, 0x03);
    gen_ext_h(ra, rb, rc, islit, lit, 0x03);
    gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
    gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
    gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
    gen_msk_h(ra, rb, rc, islit, lit, 0xff);
    gen_ins_h(ra, rb, rc, islit, lit, 0xff);
    gen_ext_h(ra, rb, rc, islit, lit, 0xff);

    if (likely(rc != 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);

    if (likely(rc != 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], 0);
        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);

    gen_umulh(ra, rb, rc, islit, lit);
    gen_mullv(ra, rb, rc, islit, lit);
    gen_mulqv(ra, rb, rc, islit, lit);

    switch (fpfn) { /* fn11 & 0x3F */
    if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
    if (likely(rc != 31)) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
        gen_helper_memory_to_s(cpu_fir[rc], tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_movi_i64(cpu_fir[rc], 0);

    if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
    if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
        gen_fsqrts(ctx, rb, rc, fn11);

    if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
    if (likely(rc != 31)) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
        gen_helper_memory_to_f(cpu_fir[rc], tmp);
        tcg_temp_free_i32(tmp);
        tcg_gen_movi_i64(cpu_fir[rc], 0);

    if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
    if (likely(rc != 31)) {
        tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
        tcg_gen_movi_i64(cpu_fir[rc], 0);

    if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
    if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
        gen_fsqrtt(ctx, rb, rc, fn11);
    /* VAX floating point */
    /* XXX: rounding mode and trap are ignored (!) */
    switch (fpfn) { /* fn11 & 0x3F */
    gen_faddf(ra, rb, rc);
    gen_fsubf(ra, rb, rc);
    gen_fmulf(ra, rb, rc);
    gen_fdivf(ra, rb, rc);
    gen_faddg(ra, rb, rc);
    gen_fsubg(ra, rb, rc);
    gen_fmulg(ra, rb, rc);
    gen_fdivg(ra, rb, rc);
    gen_fcmpgeq(ra, rb, rc);
    gen_fcmpglt(ra, rb, rc);
    gen_fcmpgle(ra, rb, rc);
    /* IEEE floating-point */
    switch (fpfn) { /* fn11 & 0x3F */
    gen_fadds(ctx, ra, rb, rc, fn11);
    gen_fsubs(ctx, ra, rb, rc, fn11);
    gen_fmuls(ctx, ra, rb, rc, fn11);
    gen_fdivs(ctx, ra, rb, rc, fn11);
    gen_faddt(ctx, ra, rb, rc, fn11);
    gen_fsubt(ctx, ra, rb, rc, fn11);
    gen_fmult(ctx, ra, rb, rc, fn11);
    gen_fdivt(ctx, ra, rb, rc, fn11);
    gen_fcmptun(ctx, ra, rb, rc, fn11);
    gen_fcmpteq(ctx, ra, rb, rc, fn11);
    gen_fcmptlt(ctx, ra, rb, rc, fn11);
    gen_fcmptle(ctx, ra, rb, rc, fn11);
    if (fn11 == 0x2AC || fn11 == 0x6AC) {
        gen_fcvtst(ctx, rb, rc, fn11);
        gen_fcvtts(ctx, rb, rc, fn11);
    gen_fcvttq(ctx, rb, rc, fn11);
    gen_fcvtqs(ctx, rb, rc, fn11);
    gen_fcvtqt(ctx, rb, rc, fn11);
    if (likely(rc != 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);

    gen_fcpys(ra, rb, rc);
    gen_fcpysn(ra, rb, rc);
    gen_fcpyse(ra, rb, rc);

    if (likely(ra != 31))
        gen_helper_store_fpcr(cpu_fir[ra]);
        TCGv tmp = tcg_const_i64(0);
        gen_helper_store_fpcr(tmp);

    if (likely(ra != 31))
        gen_helper_load_fpcr(cpu_fir[ra]);

    gen_fcmov(TCG_COND_EQ, ra, rb, rc);
    gen_fcmov(TCG_COND_NE, ra, rb, rc);
    gen_fcmov(TCG_COND_LT, ra, rb, rc);
    gen_fcmov(TCG_COND_GE, ra, rb, rc);
    gen_fcmov(TCG_COND_LE, ra, rb, rc);
    gen_fcmov(TCG_COND_GT, ra, rb, rc);

    /* ??? I'm pretty sure there's nothing that /sv needs to do that
       /v doesn't do.  The only thing I can think is that /sv is a
       valid instruction merely for completeness in the ISA.  */
    gen_fcvtql_v(ctx, rb, rc);
    switch ((uint16_t)disp16) {
    gen_helper_load_pcc(cpu_ir[ra]);
    ret = EXIT_PC_STALE;
    gen_helper_load_pcc(cpu_ir[ra]);
    /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
    if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
        return gen_mfpr(ra, insn & 0xffff);

    /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
       prediction stack action, which of course we don't implement.  */
    tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
    tcg_gen_movi_i64(cpu_pc, 0);
    tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    ret = EXIT_PC_UPDATED;
    /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
    if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
        addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
        tcg_gen_movi_i64(addr, disp12);
        switch ((insn >> 12) & 0xF) {
            /* Longword physical access (hw_ldl/p) */
            gen_helper_ldl_phys(cpu_ir[ra], addr);
            /* Quadword physical access (hw_ldq/p) */
            gen_helper_ldq_phys(cpu_ir[ra], addr);
            /* Longword physical access with lock (hw_ldl_l/p) */
            gen_helper_ldl_l_phys(cpu_ir[ra], addr);
            /* Quadword physical access with lock (hw_ldq_l/p) */
            gen_helper_ldq_l_phys(cpu_ir[ra], addr);
            /* Longword virtual PTE fetch (hw_ldl/v) */
            /* Quadword virtual PTE fetch (hw_ldq/v) */
            /* Invalid */
            /* Invalid */
            /* Longword virtual access (hw_ldl) */
            /* Quadword virtual access (hw_ldq) */
            /* Longword virtual access with protection check (hw_ldl/w) */
            tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
            /* Quadword virtual access with protection check (hw_ldq/w) */
            tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
            /* Longword virtual access with alt access mode (hw_ldl/a) */
            /* Quadword virtual access with alt access mode (hw_ldq/a) */
            /* Longword virtual access with alternate access mode and
               protection checks (hw_ldl/wa) */
            tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
            /* Quadword virtual access with alternate access mode and
               protection checks (hw_ldq/wa) */
            tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
        tcg_temp_free(addr);
    if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
    if (likely(rc != 31)) {
        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
        tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);

    if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
        if (likely(rc != 31)) {
            tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
            tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);

    if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
        if (likely(rc != 31)) {
            tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
            gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);

    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        gen_perr(ra, rb, rc, islit, lit);

    if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
        if (likely(rc != 31)) {
            tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
            gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);

    if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
        if (likely(rc != 31)) {
            tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
            gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);

    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        if (real_islit || ra != 31) {
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        if (real_islit || ra != 31) {
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        if (real_islit || ra != 31) {
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        if (real_islit || ra != 31) {

    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        gen_minsb8(ra, rb, rc, islit, lit);
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        gen_minsw4(ra, rb, rc, islit, lit);
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        gen_minub8(ra, rb, rc, islit, lit);
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        gen_minuw4(ra, rb, rc, islit, lit);
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        gen_maxub8(ra, rb, rc, islit, lit);
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        gen_maxuw4(ra, rb, rc, islit, lit);
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        gen_maxsb8(ra, rb, rc, islit, lit);
    if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
        gen_maxsw4(ra, rb, rc, islit, lit);

    if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
    if (likely(rc != 31)) {
        tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
        tcg_gen_movi_i64(cpu_ir[rc], 0);

    if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
        TCGv_i32 tmp1 = tcg_temp_new_i32();
        gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
        TCGv tmp2 = tcg_const_i64(0);
        gen_helper_s_to_memory(tmp1, tmp2);
        tcg_temp_free(tmp2);
        tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
        tcg_temp_free_i32(tmp1);
    /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
    if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
        return gen_mtpr(ctx, rb, insn & 0xffff);

    /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
    if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
        /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
           address from EXC_ADDR.  This turns out to be useful for our
           emulation PALcode, so continue to accept it.  */
        TCGv tmp = tcg_temp_new();
        tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUState, exc_addr));
        gen_helper_hw_ret(tmp);
        gen_helper_hw_ret(cpu_ir[rb]);
        ret = EXIT_PC_UPDATED;

    /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
    if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
        addr = tcg_temp_new();
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
        tcg_gen_movi_i64(addr, disp12);

        val = tcg_temp_new();
        tcg_gen_movi_i64(val, 0);

        switch ((insn >> 12) & 0xF) {
            /* Longword physical access */
            gen_helper_stl_phys(addr, val);
            /* Quadword physical access */
            gen_helper_stq_phys(addr, val);
            /* Longword physical access with lock */
            gen_helper_stl_c_phys(val, addr, val);
            /* Quadword physical access with lock */
            gen_helper_stq_c_phys(val, addr, val);
            /* Longword virtual access */
            /* Quadword virtual access */
            /* Longword virtual access with alternate access mode */
            /* Quadword virtual access with alternate access mode */
        tcg_temp_free(addr);
    gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
    gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
    gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
    gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
    gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
    gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
    gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
    gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
    gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
    gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
    gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
    gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
    gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
    gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
    ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
    ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
    ret = gen_bdirect(ctx, ra, disp21);
case 0x31: /* FBEQ */
    ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
case 0x32: /* FBLT */
    ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
case 0x33: /* FBLE */
    ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
    ret = gen_bdirect(ctx, ra, disp21);
case 0x35: /* FBNE */
    ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
case 0x36: /* FBGE */
    ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
case 0x37: /* FBGT */
    ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
    ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
    ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
    ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
    ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
    ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
    ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
    ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
    ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
    ret = gen_invalid(ctx);
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint16_t *gen_opc_end;

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.mem_idx = cpu_mmu_index(env);

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    /* Similarly for flush-to-zero.  */

    max_insns = tb->cflags & CF_COUNT_MASK;
    max_insns = CF_COUNT_MASK;

    if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
            if (bp->pc == ctx.pc) {
                gen_excp(&ctx, EXCP_DEBUG, 0);

        j = gen_opc_ptr - gen_opc_buf;
            gen_opc_instr_start[lj++] = 0;
        gen_opc_pc[lj] = ctx.pc;
        gen_opc_instr_start[lj] = 1;
        gen_opc_icount[lj] = num_insns;

        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))

        insn = ldl_code(ctx.pc);

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);

        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
            && ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                || gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || env->singlestep_enabled)) {
            ret = EXIT_PC_STALE;
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {

        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    case EXIT_PC_UPDATED:
        if (env->singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;

        j = gen_opc_ptr - gen_opc_buf;
            gen_opc_instr_start[lj++] = 0;
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;

    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
    gen_intermediate_code_internal(env, tb, 0);

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
    gen_intermediate_code_internal(env, tb, 1);
static const struct cpu_def_t cpu_defs[] = {
    { "ev4",      IMPLVER_2106x, 0 },
    { "ev5",      IMPLVER_21164, 0 },
    { "ev56",     IMPLVER_21164, AMASK_BWX },
    { "pca56",    IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",      IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",     IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                  | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",     IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                  | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",    IMPLVER_2106x, 0 },
    { "21164",    IMPLVER_21164, 0 },
    { "21164a",   IMPLVER_21164, AMASK_BWX },
    { "21164pc",  IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",    IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",   IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                  | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
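/* The AMASK bits listed here advertise optional architecture extensions
   (e.g. AMASK_BWX for byte/word instructions, AMASK_MVI for the multimedia
   instructions, AMASK_CIX for the count instructions); they feed the
   TB_FLAGS_AMASK_* checks in translate_one(), which is presumably why the
   newer cores simply OR in more bits.  */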
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
    int implver, amask, i, max;

    env = g_malloc0(sizeof(CPUAlphaState));
    alpha_translate_init();

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;

    env->implver = implver;

#if defined (CONFIG_USER_ONLY)
    env->ps = PS_USER_MODE;
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
    env->lock_addr = -1;

    qemu_init_vcpu(env);
void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
    env->pc = gen_opc_pc[pc_pos];