/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "host-utils.h"
#include "qemu-common.h"

#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE

#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    CPUAlphaState *env;
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    uint32_t amask;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */

typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
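/* The buffer holds the generated register names: "ir0".."ir9" need 4
   bytes each (including the NUL), "ir10".."ir30" need 5, and likewise
   "fir0".."fir9" need 5 and "fir10".."fir30" need 6, matching the
   pointer advances in alpha_translate_init() below.  */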
#include "gen-icount.h"
static void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init)
        return;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUState, pc), "pc");

    cpu_lock = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, lock), "lock");

#ifdef CONFIG_USER_ONLY
    cpu_uniq = tcg_global_mem_new_i64(TCG_AREG0,
                                      offsetof(CPUState, unique), "uniq");
#endif

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"

    done_init = 1;
}
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
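/* The two load-locked helpers above emulate LDx_L simply by recording
   the locked address in cpu_lock; the store-conditional helpers further
   below succeed only while cpu_lock still equals the store address.
   This is a single-cpu approximation of the Alpha lock semantics.  */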
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
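/* Both store-conditional expansions above generate a small diamond:
   if cpu_lock != address the store is skipped and t0 becomes 0
   (failure); otherwise the store is performed and t0 becomes 1.  In
   either case cpu_lock is reset to -1, so a repeated conditional store
   without a new load-locked fails.  */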
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;

    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static int use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Check for the dest on the same page as the start of the TB.  We
       also want to suppress goto_tb in the case of single-stepping and IO.  */
    return (((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0
            && !ctx->env->singlestep_enabled
            && !(ctx->tb->cflags & CF_LAST_IO));
}
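/* A goto_tb may later be patched to jump directly into another TB, so
   QEMU only allows it when the destination lies in the same guest page
   as the TB being translated; single-stepping and icount/IO TBs must
   instead return to the main loop after every block.  */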
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((long)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
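/* By QEMU convention, tcg_gen_exit_tb((long)tb + n) returns the TB
   pointer with the goto_tb slot index n encoded in its low bits; the
   execution loop uses this to chain the TB's direct-jump slot to the
   next TB.  */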
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((long)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((long)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        int lab_over = gen_new_label();

        /* ??? Consider using either
             movi pc, next
             addi tmp, pc, disp
             movcond pc, cond, 0, tmp, pc
           or a setcond/mask sequence to avoid the branches entirely.
           The current diamond subgraph surely isn't efficient.  */

        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_br(lab_over);
        gen_set_label(lab_true);
        tcg_gen_movi_i64(cpu_pc, dest);
        gen_set_label(lab_over);

        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
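/* Worked example for the >= / < mapping: for src == -0.0
   (0x8000000000000000) the setcond produces 0, the neg yields an
   all-zero mask, and the AND gives +0.0.  Any other src produces an
   all-ones mask and passes through unchanged.  */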
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGCond inv_cond = tcg_invert_cond(cond);
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize.  */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv cmp_tmp;
    int l1;

    if (unlikely(rc == 31)) {
        return;
    }

    cmp_tmp = tcg_temp_new();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(cmp_tmp, 0);
    } else {
        gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_i64(tcg_invert_cond(cond), cmp_tmp, 0, l1);
    tcg_temp_free(cmp_tmp);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
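/* These bits live in the high part of the 11-bit function field (fn11)
   of FP operate instructions.  E.g. CVTTQ/SVIC sets QUAL_S | QUAL_V |
   QUAL_I with chopped rounding, so its qualifier bits (fn11 & 0x7c0)
   equal 0x700; see the special case in gen_fcvttq() below.  */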
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
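/* The rounding mode (and, below, the flush-to-zero flag) is cached
   per-TB in ctx->tb_rm / ctx->tb_ftz, so consecutive FP instructions
   with identical qualifiers update FP_STATUS only once.  */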
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
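/* Illustration: a longword in an Alpha FP register is kept in "register
   format", with the value's bits <31:30> stored in register bits <63:62>
   and bits <29:0> in <58:29>.  The sari recovers <63:62> as a
   sign-extended <31:30>, the shri recovers <58:29> as <29:0>, and the OR
   reassembles the sign-extended 32-bit integer.  */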
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
        tcg_temp_free(tmp);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_ir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 3:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
    if (ra == 31) {                                             \
        va = tcg_const_i64(0);                                  \
    } else {                                                    \
        va = cpu_fir[ra];                                       \
    }                                                           \
    if (rb == 31) {                                             \
        vb = tcg_const_i64(0);                                  \
    } else {                                                    \
        vb = cpu_fir[rb];                                       \
    }                                                           \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31) {                                             \
        tcg_temp_free(va);                                      \
    }                                                           \
    if (rb == 31) {                                             \
        tcg_temp_free(vb);                                      \
    }                                                           \
}
/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
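/* E.g. zapnot_mask(0x0f) == 0x00000000ffffffffull: each set bit of the
   literal keeps the corresponding byte.  Literals 0x01/0x03/0x0f thus
   describe 8/16/32-bit zero-extensions, which gen_zapnoti() below
   special-cases into single TCG ops.  */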
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64 (cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
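/* Worked example of the split shift: for (B & 7) == 2 the desired
   right shift is 64 - 16 = 48; the code computes ~16 & 63 = 47, then
   shifts right by 47 and once more by 1.  For (B & 7) == 0 it shifts
   by 63 and then 1, producing the required zero without relying on an
   undefined 64-bit shift count.  */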
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(addlv)
ARITH3(sublv)
ARITH3(addqv)
ARITH3(subqv)
ARITH3(umulh)
ARITH3(mullv)
ARITH3(mulqv)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
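    /* The displacement decodes sign-extend a bitfield with a shift pair:
       disp21 moves the 21-bit field up to bit 31 and arithmetic-shifts
       it back, so a field of 0x100000 (sign bit set) yields -1048576.  */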
    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
#ifdef CONFIG_USER_ONLY
        if (palcode == 0x9E) {
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_uniq);
            break;
        } else if (palcode == 0x9F) {
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_uniq, cpu_ir[IR_A0]);
            break;
        }
#endif
        if (palcode >= 0x80 && palcode < 0xC0) {
            /* Unprivileged PAL call */
            gen_excp(ctx, EXCP_CALL_PAL + ((palcode & 0x3F) << 6), 0);
            /* PC updated by gen_excp.  */
            ret = EXIT_PC_UPDATED;
            break;
        }
#ifndef CONFIG_USER_ONLY
        if (palcode < 0x40) {
            /* Privileged PAL code */
            if (ctx->mem_idx & 1)
                goto invalid_opc;
            gen_excp(ctx, EXCP_CALL_PALP + ((palcode & 0x3F) << 6), 0);
            ret = EXIT_PC_UPDATED;
            break;
        }
#endif
        /* Invalid PAL call */
        goto invalid_opc;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                default:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x2B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op.  */
            break;
        case 0x0400:
            /* EXCB */
            /* No-op.  */
            break;
        case 0x4000:
            /* MB */
            /* No-op */
            break;
        case 0x4400:
            /* WMB */
            /* No-op */
            break;
        case 0x8000:
            /* FETCH */
            /* No-op */
            break;
        case 0xA000:
            /* FETCH_M */
            /* No-op */
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            gen_rx(ra, 0);
            break;
        case 0xF000:
            /* RS */
            gen_rx(ra, 1);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (ra != 31) {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_perr(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbw (rb, rc);
            break;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbl (rb, rc);
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pkwb (rb, rc);
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pklb (rb, rc);
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
            ret = EXIT_PC_STALE;
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = EXIT_PC_UPDATED;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0x6:
            case 0x7:
            case 0x8:
            case 0x9:
            case 0xA:
            case 0xB:
                /* Invalid */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
            case 0xF:
                /* Invalid */
                goto invalid_opc;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    default:
    invalid_opc:
        gen_invalid(ctx);
        /* PC updated by gen_excp.  */
        ret = EXIT_PC_UPDATED;
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.env = env;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        if (ret == NO_EXIT) {
            /* If we reach a page boundary, are single stepping,
               or exhaust instruction count, stop generation.  */
            if (env->singlestep_enabled) {
                gen_excp(&ctx, EXCP_DEBUG, 0);
                ret = EXIT_PC_UPDATED;
            } else if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0
                       || gen_opc_ptr >= gen_opc_end
                       || num_insns >= max_insns
                       || singlestep) {
                ret = EXIT_PC_STALE;
            }
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        tcg_gen_exit_tb(0);
        break;
    default:
        abort();
    }

    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",     IMPLVER_2106x, 0 },
    { "ev5",     IMPLVER_21164, 0 },
    { "ev56",    IMPLVER_21164, AMASK_BWX },
    { "pca56",   IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",     IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default.  */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#endif

    /* Initialize IPR */
#if defined (CONFIG_USER_ONLY)
    env->ipr[IPR_EXC_ADDR] = 0;
    env->ipr[IPR_EXC_SUM] = 0;
    env->ipr[IPR_EXC_MASK] = 0;
#else
    // hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    // env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    // env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    // env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    // env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    // env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;
#endif

    qemu_init_vcpu(env);
    return env;
}
*env
, TranslationBlock
*tb
,
3318 unsigned long searched_pc
, int pc_pos
, void *puc
)
3320 env
->pc
= gen_opc_pc
[pc_pos
];