/*
 * Alpha emulation cpu translation for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
27 #include "host-utils.h"
29 #include "qemu-common.h"
35 #undef ALPHA_DEBUG_DISAS
36 #define CONFIG_SOFTFLOAT_INLINE
38 #ifdef ALPHA_DEBUG_DISAS
39 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
41 # define LOG_DISAS(...) do { } while (0)
44 typedef struct DisasContext DisasContext
;
struct DisasContext {
    uint64_t pc;
    int mem_idx;
#if !defined (CONFIG_USER_ONLY)
    int pal_mode;
#endif
    CPUAlphaState *env;
    uint32_t amask;

    /* Current rounding mode for this TB. */
    int tb_rm;
    /* Current flush-to-zero setting for this TB. */
    int tb_ftz;
};
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock;
#ifdef CONFIG_USER_ONLY
static TCGv cpu_uniq;
#endif

static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
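/* A sketch of the size arithmetic above (added note, not from the original):
   "ir0".."ir9" need 4 bytes each including the NUL terminator and
   "ir10".."ir30" need 5, while "fir0".."fir9" take 5 and "fir10".."fir30"
   take 6 — so 10*4 + 21*5 + 10*5 + 21*6 bytes exactly hold all 62 names. */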
73 #include "gen-icount.h"
75 static void alpha_translate_init(void)
79 static int done_init
= 0;
84 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
87 for (i
= 0; i
< 31; i
++) {
88 sprintf(p
, "ir%d", i
);
89 cpu_ir
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
90 offsetof(CPUState
, ir
[i
]), p
);
91 p
+= (i
< 10) ? 4 : 5;
93 sprintf(p
, "fir%d", i
);
94 cpu_fir
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
95 offsetof(CPUState
, fir
[i
]), p
);
96 p
+= (i
< 10) ? 5 : 6;
99 cpu_pc
= tcg_global_mem_new_i64(TCG_AREG0
,
100 offsetof(CPUState
, pc
), "pc");
102 cpu_lock
= tcg_global_mem_new_i64(TCG_AREG0
,
103 offsetof(CPUState
, lock
), "lock");
105 #ifdef CONFIG_USER_ONLY
106 cpu_uniq
= tcg_global_mem_new_i64(TCG_AREG0
,
107 offsetof(CPUState
, unique
), "uniq");
110 /* register helpers */
static inline void gen_excp(DisasContext *ctx, int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}

static inline void gen_invalid(DisasContext *ctx)
{
    gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld32s(t0, t1, flags);
}

static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_mov_i64(cpu_lock, t1);
    tcg_gen_qemu_ld64(t0, t1, flags);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr;

    if (unlikely(ra == 31))
        return;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (fp)
        tcg_gen_qemu_load(cpu_fir[ra], addr, ctx->mem_idx);
    else
        tcg_gen_qemu_load(cpu_ir[ra], addr, ctx->mem_idx);
    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}

static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}

static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stl_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st32(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}

static inline void gen_qemu_stq_c(TCGv t0, TCGv t1, int flags)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    tcg_gen_brcond_i64(TCG_COND_NE, cpu_lock, t1, l1);
    tcg_gen_qemu_st64(t0, t1, flags);
    tcg_gen_movi_i64(t0, 1);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(t0, 0);
    gen_set_label(l2);
    tcg_gen_movi_i64(cpu_lock, -1);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear, int local)
{
    TCGv addr;

    if (local)
        addr = tcg_temp_local_new();
    else
        addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear)
            tcg_gen_andi_i64(addr, addr, ~0x7);
    } else {
        if (clear)
            disp16 &= ~0x7;
        tcg_gen_movi_i64(addr, disp16);
    }
    if (ra != 31) {
        if (fp)
            tcg_gen_qemu_store(cpu_fir[ra], addr, ctx->mem_idx);
        else
            tcg_gen_qemu_store(cpu_ir[ra], addr, ctx->mem_idx);
    } else {
        TCGv zero;
        if (local)
            zero = tcg_const_local_i64(0);
        else
            zero = tcg_const_i64(0);
        tcg_gen_qemu_store(zero, addr, ctx->mem_idx);
        tcg_temp_free(zero);
    }
    tcg_temp_free(addr);
}
static void gen_bcond_pcload(DisasContext *ctx, int32_t disp, int lab_true)
{
    int lab_over = gen_new_label();

    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    tcg_gen_br(lab_over);
    gen_set_label(lab_true);
    tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp << 2));
    gen_set_label(lab_over);
}
static void gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                      int32_t disp, int mask)
{
    int lab_true = gen_new_label();

    if (likely(ra != 31)) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(cond, cpu_ir[ra], 0, lab_true);
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        tcg_temp_free(tmp);
    }
    gen_bcond_pcload(ctx, disp, lab_true);
}
/* Generate a forward TCG branch to LAB_TRUE if RA cmp 0.0.
   This is complicated by the fact that -0.0 compares the same as +0.0. */

static void gen_fbcond_internal(TCGCond cond, TCGv src, int lab_true)
{
    int lab_false = -1;
    uint64_t mzero = 1ull << 63;
    TCGv tmp;
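
    /* Added note (not from the original): in IEEE double, -0.0 is the bare
       sign bit — the 64-bit pattern 0x8000000000000000, i.e. mzero above —
       while +0.0 is all zeros. Masking with mzero - 1 therefore folds the
       two zeros together. */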
    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want. */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare. */
        /* ??? Assume that the temporary is reclaimed at the branch. */
        tmp = tcg_temp_new();
        tcg_gen_andi_i64(tmp, src, mzero - 1);
        tcg_gen_brcondi_i64(cond, tmp, 0, lab_true);
        break;

    case TCG_COND_GE:
        /* For >=, emit two branches to the destination. */
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_true);
        break;

    case TCG_COND_LT:
        /* For <, first filter out -0.0 to what will be the fallthru. */
        lab_false = gen_new_label();
        tcg_gen_brcondi_i64(TCG_COND_EQ, src, mzero, lab_false);
        tcg_gen_brcondi_i64(cond, src, 0, lab_true);
        gen_set_label(lab_false);
        break;

    default:
        abort();
    }
}
static void gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, int32_t disp)
{
    int lab_true;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison. */
        gen_bcond(ctx, cond, ra, disp, 0);
        return;
    }

    lab_true = gen_new_label();
    gen_fbcond_internal(cond, cpu_fir[ra], lab_true);
    gen_bcond_pcload(ctx, disp, lab_true);
}
static inline void gen_cmov(TCGCond inv_cond, int ra, int rb, int rc,
                            int islit, uint8_t lit, int mask)
{
    int l1;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();

    if (ra != 31) {
        if (mask) {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[ra], 1);
            tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
            tcg_temp_free(tmp);
        } else
            tcg_gen_brcondi_i64(inv_cond, cpu_ir[ra], 0, l1);
    } else {
        /* Very uncommon case - Do not bother to optimize. */
        TCGv tmp = tcg_const_i64(0);
        tcg_gen_brcondi_i64(inv_cond, tmp, 0, l1);
        tcg_temp_free(tmp);
    }

    if (islit)
        tcg_gen_movi_i64(cpu_ir[rc], lit);
    else
        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
    gen_set_label(l1);
}
static void gen_fcmov(TCGCond inv_cond, int ra, int rb, int rc)
{
    TCGv va = cpu_fir[ra];
    int l1;

    if (unlikely(rc == 31))
        return;
    if (unlikely(ra == 31)) {
        /* ??? Assume that the temporary is reclaimed at the branch. */
        va = tcg_const_i64(0);
    }

    l1 = gen_new_label();
    gen_fbcond_internal(inv_cond, va, l1);

    if (rb != 31)
        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[rb]);
    else
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    gen_set_label(l1);
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
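
/* An explanatory note (added, not from the original): fn11 is the 11-bit
   function field of a floating-point operate instruction, so the masks
   above select bits of that field — the rounding mode lives in bits <7:6>
   (QUAL_RM_MASK) and the /U (or /V), /I and /S qualifiers in bits 8, 9 and
   10. E.g. (fn11 & QUAL_RM_MASK) == QUAL_RM_C identifies a /C "chopped"
   (round-toward-zero) variant. */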
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field. */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting. */
        tcg_gen_ld8u_i32(tmp, cpu_env, offsetof(CPUState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero. */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val = tcg_temp_new();
    if (reg == 31) {
        tcg_gen_movi_i64(val, 0);
    } else if (fn11 & QUAL_S) {
        gen_helper_ieee_input_s(val, cpu_fir[reg]);
    } else if (is_cmp) {
        gen_helper_ieee_input_cmp(val, cpu_fir[reg]);
    } else {
        gen_helper_ieee_input(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear();
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB. */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps. A simple userspace emulation
       does not require this. We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions. */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(exc, reg);
    } else {
        gen_helper_fp_exc_raise(exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
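
/* Added note on gen_fcvtql above (not from the original): the two shifts
   build the canonical in-register layout Alpha uses for a longword held in
   an FP register — source bits <31:30> land in <63:62> and bits <29:0> in
   <58:29>, with the remaining bits left zero. */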
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                       \
static inline void glue(gen_f, name)(int rb, int rc)        \
{                                                           \
    if (unlikely(rc == 31)) {                               \
        return;                                             \
    }                                                       \
    if (rb != 31) {                                         \
        gen_helper_ ## name (cpu_fir[rc], cpu_fir[rb]);     \
    } else {                                                \
        TCGv tmp = tcg_const_i64(0);                        \
        gen_helper_ ## name (cpu_fir[rc], tmp);             \
        tcg_temp_free(tmp);                                 \
    }                                                       \
}

/* ??? VAX instruction qualifiers ignored. */
static void gen_ieee_arith2(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output. */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled. Special case that. */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx, void (*helper)(TCGv, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31)
        vb = tcg_const_i64(0);
    else
        vb = cpu_fir[rb];

    /* The only exception that can be raised by integer conversion
       is inexact. Thus we only need to worry about exceptions when
       inexact handling is requested. */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], vb);
    }

    if (rb == 31)
        tcg_temp_free(vb);
}

#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}

#define FARITH3(name)                                           \
static inline void glue(gen_f, name)(int ra, int rb, int rc)    \
{                                                               \
    TCGv va, vb;                                                \
                                                                \
    if (unlikely(rc == 31)) {                                   \
        return;                                                 \
    }                                                           \
                                                                \
    if (ra == 31)                                               \
        va = tcg_const_i64(0);                                  \
    else                                                        \
        va = cpu_fir[ra];                                       \
    if (rb == 31)                                               \
        vb = tcg_const_i64(0);                                  \
    else                                                        \
        vb = cpu_fir[rb];                                       \
                                                                \
    gen_helper_ ## name (cpu_fir[rc], va, vb);                  \
                                                                \
    if (ra == 31)                                               \
        tcg_temp_free(va);                                      \
    if (rb == 31)                                               \
        tcg_temp_free(vb);                                      \
}
/* ??? Ought to expand these inline; simple masking operations. */
/* ??? VAX instruction qualifiers ignored. */

static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions. */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}

#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1)
            mask |= 0xffull << (i * 8);
    }
    return mask;
}
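
/* A worked example (added note, not from the original): zapnot_mask(0x0f)
   selects the low four byte lanes, giving 0x00000000ffffffffull — which is
   why gen_zapnoti below can treat a 0x0f literal as a plain 32-bit
   zero-extension. */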
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND. This is a basic building block in the
   definition of many of the other byte manipulation instructions. */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64 (dest, src, zapnot_mask (lit));
        break;
    }
}

static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}

static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}

/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
1008 static void gen_ins_h(int ra
, int rb
, int rc
, int islit
,
1009 uint8_t lit
, uint8_t byte_mask
)
1011 if (unlikely(rc
== 31))
1013 else if (unlikely(ra
== 31) || (islit
&& (lit
& 7) == 0))
1014 tcg_gen_movi_i64(cpu_ir
[rc
], 0);
1016 TCGv tmp
= tcg_temp_new();
1018 /* The instruction description has us left-shift the byte mask
1019 and extract bits <15:8> and apply that zap at the end. This
1020 is equivalent to simply performing the zap first and shifting
1022 gen_zapnoti (tmp
, cpu_ir
[ra
], byte_mask
);
1025 /* Note that we have handled the lit==0 case above. */
1026 tcg_gen_shri_i64 (cpu_ir
[rc
], tmp
, 64 - (lit
& 7) * 8);
1028 TCGv shift
= tcg_temp_new();
1030 /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
1031 Do this portably by splitting the shift into two parts:
1032 shift_count-1 and 1. Arrange for the -1 by using
1033 ones-complement instead of twos-complement in the negation:
1034 ~((B & 7) * 8) & 63. */
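
            /* A worked example (added note): for B & 7 == 2 the desired
               right shift is 64 - 16 = 48; ~(2 * 8) & 63 = 47 and the
               extra 1-bit shift below makes 48. For B & 7 == 0 it yields
               63 + 1 = 64, producing the required zero. */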
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}

/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end. This is equivalent to simply performing the zap
           first and shifting afterward. */
        gen_zapnoti (tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>. This can be
           emulated with a right-shift on the expanded byte mask. This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero. This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift. The code we expand below is
           equivalent to ~((B & 7) * 8) & 63. */
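
        /* Added note: this is the same ones-complement trick as in
           gen_ins_h above — a byte offset of 0 becomes a 63-bit shift
           plus the constant 1-bit shift below, an effective shift of 64. */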
        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}

/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti (cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask (byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name (cpu_ir[rc], tmp1, tmp2);             \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name (cpu_ir[rc], tmp1, cpu_ir[rb]);       \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}

#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name (cpu_ir[rc], cpu_ir[rb]);   \
}
static inline void gen_cmp(TCGCond cond, int ra, int rb, int rc, int islit,
                           uint8_t lit)
{
    int l1, l2;
    TCGv tmp;

    if (unlikely(rc == 31))
        return;

    l1 = gen_new_label();
    l2 = gen_new_label();

    if (ra != 31) {
        tmp = tcg_temp_new();
        tcg_gen_mov_i64(tmp, cpu_ir[ra]);
    } else
        tmp = tcg_const_i64(0);
    if (islit)
        tcg_gen_brcondi_i64(cond, tmp, lit, l1);
    else
        tcg_gen_brcond_i64(cond, tmp, cpu_ir[rb], l1);

    tcg_gen_movi_i64(cpu_ir[rc], 0);
    tcg_gen_br(l2);
    gen_set_label(l1);
    tcg_gen_movi_i64(cpu_ir[rc], 1);
    gen_set_label(l2);
    tcg_temp_free(tmp);
}
static inline int translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16, disp12;
    uint16_t fn11, fn16;
    uint8_t opc, ra, rb, rc, sbz, fpfn, fn7, fn2, islit, real_islit;
    uint8_t lit;
    int ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    sbz = (insn >> 13) & 0x07;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else
        lit = (insn >> 13) & 0xFF;
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
    fn16 = insn & 0x0000FFFF;
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    fn2 = (insn >> 5) & 0x00000003;
1262 LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
1263 opc
, ra
, rb
, rc
, disp16
);
1268 #ifdef CONFIG_USER_ONLY
1269 if (palcode
== 0x9E) {
1271 tcg_gen_mov_i64(cpu_ir
[IR_V0
], cpu_uniq
);
1273 } else if (palcode
== 0x9F) {
1275 tcg_gen_mov_i64(cpu_uniq
, cpu_ir
[IR_A0
]);
1279 if (palcode
>= 0x80 && palcode
< 0xC0) {
1280 /* Unprivileged PAL call */
1281 gen_excp(ctx
, EXCP_CALL_PAL
+ ((palcode
& 0x3F) << 6), 0);
1285 #ifndef CONFIG_USER_ONLY
1286 if (palcode
< 0x40) {
1287 /* Privileged PAL code */
1288 if (ctx
->mem_idx
& 1)
1290 gen_excp(ctx
, EXCP_CALL_PALP
+ ((palcode
& 0x3F) << 6), 0);
1294 /* Invalid PAL call */
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
        break;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (!(ctx->amask & AMASK_BWX))
            goto invalid_opc;
        gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
        break;
    case 0x0D:
        /* STW */
        gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0E:
        /* STB */
        gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1, 0);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else {
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], -lit);
                    else
                        tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], lit);
                    else
                        tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else {
                    if (islit)
                        tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                    else
                        tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
                }
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
                switch (ctx->env->implver) {
                case IMPLVER_2106x:
                    /* EV4, EV45, LCA, LCA45 & EV5 */
                    break;
                case IMPLVER_21164:
                case IMPLVER_21264:
                case IMPLVER_21364:
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rc],
                                     ~(uint64_t)ctx->amask);
                    break;
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31)
                tcg_gen_movi_i64(cpu_ir[rc], ctx->env->implver);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            gen_umulh(ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtf(rb, rc);
            break;
        case 0x0B:
            /* SQRTS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrts(ctx, rb, rc, fn11);
            break;
        case 0x14:
            /* ITOFF */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtg(rb, rc);
            break;
        case 0x2B:
            /* SQRTT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            gen_fsqrtt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra]);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x530:
            /* CVTQL/sv */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do. The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA. */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0x0000:
            /* TRAPB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0x0400:
            /* EXCB */
            /* No-op. Just exit from the current tb */
            ret = 2;
            break;
        case 0xC000:
            /* RPCC */
            if (ra != 31)
                gen_helper_load_pcc(cpu_ir[ra]);
            break;
        case 0xE000:
            /* RC */
            if (ra != 31)
                gen_helper_rc(cpu_ir[ra]);
            break;
        case 0xF000:
            /* RS */
            if (ra != 31)
                gen_helper_rs(cpu_ir[ra]);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp = tcg_const_i32(insn & 0xFF);
            gen_helper_mfpr(cpu_ir[ra], tmp, cpu_ir[ra]);
            tcg_temp_free(tmp);
        }
        break;
#endif
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE */
        if (rb != 31)
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        else
            tcg_gen_movi_i64(cpu_pc, 0);
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        /* Those four jumps only differ by the branch prediction hint */
        ret = 1;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_raw(cpu_ir[ra], addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_raw(cpu_ir[ra], addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                break;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                break;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, 0);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, 0);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldl_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_ldq_raw(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                 * protection checks (hw_ldl/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldl_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                 * protection checks (hw_ldq/wa)
                 */
                gen_helper_set_alt_mode();
                gen_helper_ldq_data(cpu_ir[ra], addr);
                gen_helper_restore_mode();
                break;
            }
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (!(ctx->amask & AMASK_BWX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                else
                    tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* CTPOP */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                else
                    gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x31:
            /* PERR */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_perr(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* CTLZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                else
                    gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x33:
            /* CTTZ */
            if (!(ctx->amask & AMASK_CIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                else
                    gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x34:
            /* UNPKBW */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbw (rb, rc);
            break;
        case 0x35:
            /* UNPKBL */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_unpkbl (rb, rc);
            break;
        case 0x36:
            /* PKWB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pkwb (rb, rc);
            break;
        case 0x37:
            /* PKLB */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            if (real_islit || ra != 31)
                goto invalid_opc;
            gen_pklb (rb, rc);
            break;
        case 0x38:
            /* MINSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x39:
            /* MINSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3A:
            /* MINUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3B:
            /* MINUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_minuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3C:
            /* MAXUB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxub8 (ra, rb, rc, islit, lit);
            break;
        case 0x3D:
            /* MAXUW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxuw4 (ra, rb, rc, islit, lit);
            break;
        case 0x3E:
            /* MAXSB8 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsb8 (ra, rb, rc, islit, lit);
            break;
        case 0x3F:
            /* MAXSW4 */
            if (!(ctx->amask & AMASK_MVI))
                goto invalid_opc;
            gen_maxsw4 (ra, rb, rc, islit, lit);
            break;
        case 0x70:
            /* FTOIT */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if (!(ctx->amask & AMASK_FIX))
                goto invalid_opc;
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv tmp1 = tcg_const_i32(insn & 0xFF);
            if (ra != 31)
                gen_helper_mtpr(tmp1, cpu_ir[ra]);
            else {
                TCGv tmp2 = tcg_const_i64(0);
                gen_helper_mtpr(tmp1, tmp2);
                tcg_temp_free(tmp2);
            }
            tcg_temp_free(tmp1);
        }
        break;
#endif
    case 0x1E:
        /* HW_REI (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        if (rb == 31) {
            /* "Old" alpha */
            gen_helper_hw_rei();
        } else {
            TCGv tmp;

            if (ra != 31) {
                tmp = tcg_temp_new();
                tcg_gen_addi_i64(tmp, cpu_ir[rb], (((int64_t)insn << 51) >> 51));
            } else
                tmp = tcg_const_i64(((int64_t)insn << 51) >> 51);
            gen_helper_hw_ret(tmp);
            tcg_temp_free(tmp);
        }
        ret = 2;
        break;
#endif
    case 0x1F:
        /* HW_ST (PALcode) */
#if defined (CONFIG_USER_ONLY)
        goto invalid_opc;
#else
        if (!ctx->pal_mode)
            goto invalid_opc;
        else {
            TCGv addr, val;
            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_raw(val, addr);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_raw(val, addr);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_raw(val, val, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_raw(val, val, addr);
                break;
            case 0x4:
                /* Longword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                break;
            case 0x5:
                /* Quadword virtual access */
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                break;
            case 0xE:
                /* Longword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stl_raw(val, addr);
                gen_helper_restore_mode();
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode */
                gen_helper_set_alt_mode();
                gen_helper_st_virt_to_phys(addr, addr);
                gen_helper_stq_raw(val, addr);
                gen_helper_restore_mode();
                break;
            }
            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
        }
        break;
#endif
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        gen_store_mem(ctx, &gen_qemu_stl_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x2F:
        /* STQ_C */
        gen_store_mem(ctx, &gen_qemu_stq_c, ra, rb, disp16, 0, 0, 1);
        break;
    case 0x30:
        /* BR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x31: /* FBEQ */
        gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        ret = 1;
        break;
    case 0x32: /* FBLT */
        gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        ret = 1;
        break;
    case 0x33: /* FBLE */
        gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        ret = 1;
        break;
    case 0x34:
        /* BSR */
        if (ra != 31)
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        tcg_gen_movi_i64(cpu_pc, ctx->pc + (int64_t)(disp21 << 2));
        ret = 1;
        break;
    case 0x35: /* FBNE */
        gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        ret = 1;
        break;
    case 0x36: /* FBGE */
        gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        ret = 1;
        break;
    case 0x37: /* FBGT */
        gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        ret = 1;
        break;
    case 0x38:
        /* BLBC */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        ret = 1;
        break;
    case 0x39:
        /* BEQ */
        gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3A:
        /* BLT */
        gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3B:
        /* BLE */
        gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3C:
        /* BLBS */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        ret = 1;
        break;
    case 0x3D:
        /* BNE */
        gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3E:
        /* BGE */
        gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        ret = 1;
        break;
    case 0x3F:
        /* BGT */
        gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        ret = 1;
        break;
    invalid_opc:
        gen_invalid(ctx);
        ret = 3;
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    int ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    ctx.pc = pc_start;
    ctx.amask = env->amask;
    ctx.env = env;
#if defined (CONFIG_USER_ONLY)
    ctx.mem_idx = 0;
#else
    ctx.mem_idx = ((env->ps >> 3) & 3);
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
#endif

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB. Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default. We could even (gasp) dynamically figure out
       what default would be most efficient given the running program. */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero. */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
    for (ret = 0; ret == 0;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = ctx.pc;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = ldl_code(ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);
        if (ret != 0)
            break;
        /* if we reach a page boundary or are single stepping, stop
         * generation
         */
        if (env->singlestep_enabled) {
            gen_excp(&ctx, EXCP_DEBUG, 0);
            break;
        }

        if ((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0)
            break;

        if (gen_opc_ptr >= gen_opc_end)
            break;

        if (num_insns >= max_insns)
            break;
    }
    if (ret != 1 && ret != 3) {
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* Generate the return instruction */
    tcg_gen_exit_tb(0);
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}
void gen_intermediate_code (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc (CPUState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
struct cpu_def_t {
    const char *name;
    int implver, amask;
};

static const struct cpu_def_t cpu_defs[] = {
    { "ev4",     IMPLVER_2106x, 0 },
    { "ev5",     IMPLVER_21164, 0 },
    { "ev56",    IMPLVER_21164, AMASK_BWX },
    { "pca56",   IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "ev6",     IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "ev67",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "ev68",    IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), },
    { "21064",   IMPLVER_2106x, 0 },
    { "21164",   IMPLVER_21164, 0 },
    { "21164a",  IMPLVER_21164, AMASK_BWX },
    { "21164pc", IMPLVER_21164, AMASK_BWX | AMASK_MVI },
    { "21264",   IMPLVER_21264, AMASK_BWX | AMASK_FIX | AMASK_MVI | AMASK_TRAP },
    { "21264a",  IMPLVER_21264, (AMASK_BWX | AMASK_FIX | AMASK_CIX
                                 | AMASK_MVI | AMASK_TRAP | AMASK_PREFETCH), }
};
CPUAlphaState * cpu_alpha_init (const char *cpu_model)
{
    CPUAlphaState *env;
    uint64_t hwpcb;
    int implver, amask, i, max;

    env = qemu_mallocz(sizeof(CPUAlphaState));
    cpu_exec_init(env);
    alpha_translate_init();
    tlb_flush(env, 1);

    /* Default to ev67; no reason not to emulate insns by default. */
    implver = IMPLVER_21264;
    amask = (AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_MVI
             | AMASK_TRAP | AMASK_PREFETCH);

    max = ARRAY_SIZE(cpu_defs);
    for (i = 0; i < max; i++) {
        if (strcmp (cpu_model, cpu_defs[i].name) == 0) {
            implver = cpu_defs[i].implver;
            amask = cpu_defs[i].amask;
            break;
        }
    }
    env->implver = implver;
    env->amask = amask;

#if defined (CONFIG_USER_ONLY)
    cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD
                               | FPCR_UNFD | FPCR_INED | FPCR_DNOD));
#endif

    /* Initialize IPR */
#if defined (CONFIG_USER_ONLY)
    env->ipr[IPR_EXC_ADDR] = 0;
    env->ipr[IPR_EXC_SUM] = 0;
    env->ipr[IPR_EXC_MASK] = 0;
#else
    hwpcb = env->ipr[IPR_PCBB];
    env->ipr[IPR_ASN] = 0;
    env->ipr[IPR_ASTEN] = 0;
    env->ipr[IPR_ASTSR] = 0;
    env->ipr[IPR_DATFX] = 0;
    //    env->ipr[IPR_ESP] = ldq_raw(hwpcb + 8);
    //    env->ipr[IPR_KSP] = ldq_raw(hwpcb + 0);
    //    env->ipr[IPR_SSP] = ldq_raw(hwpcb + 16);
    //    env->ipr[IPR_USP] = ldq_raw(hwpcb + 24);
    env->ipr[IPR_FEN] = 0;
    env->ipr[IPR_IPL] = 31;
    env->ipr[IPR_MCES] = 0;
    env->ipr[IPR_PERFMON] = 0; /* Implementation specific */
    //    env->ipr[IPR_PTBR] = ldq_raw(hwpcb + 32);
    env->ipr[IPR_SISR] = 0;
    env->ipr[IPR_VIRBND] = -1ULL;
#endif

    qemu_init_vcpu(env);
    return env;
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    env->pc = gen_opc_pc[pc_pos];
}