/*
 *  Alpha emulation cpu translation for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "disas/disas.h"
#include "qemu/host-utils.h"
#undef ALPHA_DEBUG_DISAS
#define CONFIG_SOFTFLOAT_INLINE
#ifdef ALPHA_DEBUG_DISAS
#  define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
#else
#  define LOG_DISAS(...) do { } while (0)
#endif
typedef struct DisasContext DisasContext;
struct DisasContext {
    struct TranslationBlock *tb;
    uint64_t pc;
    int mem_idx;

    /* Current rounding mode for this TB.  */
    int tb_rm;
    /* Current flush-to-zero setting for this TB.  */
    int tb_ftz;

    /* implver value for this CPU.  */
    int implver;

    bool singlestep_enabled;
};
/* Return values from translate_one, indicating the state of the TB.
   Note that zero indicates that we are not exiting the TB.  */
typedef enum {
    NO_EXIT,

    /* We have emitted one or more goto_tb.  No fixup required.  */
    EXIT_GOTO_TB,

    /* We are not using a goto_tb (for whatever reason), but have updated
       the PC (for whatever reason), so there's no need to do it again on
       exiting the TB.  */
    EXIT_PC_UPDATED,

    /* We are exiting the TB, but have neither emitted a goto_tb, nor
       updated the PC for the next instruction to be executed.  */
    EXIT_PC_STALE,

    /* We are ending the TB with a noreturn function call, e.g. longjmp.
       No following code will be executed.  */
    EXIT_NORETURN,
} ExitStatus;
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_ir[31];
static TCGv cpu_fir[31];
static TCGv cpu_pc;
static TCGv cpu_lock_addr;
static TCGv cpu_lock_st_addr;
static TCGv cpu_lock_value;
static TCGv cpu_unique;
#ifndef CONFIG_USER_ONLY
static TCGv cpu_sysval;
static TCGv cpu_usp;
#endif

/* register names */
static char cpu_reg_names[10*4+21*5 + 10*5+21*6];
#include "exec/gen-icount.h"
void alpha_translate_init(void)
{
    int i;
    char *p;
    static int done_init = 0;

    if (done_init) {
        return;
    }

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    p = cpu_reg_names;
    for (i = 0; i < 31; i++) {
        sprintf(p, "ir%d", i);
        cpu_ir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, ir[i]), p);
        p += (i < 10) ? 4 : 5;

        sprintf(p, "fir%d", i);
        cpu_fir[i] = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, fir[i]), p);
        p += (i < 10) ? 5 : 6;
    }

    cpu_pc = tcg_global_mem_new_i64(TCG_AREG0,
                                    offsetof(CPUAlphaState, pc), "pc");

    cpu_lock_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                           offsetof(CPUAlphaState, lock_addr),
                                           "lock_addr");
    cpu_lock_st_addr = tcg_global_mem_new_i64(TCG_AREG0,
                                              offsetof(CPUAlphaState, lock_st_addr),
                                              "lock_st_addr");
    cpu_lock_value = tcg_global_mem_new_i64(TCG_AREG0,
                                            offsetof(CPUAlphaState, lock_value),
                                            "lock_value");

    cpu_unique = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, unique), "unique");
#ifndef CONFIG_USER_ONLY
    cpu_sysval = tcg_global_mem_new_i64(TCG_AREG0,
                                        offsetof(CPUAlphaState, sysval), "sysval");
    cpu_usp = tcg_global_mem_new_i64(TCG_AREG0,
                                     offsetof(CPUAlphaState, usp), "usp");
#endif

    done_init = 1;
}
static void gen_excp_1(int exception, int error_code)
{
    TCGv_i32 tmp1, tmp2;

    tmp1 = tcg_const_i32(exception);
    tmp2 = tcg_const_i32(error_code);
    gen_helper_excp(cpu_env, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_i32(tmp1);
}
static ExitStatus gen_excp(DisasContext *ctx, int exception, int error_code)
{
    tcg_gen_movi_i64(cpu_pc, ctx->pc);
    gen_excp_1(exception, error_code);
    return EXIT_NORETURN;
}
static inline ExitStatus gen_invalid(DisasContext *ctx)
{
    return gen_excp(ctx, EXCP_OPCDEC, 0);
}
static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_f(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    tcg_gen_qemu_ld64(tmp, t1, flags);
    gen_helper_memory_to_g(t0, tmp);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    tcg_gen_qemu_ld32u(tmp, t1, flags);
    tcg_gen_trunc_i64_i32(tmp32, tmp);
    gen_helper_memory_to_s(t0, tmp32);
    tcg_temp_free_i32(tmp32);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld32s(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags)
{
    tcg_gen_qemu_ld64(t0, t1, flags);
    tcg_gen_mov_i64(cpu_lock_addr, t1);
    tcg_gen_mov_i64(cpu_lock_value, t0);
}
static inline void gen_load_mem(DisasContext *ctx,
                                void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1,
                                                          int flags),
                                int ra, int rb, int32_t disp16, int fp,
                                int clear)
{
    TCGv addr, va;

    /* LDQ_U with ra $31 is UNOP.  Other various loads are forms of
       prefetches, which we can treat as nops.  No worries about
       missed exceptions here.  */
    if (unlikely(ra == 31)) {
        return;
    }

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    tcg_gen_qemu_load(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
}
static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_f_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags)
{
    TCGv tmp = tcg_temp_new();
    gen_helper_g_to_memory(tmp, t0);
    tcg_gen_qemu_st64(tmp, t1, flags);
    tcg_temp_free(tmp);
}
static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags)
{
    TCGv_i32 tmp32 = tcg_temp_new_i32();
    TCGv tmp = tcg_temp_new();
    gen_helper_s_to_memory(tmp32, t0);
    tcg_gen_extu_i32_i64(tmp, tmp32);
    tcg_gen_qemu_st32(tmp, t1, flags);
    tcg_temp_free(tmp);
    tcg_temp_free_i32(tmp32);
}
static inline void gen_store_mem(DisasContext *ctx,
                                 void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1,
                                                            int flags),
                                 int ra, int rb, int32_t disp16, int fp,
                                 int clear)
{
    TCGv addr, va;

    addr = tcg_temp_new();
    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
        if (clear) {
            tcg_gen_andi_i64(addr, addr, ~0x7);
        }
    } else {
        if (clear) {
            disp16 &= ~0x7;
        }
        tcg_gen_movi_i64(addr, disp16);
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = (fp ? cpu_fir[ra] : cpu_ir[ra]);
    }
    tcg_gen_qemu_store(va, addr, ctx->mem_idx);

    tcg_temp_free(addr);
    if (ra == 31) {
        tcg_temp_free(va);
    }
}
static ExitStatus gen_store_conditional(DisasContext *ctx, int ra, int rb,
                                        int32_t disp16, int quad)
{
    TCGv addr;

    if (ra == 31) {
        /* ??? Don't bother storing anything.  The user can't tell
           the difference, since the zero register always reads zero.  */
        return NO_EXIT;
    }

#if defined(CONFIG_USER_ONLY)
    addr = cpu_lock_st_addr;
#else
    addr = tcg_temp_local_new();
#endif

    if (rb != 31) {
        tcg_gen_addi_i64(addr, cpu_ir[rb], disp16);
    } else {
        tcg_gen_movi_i64(addr, disp16);
    }

#if defined(CONFIG_USER_ONLY)
    /* ??? This is handled via a complicated version of compare-and-swap
       in the cpu_loop.  Hopefully one day we'll have a real CAS opcode
       in TCG so that this isn't necessary.  */
    return gen_excp(ctx, quad ? EXCP_STQ_C : EXCP_STL_C, ra);
#else
    /* ??? In system mode we are never multi-threaded, so CAS can be
       implemented via a non-atomic load-compare-store sequence.  */
    {
        int lab_fail, lab_done;
        TCGv val;

        lab_fail = gen_new_label();
        lab_done = gen_new_label();
        tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail);

        val = tcg_temp_new();
        if (quad) {
            tcg_gen_qemu_ld64(val, addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_ld32s(val, addr, ctx->mem_idx);
        }
        tcg_gen_brcond_i64(TCG_COND_NE, val, cpu_lock_value, lab_fail);

        if (quad) {
            tcg_gen_qemu_st64(cpu_ir[ra], addr, ctx->mem_idx);
        } else {
            tcg_gen_qemu_st32(cpu_ir[ra], addr, ctx->mem_idx);
        }
        tcg_gen_movi_i64(cpu_ir[ra], 1);
        tcg_gen_br(lab_done);

        gen_set_label(lab_fail);
        tcg_gen_movi_i64(cpu_ir[ra], 0);

        gen_set_label(lab_done);
        tcg_gen_movi_i64(cpu_lock_addr, -1);

        tcg_temp_free(addr);
        return NO_EXIT;
    }
#endif
}
static bool in_superpage(DisasContext *ctx, int64_t addr)
{
    return ((ctx->tb->flags & TB_FLAGS_USER_MODE) == 0
            && ((addr >> 41) & 3) == 2
            && addr >> TARGET_VIRT_ADDR_SPACE_BITS == addr >> 63);
}
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    /* Suppress goto_tb in the case of single-stepping and IO.  */
    if (ctx->singlestep_enabled || (ctx->tb->cflags & CF_LAST_IO)) {
        return false;
    }
    /* If the destination is in the superpage, the page perms can't change.  */
    if (in_superpage(ctx, dest)) {
        return true;
    }
    /* Check for the dest on the same page as the start of the TB.  */
    return ((ctx->tb->pc ^ dest) & TARGET_PAGE_MASK) == 0;
}
static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);

    if (ra != 31) {
        tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
    }

    /* Notice branch-to-next; used to initialize RA with the PC.  */
    if (disp == 0) {
        return 0;
    } else if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);
        return EXIT_GOTO_TB;
    } else {
        tcg_gen_movi_i64(cpu_pc, dest);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
                                     TCGv cmp, int32_t disp)
{
    uint64_t dest = ctx->pc + (disp << 2);
    int lab_true = gen_new_label();

    if (use_goto_tb(ctx, dest)) {
        tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);

        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(cpu_pc, ctx->pc);
        tcg_gen_exit_tb((uintptr_t)ctx->tb);

        gen_set_label(lab_true);
        tcg_gen_goto_tb(1);
        tcg_gen_movi_i64(cpu_pc, dest);
        tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);

        return EXIT_GOTO_TB;
    } else {
        TCGv_i64 z = tcg_const_i64(0);
        TCGv_i64 d = tcg_const_i64(dest);
        TCGv_i64 p = tcg_const_i64(ctx->pc);

        tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);

        tcg_temp_free_i64(z);
        tcg_temp_free_i64(d);
        tcg_temp_free_i64(p);
        return EXIT_PC_UPDATED;
    }
}
static ExitStatus gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
                            int32_t disp, int mask)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        cmp_tmp = tcg_const_i64(0);
    } else {
        cmp_tmp = tcg_temp_new();
        if (mask) {
            tcg_gen_andi_i64(cmp_tmp, cpu_ir[ra], 1);
        } else {
            tcg_gen_mov_i64(cmp_tmp, cpu_ir[ra]);
        }
    }

    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
/* Fold -0.0 for comparison with COND.  */

static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
{
    uint64_t mzero = 1ull << 63;

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_GT:
        /* For <= or >, the -0.0 value directly compares the way we want.  */
        tcg_gen_mov_i64(dest, src);
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For == or !=, we can simply mask off the sign bit and compare.  */
        tcg_gen_andi_i64(dest, src, mzero - 1);
        break;

    case TCG_COND_GE:
    case TCG_COND_LT:
        /* For >= or <, map -0.0 to +0.0 via comparison and mask.  */
        tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero);
        tcg_gen_neg_i64(dest, dest);
        tcg_gen_and_i64(dest, dest, src);
        break;

    default:
        abort();
    }
}
static ExitStatus gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                             int32_t disp)
{
    TCGv cmp_tmp;

    if (unlikely(ra == 31)) {
        /* Very uncommon case, but easier to optimize it to an integer
           comparison than continuing with the floating point comparison.  */
        return gen_bcond(ctx, cond, ra, disp, 0);
    }

    cmp_tmp = tcg_temp_new();
    gen_fold_mzero(cond, cmp_tmp, cpu_fir[ra]);
    return gen_bcond_internal(ctx, cond, cmp_tmp, disp);
}
static void gen_cmov(TCGCond cond, int ra, int rb, int rc,
                     int islit, uint8_t lit, int mask)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        /* Very uncommon case - Do not bother to optimize.  */
        c1 = tcg_const_i64(0);
    } else if (mask) {
        c1 = tcg_const_i64(1);
        tcg_gen_and_i64(c1, c1, cpu_ir[ra]);
    } else {
        c1 = cpu_ir[ra];
    }
    if (islit) {
        v1 = tcg_const_i64(lit);
    } else {
        v1 = cpu_ir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_ir[rc], c1, z, v1, cpu_ir[rc]);

    tcg_temp_free_i64(z);
    if (ra == 31 || mask) {
        tcg_temp_free_i64(c1);
    }
    if (islit) {
        tcg_temp_free_i64(v1);
    }
}
static void gen_fcmov(TCGCond cond, int ra, int rb, int rc)
{
    TCGv_i64 c1, z, v1;

    if (unlikely(rc == 31)) {
        return;
    }

    c1 = tcg_temp_new_i64();
    if (unlikely(ra == 31)) {
        tcg_gen_movi_i64(c1, 0);
    } else {
        gen_fold_mzero(cond, c1, cpu_fir[ra]);
    }
    if (rb == 31) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = cpu_fir[rb];
    }
    z = tcg_const_i64(0);

    tcg_gen_movcond_i64(cond, cpu_fir[rc], c1, z, v1, cpu_fir[rc]);

    tcg_temp_free_i64(z);
    tcg_temp_free_i64(c1);
    if (rb == 31) {
        tcg_temp_free_i64(v1);
    }
}
#define QUAL_RM_N       0x080   /* Round mode nearest even */
#define QUAL_RM_C       0x000   /* Round mode chopped */
#define QUAL_RM_M       0x040   /* Round mode minus infinity */
#define QUAL_RM_D       0x0c0   /* Round mode dynamic */
#define QUAL_RM_MASK    0x0c0

#define QUAL_U          0x100   /* Underflow enable (fp output) */
#define QUAL_V          0x100   /* Overflow enable (int output) */
#define QUAL_S          0x400   /* Software completion enable */
#define QUAL_I          0x200   /* Inexact detection enable */
static void gen_qual_roundmode(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_RM_MASK;
    if (fn11 == ctx->tb_rm) {
        return;
    }
    ctx->tb_rm = fn11;

    tmp = tcg_temp_new_i32();
    switch (fn11) {
    case QUAL_RM_N:
        tcg_gen_movi_i32(tmp, float_round_nearest_even);
        break;
    case QUAL_RM_C:
        tcg_gen_movi_i32(tmp, float_round_to_zero);
        break;
    case QUAL_RM_M:
        tcg_gen_movi_i32(tmp, float_round_down);
        break;
    case QUAL_RM_D:
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_dyn_round));
        break;
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    /* ??? The "fpu/softfloat.h" interface is to call set_float_rounding_mode.
       With CONFIG_SOFTFLOAT that expands to an out-of-line call that just
       sets the one field.  */
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_rounding_mode));
#else
    gen_helper_setroundmode(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static void gen_qual_flushzero(DisasContext *ctx, int fn11)
{
    TCGv_i32 tmp;

    fn11 &= QUAL_U;
    if (fn11 == ctx->tb_ftz) {
        return;
    }
    ctx->tb_ftz = fn11;

    tmp = tcg_temp_new_i32();
    if (fn11) {
        /* Underflow is enabled, use the FPCR setting.  */
        tcg_gen_ld8u_i32(tmp, cpu_env,
                         offsetof(CPUAlphaState, fpcr_flush_to_zero));
    } else {
        /* Underflow is disabled, force flush-to-zero.  */
        tcg_gen_movi_i32(tmp, 1);
    }

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_st8_i32(tmp, cpu_env,
                    offsetof(CPUAlphaState, fp_status.flush_to_zero));
#else
    gen_helper_setflushzero(tmp);
#endif

    tcg_temp_free_i32(tmp);
}
static TCGv gen_ieee_input(int reg, int fn11, int is_cmp)
{
    TCGv val;
    if (reg == 31) {
        val = tcg_const_i64(0);
    } else {
        if ((fn11 & QUAL_S) == 0) {
            if (is_cmp) {
                gen_helper_ieee_input_cmp(cpu_env, cpu_fir[reg]);
            } else {
                gen_helper_ieee_input(cpu_env, cpu_fir[reg]);
            }
        }
        val = tcg_temp_new();
        tcg_gen_mov_i64(val, cpu_fir[reg]);
    }
    return val;
}
static void gen_fp_exc_clear(void)
{
#if defined(CONFIG_SOFTFLOAT_INLINE)
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_st8_i32(zero, cpu_env,
                    offsetof(CPUAlphaState, fp_status.float_exception_flags));
    tcg_temp_free_i32(zero);
#else
    gen_helper_fp_exc_clear(cpu_env);
#endif
}
static void gen_fp_exc_raise_ignore(int rc, int fn11, int ignore)
{
    /* ??? We ought to be able to do something with imprecise exceptions.
       E.g. notice we're still in the trap shadow of something within the
       TB and do not generate the code to signal the exception; end the TB
       when an exception is forced to arrive, either by consumption of a
       register value or TRAPB or EXCB.  */
    TCGv_i32 exc = tcg_temp_new_i32();
    TCGv_i32 reg;

#if defined(CONFIG_SOFTFLOAT_INLINE)
    tcg_gen_ld8u_i32(exc, cpu_env,
                     offsetof(CPUAlphaState, fp_status.float_exception_flags));
#else
    gen_helper_fp_exc_get(exc, cpu_env);
#endif

    if (ignore) {
        tcg_gen_andi_i32(exc, exc, ~ignore);
    }

    /* ??? Pass in the regno of the destination so that the helper can
       set EXC_MASK, which contains a bitmask of destination registers
       that have caused arithmetic traps.  A simple userspace emulation
       does not require this.  We do need it for a guest kernel's entArith,
       or if we were to do something clever with imprecise exceptions.  */
    reg = tcg_const_i32(rc + 32);

    if (fn11 & QUAL_S) {
        gen_helper_fp_exc_raise_s(cpu_env, exc, reg);
    } else {
        gen_helper_fp_exc_raise(cpu_env, exc, reg);
    }

    tcg_temp_free_i32(reg);
    tcg_temp_free_i32(exc);
}
static inline void gen_fp_exc_raise(int rc, int fn11)
{
    gen_fp_exc_raise_ignore(rc, fn11, fn11 & QUAL_I ? 0 : float_flag_inexact);
}
static void gen_fcvtlq(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        /* The arithmetic right shift here, plus the sign-extended mask below
           yields a sign-extended result without an explicit ext32s_i64.  */
        tcg_gen_sari_i64(tmp, cpu_fir[rb], 32);
        tcg_gen_shri_i64(cpu_fir[rc], cpu_fir[rb], 29);
        tcg_gen_andi_i64(tmp, tmp, (int32_t)0xc0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rc], 0x3fffffff);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql(int rb, int rc)
{
    if (unlikely(rc == 31)) {
        return;
    }
    if (unlikely(rb == 31)) {
        tcg_gen_movi_i64(cpu_fir[rc], 0);
    } else {
        TCGv tmp = tcg_temp_new();

        tcg_gen_andi_i64(tmp, cpu_fir[rb], 0xC0000000);
        tcg_gen_andi_i64(cpu_fir[rc], cpu_fir[rb], 0x3FFFFFFF);
        tcg_gen_shli_i64(tmp, tmp, 32);
        tcg_gen_shli_i64(cpu_fir[rc], cpu_fir[rc], 29);
        tcg_gen_or_i64(cpu_fir[rc], cpu_fir[rc], tmp);

        tcg_temp_free(tmp);
    }
}
static void gen_fcvtql_v(DisasContext *ctx, int rb, int rc)
{
    if (rb != 31) {
        int lab = gen_new_label();
        TCGv tmp = tcg_temp_new();

        tcg_gen_ext32s_i64(tmp, cpu_fir[rb]);
        tcg_gen_brcond_i64(TCG_COND_EQ, tmp, cpu_fir[rb], lab);
        gen_excp(ctx, EXCP_ARITH, EXC_M_IOV);

        gen_set_label(lab);
        tcg_temp_free(tmp);
    }
    gen_fcvtql(rb, rc);
}
#define FARITH2(name)                                                   \
    static inline void glue(gen_f, name)(int rb, int rc)                \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (rb != 31) {                                                 \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, cpu_fir[rb]);     \
        } else {                                                        \
            TCGv tmp = tcg_const_i64(0);                                \
            gen_helper_ ## name(cpu_fir[rc], cpu_env, tmp);             \
            tcg_temp_free(tmp);                                         \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH2(sqrtf)
FARITH2(sqrtg)
FARITH2(cvtgf)
FARITH2(cvtgq)
FARITH2(cvtqf)
FARITH2(cvtqg)
static void gen_ieee_arith2(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, vb);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH2(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_arith2(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_ARITH2(sqrts)
IEEE_ARITH2(sqrtt)
IEEE_ARITH2(cvtst)
IEEE_ARITH2(cvtts)
static void gen_fcvttq(DisasContext *ctx, int rb, int rc, int fn11)
{
    TCGv vb;
    int ignore = 0;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    /* No need to set flushzero, since we have an integer output.  */
    gen_fp_exc_clear();
    vb = gen_ieee_input(rb, fn11, 0);

    /* Almost all integer conversions use cropped rounding, and most
       also do not have integer overflow enabled.  Special case that.  */
    switch (fn11) {
    case QUAL_RM_C:
        gen_helper_cvttq_c(cpu_fir[rc], cpu_env, vb);
        break;
    case QUAL_V | QUAL_RM_C:
    case QUAL_S | QUAL_V | QUAL_RM_C:
        ignore = float_flag_inexact;
        /* FALLTHRU */
    case QUAL_S | QUAL_V | QUAL_I | QUAL_RM_C:
        gen_helper_cvttq_svic(cpu_fir[rc], cpu_env, vb);
        break;
    default:
        gen_qual_roundmode(ctx, fn11);
        gen_helper_cvttq(cpu_fir[rc], cpu_env, vb);
        ignore |= (fn11 & QUAL_V ? 0 : float_flag_overflow);
        ignore |= (fn11 & QUAL_I ? 0 : float_flag_inexact);
        break;
    }
    tcg_temp_free(vb);

    gen_fp_exc_raise_ignore(rc, fn11, ignore);
}
static void gen_ieee_intcvt(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv),
                            int rb, int rc, int fn11)
{
    TCGv vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);

    if (rb == 31) {
        vb = tcg_const_i64(0);
    } else {
        vb = cpu_fir[rb];
    }

    /* The only exception that can be raised by integer conversion
       is inexact.  Thus we only need to worry about exceptions when
       inexact handling is requested.  */
    if (fn11 & QUAL_I) {
        gen_fp_exc_clear();
        helper(cpu_fir[rc], cpu_env, vb);
        gen_fp_exc_raise(rc, fn11);
    } else {
        helper(cpu_fir[rc], cpu_env, vb);
    }

    if (rb == 31) {
        tcg_temp_free(vb);
    }
}
#define IEEE_INTCVT(name)                                       \
static inline void glue(gen_f, name)(DisasContext *ctx,         \
                                     int rb, int rc, int fn11)  \
{                                                               \
    gen_ieee_intcvt(ctx, gen_helper_##name, rb, rc, fn11);      \
}
IEEE_INTCVT(cvtqs)
IEEE_INTCVT(cvtqt)
static void gen_cpys_internal(int ra, int rb, int rc, int inv_a, uint64_t mask)
{
    TCGv va, vb, vmask;
    int za = 0, zb = 0;

    if (unlikely(rc == 31)) {
        return;
    }

    vmask = tcg_const_i64(mask);

    TCGV_UNUSED_I64(va);
    if (ra == 31) {
        if (inv_a) {
            va = vmask;
        } else {
            za = 1;
        }
    } else {
        va = tcg_temp_new_i64();
        tcg_gen_mov_i64(va, cpu_fir[ra]);
        if (inv_a) {
            tcg_gen_andc_i64(va, vmask, va);
        } else {
            tcg_gen_and_i64(va, va, vmask);
        }
    }

    TCGV_UNUSED_I64(vb);
    if (rb == 31) {
        zb = 1;
    } else {
        vb = tcg_temp_new_i64();
        tcg_gen_andc_i64(vb, cpu_fir[rb], vmask);
    }

    switch (za << 1 | zb) {
    case 0:
        tcg_gen_or_i64(cpu_fir[rc], va, vb);
        break;
    case 1:
        tcg_gen_mov_i64(cpu_fir[rc], va);
        break;
    case 2:
        tcg_gen_mov_i64(cpu_fir[rc], vb);
        break;
    case 3:
        tcg_gen_movi_i64(cpu_fir[rc], 0);
        break;
    }

    tcg_temp_free(vmask);
    if (ra != 31) {
        tcg_temp_free(va);
    }
    if (rb != 31) {
        tcg_temp_free(vb);
    }
}
static inline void gen_fcpys(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0x8000000000000000ULL);
}

static inline void gen_fcpysn(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 1, 0x8000000000000000ULL);
}

static inline void gen_fcpyse(int ra, int rb, int rc)
{
    gen_cpys_internal(ra, rb, rc, 0, 0xFFF0000000000000ULL);
}
#define FARITH3(name)                                                   \
    static inline void glue(gen_f, name)(int ra, int rb, int rc)        \
    {                                                                   \
        TCGv va, vb;                                                    \
                                                                        \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra == 31) {                                                 \
            va = tcg_const_i64(0);                                      \
        } else {                                                        \
            va = cpu_fir[ra];                                           \
        }                                                               \
        if (rb == 31) {                                                 \
            vb = tcg_const_i64(0);                                      \
        } else {                                                        \
            vb = cpu_fir[rb];                                           \
        }                                                               \
                                                                        \
        gen_helper_ ## name(cpu_fir[rc], cpu_env, va, vb);              \
                                                                        \
        if (ra == 31) {                                                 \
            tcg_temp_free(va);                                          \
        }                                                               \
        if (rb == 31) {                                                 \
            tcg_temp_free(vb);                                          \
        }                                                               \
    }

/* ??? VAX instruction qualifiers ignored.  */
FARITH3(addf)
FARITH3(subf)
FARITH3(mulf)
FARITH3(divf)
FARITH3(addg)
FARITH3(subg)
FARITH3(mulg)
FARITH3(divg)
FARITH3(cmpgeq)
FARITH3(cmpglt)
FARITH3(cmpgle)
static void gen_ieee_arith3(DisasContext *ctx,
                            void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                            int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_qual_roundmode(ctx, fn11);
    gen_qual_flushzero(ctx, fn11);
    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 0);
    vb = gen_ieee_input(rb, fn11, 0);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_ARITH3(name)                                               \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc, fn11);          \
}
IEEE_ARITH3(adds)
IEEE_ARITH3(subs)
IEEE_ARITH3(muls)
IEEE_ARITH3(divs)
IEEE_ARITH3(addt)
IEEE_ARITH3(subt)
IEEE_ARITH3(mult)
IEEE_ARITH3(divt)
static void gen_ieee_compare(DisasContext *ctx,
                             void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv),
                             int ra, int rb, int rc, int fn11)
{
    TCGv va, vb;

    /* ??? This is wrong: the instruction is not a nop, it still may
       raise exceptions.  */
    if (unlikely(rc == 31)) {
        return;
    }

    gen_fp_exc_clear();

    va = gen_ieee_input(ra, fn11, 1);
    vb = gen_ieee_input(rb, fn11, 1);
    helper(cpu_fir[rc], cpu_env, va, vb);
    tcg_temp_free(va);
    tcg_temp_free(vb);

    gen_fp_exc_raise(rc, fn11);
}
#define IEEE_CMP3(name)                                                 \
static inline void glue(gen_f, name)(DisasContext *ctx,                 \
                                     int ra, int rb, int rc, int fn11)  \
{                                                                       \
    gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc, fn11);         \
}
IEEE_CMP3(cmptun)
IEEE_CMP3(cmpteq)
IEEE_CMP3(cmptlt)
IEEE_CMP3(cmptle)
static inline uint64_t zapnot_mask(uint8_t lit)
{
    uint64_t mask = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        if ((lit >> i) & 1) {
            mask |= 0xffull << (i * 8);
        }
    }
    return mask;
}
/* Implement zapnot with an immediate operand, which expands to some
   form of immediate AND.  This is a basic building block in the
   definition of many of the other byte manipulation instructions.  */
static void gen_zapnoti(TCGv dest, TCGv src, uint8_t lit)
{
    switch (lit) {
    case 0x00:
        tcg_gen_movi_i64(dest, 0);
        break;
    case 0x01:
        tcg_gen_ext8u_i64(dest, src);
        break;
    case 0x03:
        tcg_gen_ext16u_i64(dest, src);
        break;
    case 0x0f:
        tcg_gen_ext32u_i64(dest, src);
        break;
    case 0xff:
        tcg_gen_mov_i64(dest, src);
        break;
    default:
        tcg_gen_andi_i64(dest, src, zapnot_mask(lit));
        break;
    }
}
static inline void gen_zapnot(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], lit);
    else
        gen_helper_zapnot(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
static inline void gen_zap(int ra, int rb, int rc, int islit, uint8_t lit)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit)
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~lit);
    else
        gen_helper_zap(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
/* EXTWH, EXTLH, EXTQH */
static void gen_ext_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            lit = (64 - (lit & 7) * 8) & 0x3f;
            tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit);
        } else {
            TCGv tmp1 = tcg_temp_new();
            tcg_gen_andi_i64(tmp1, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp1, tmp1, 3);
            tcg_gen_neg_i64(tmp1, tmp1);
            tcg_gen_andi_i64(tmp1, tmp1, 0x3f);
            tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], tmp1);
            tcg_temp_free(tmp1);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* EXTBL, EXTWL, EXTLL, EXTQL */
static void gen_ext_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        if (islit) {
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], (lit & 7) * 8);
        } else {
            TCGv tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[rb], 7);
            tcg_gen_shli_i64(tmp, tmp, 3);
            tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], tmp);
            tcg_temp_free(tmp);
        }
        gen_zapnoti(cpu_ir[rc], cpu_ir[rc], byte_mask);
    }
}
/* INSWH, INSLH, INSQH */
static void gen_ins_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31) || (islit && (lit & 7) == 0))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           and extract bits <15:8> and apply that zap at the end.  This
           is equivalent to simply performing the zap first and shifting
           afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            /* Note that we have handled the lit==0 case above.  */
            tcg_gen_shri_i64(cpu_ir[rc], tmp, 64 - (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();

            /* If (B & 7) == 0, we need to shift by 64 and leave a zero.
               Do this portably by splitting the shift into two parts:
               shift_count-1 and 1.  Arrange for the -1 by using
               ones-complement instead of twos-complement in the negation:
               ~((B & 7) * 8) & 63.  */

            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_not_i64(shift, shift);
            tcg_gen_andi_i64(shift, shift, 0x3f);

            tcg_gen_shr_i64(cpu_ir[rc], tmp, shift);
            tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[rc], 1);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* INSBL, INSWL, INSLL, INSQL */
static void gen_ins_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else {
        TCGv tmp = tcg_temp_new();

        /* The instruction description has us left-shift the byte mask
           the same number of byte slots as the data and apply the zap
           at the end.  This is equivalent to simply performing the zap
           first and shifting afterward.  */
        gen_zapnoti(tmp, cpu_ir[ra], byte_mask);

        if (islit) {
            tcg_gen_shli_i64(cpu_ir[rc], tmp, (lit & 7) * 8);
        } else {
            TCGv shift = tcg_temp_new();
            tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
            tcg_gen_shli_i64(shift, shift, 3);
            tcg_gen_shl_i64(cpu_ir[rc], tmp, shift);
            tcg_temp_free(shift);
        }
        tcg_temp_free(tmp);
    }
}
/* MSKWH, MSKLH, MSKQH */
static void gen_msk_h(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~((byte_mask << (lit & 7)) >> 8));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        /* The instruction description is as above, where the byte_mask
           is shifted left, and then we extract bits <15:8>.  This can be
           emulated with a right-shift on the expanded byte mask.  This
           requires extra care because for an input <2:0> == 0 we need a
           shift of 64 bits in order to generate a zero.  This is done by
           splitting the shift into two parts, the variable shift - 1
           followed by a constant 1 shift.  The code we expand below is
           equivalent to ~((B & 7) * 8) & 63.  */

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_not_i64(shift, shift);
        tcg_gen_andi_i64(shift, shift, 0x3f);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shr_i64(mask, mask, shift);
        tcg_gen_shri_i64(mask, mask, 1);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* MSKBL, MSKWL, MSKLL, MSKQL */
static void gen_msk_l(int ra, int rb, int rc, int islit,
                      uint8_t lit, uint8_t byte_mask)
{
    if (unlikely(rc == 31))
        return;
    else if (unlikely(ra == 31))
        tcg_gen_movi_i64(cpu_ir[rc], 0);
    else if (islit) {
        gen_zapnoti(cpu_ir[rc], cpu_ir[ra], ~(byte_mask << (lit & 7)));
    } else {
        TCGv shift = tcg_temp_new();
        TCGv mask = tcg_temp_new();

        tcg_gen_andi_i64(shift, cpu_ir[rb], 7);
        tcg_gen_shli_i64(shift, shift, 3);
        tcg_gen_movi_i64(mask, zapnot_mask(byte_mask));
        tcg_gen_shl_i64(mask, mask, shift);

        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], mask);

        tcg_temp_free(mask);
        tcg_temp_free(shift);
    }
}
/* Code to call arith3 helpers */
#define ARITH3(name)                                                  \
static inline void glue(gen_, name)(int ra, int rb, int rc, int islit,\
                                    uint8_t lit)                      \
{                                                                     \
    if (unlikely(rc == 31))                                           \
        return;                                                       \
                                                                      \
    if (ra != 31) {                                                   \
        if (islit) {                                                  \
            TCGv tmp = tcg_const_i64(lit);                            \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], tmp);         \
            tcg_temp_free(tmp);                                       \
        } else                                                        \
            gen_helper_ ## name(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]); \
    } else {                                                          \
        TCGv tmp1 = tcg_const_i64(0);                                 \
        if (islit) {                                                  \
            TCGv tmp2 = tcg_const_i64(lit);                           \
            gen_helper_ ## name(cpu_ir[rc], tmp1, tmp2);              \
            tcg_temp_free(tmp2);                                      \
        } else                                                        \
            gen_helper_ ## name(cpu_ir[rc], tmp1, cpu_ir[rb]);        \
        tcg_temp_free(tmp1);                                          \
    }                                                                 \
}
ARITH3(cmpbge)
ARITH3(minub8)
ARITH3(minsb8)
ARITH3(minuw4)
ARITH3(minsw4)
ARITH3(maxub8)
ARITH3(maxsb8)
ARITH3(maxuw4)
ARITH3(maxsw4)
ARITH3(perr)
/* Code to call arith3 helpers */
#define ARITH3_EX(name)                                                 \
    static inline void glue(gen_, name)(int ra, int rb, int rc,         \
                                        int islit, uint8_t lit)         \
    {                                                                   \
        if (unlikely(rc == 31)) {                                       \
            return;                                                     \
        }                                                               \
        if (ra != 31) {                                                 \
            if (islit) {                                                \
                TCGv tmp = tcg_const_i64(lit);                          \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], tmp);                   \
                tcg_temp_free(tmp);                                     \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env,                \
                                    cpu_ir[ra], cpu_ir[rb]);            \
            }                                                           \
        } else {                                                        \
            TCGv tmp1 = tcg_const_i64(0);                               \
            if (islit) {                                                \
                TCGv tmp2 = tcg_const_i64(lit);                         \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1, tmp2);   \
                tcg_temp_free(tmp2);                                    \
            } else {                                                    \
                gen_helper_ ## name(cpu_ir[rc], cpu_env, tmp1,          \
                                    cpu_ir[rb]);                        \
            }                                                           \
            tcg_temp_free(tmp1);                                        \
        }                                                               \
    }
ARITH3_EX(addlv)
ARITH3_EX(sublv)
ARITH3_EX(addqv)
ARITH3_EX(subqv)
ARITH3_EX(mullv)
ARITH3_EX(mulqv)
#define MVIOP2(name)                                    \
static inline void glue(gen_, name)(int rb, int rc)     \
{                                                       \
    if (unlikely(rc == 31))                             \
        return;                                         \
    if (unlikely(rb == 31))                             \
        tcg_gen_movi_i64(cpu_ir[rc], 0);                \
    else                                                \
        gen_helper_ ## name(cpu_ir[rc], cpu_ir[rb]);    \
}
MVIOP2(pklb)
MVIOP2(pkwb)
MVIOP2(unpkbl)
MVIOP2(unpkbw)
static void gen_cmp(TCGCond cond, int ra, int rb, int rc,
                    int islit, uint8_t lit)
{
    TCGv va, vb;

    if (unlikely(rc == 31)) {
        return;
    }

    if (ra == 31) {
        va = tcg_const_i64(0);
    } else {
        va = cpu_ir[ra];
    }
    if (islit) {
        vb = tcg_const_i64(lit);
    } else {
        vb = cpu_ir[rb];
    }

    tcg_gen_setcond_i64(cond, cpu_ir[rc], va, vb);

    if (ra == 31) {
        tcg_temp_free(va);
    }
    if (islit) {
        tcg_temp_free(vb);
    }
}
static void gen_rx(int ra, int set)
{
    TCGv_i32 tmp;

    if (ra != 31) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, offsetof(CPUAlphaState, intr_flag));
    }

    tmp = tcg_const_i32(set);
    tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag));
    tcg_temp_free_i32(tmp);
}
static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
{
    /* We're emulating OSF/1 PALcode.  Many of these are trivial access
       to internal cpu registers.  */

    /* Unprivileged PAL call */
    if (palcode >= 0x80 && palcode < 0xC0) {
        switch (palcode) {
        case 0x86:
            /* IMB */
            /* No-op inside QEMU.  */
            break;
        case 0x9E:
            /* RDUNIQUE */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_unique);
            break;
        case 0x9F:
            /* WRUNIQUE */
            tcg_gen_mov_i64(cpu_unique, cpu_ir[IR_A0]);
            break;
        default:
            palcode &= 0xbf;
            goto do_call_pal;
        }
        return NO_EXIT;
    }

#ifndef CONFIG_USER_ONLY
    /* Privileged PAL code */
    if (palcode < 0x40 && (ctx->tb->flags & TB_FLAGS_USER_MODE) == 0) {
        switch (palcode) {
        case 0x01:
            /* CFLUSH */
            /* No-op inside QEMU.  */
            break;
        case 0x02:
            /* DRAINA */
            /* No-op inside QEMU.  */
            break;
        case 0x2D:
            /* WRVPTPTR */
            tcg_gen_st_i64(cpu_ir[IR_A0], cpu_env, offsetof(CPUAlphaState, vptptr));
            break;
        case 0x31:
            /* WRVAL */
            tcg_gen_mov_i64(cpu_sysval, cpu_ir[IR_A0]);
            break;
        case 0x32:
            /* RDVAL */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_sysval);
            break;

        case 0x35: {
            /* SWPIPL */
            TCGv tmp;

            /* Note that we already know we're in kernel mode, so we know
               that PS only contains the 3 IPL bits.  */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));

            /* But make sure and store only the 3 IPL bits from the user.  */
            tmp = tcg_temp_new();
            tcg_gen_andi_i64(tmp, cpu_ir[IR_A0], PS_INT_MASK);
            tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps));
            tcg_temp_free(tmp);
            break;
        }

        case 0x36:
            /* RDPS */
            tcg_gen_ld8u_i64(cpu_ir[IR_V0], cpu_env, offsetof(CPUAlphaState, ps));
            break;
        case 0x38:
            /* WRUSP */
            tcg_gen_mov_i64(cpu_usp, cpu_ir[IR_A0]);
            break;
        case 0x3A:
            /* RDUSP */
            tcg_gen_mov_i64(cpu_ir[IR_V0], cpu_usp);
            break;
        case 0x3C:
            /* WHAMI */
            tcg_gen_ld32s_i64(cpu_ir[IR_V0], cpu_env,
                -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index));
            break;

        default:
            palcode &= 0x3f;
            goto do_call_pal;
        }
        return NO_EXIT;
    }
#endif
    return gen_invalid(ctx);

 do_call_pal:
#ifdef CONFIG_USER_ONLY
    return gen_excp(ctx, EXCP_CALL_PAL, palcode);
#else
    {
        TCGv pc = tcg_const_i64(ctx->pc);
        TCGv entry = tcg_const_i64(palcode & 0x80
                                   ? 0x2000 + (palcode - 0x80) * 64
                                   : 0x1000 + palcode * 64);

        gen_helper_call_pal(cpu_env, pc, entry);

        tcg_temp_free(entry);
        tcg_temp_free(pc);

        /* Since the destination is running in PALmode, we don't really
           need the page permissions check.  We'll see the existence of
           the page when we create the TB, and we'll flush all TBs if
           we change the PAL base register.  */
        if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
            tcg_gen_goto_tb(0);
            tcg_gen_exit_tb((uintptr_t)ctx->tb);
            return EXIT_GOTO_TB;
        }

        return EXIT_PC_UPDATED;
    }
#endif
}
#ifndef CONFIG_USER_ONLY

#define PR_BYTE         0x100000
#define PR_LONG         0x200000

static int cpu_pr_data(int pr)
{
    switch (pr) {
    case  0: return offsetof(CPUAlphaState, ps) | PR_BYTE;
    case  1: return offsetof(CPUAlphaState, fen) | PR_BYTE;
    case  2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG;
    case  3: return offsetof(CPUAlphaState, trap_arg0);
    case  4: return offsetof(CPUAlphaState, trap_arg1);
    case  5: return offsetof(CPUAlphaState, trap_arg2);
    case  6: return offsetof(CPUAlphaState, exc_addr);
    case  7: return offsetof(CPUAlphaState, palbr);
    case  8: return offsetof(CPUAlphaState, ptbr);
    case  9: return offsetof(CPUAlphaState, vptptr);
    case 10: return offsetof(CPUAlphaState, unique);
    case 11: return offsetof(CPUAlphaState, sysval);
    case 12: return offsetof(CPUAlphaState, usp);

    case 32 ... 39:
        return offsetof(CPUAlphaState, shadow[pr - 32]);
    case 40 ... 63:
        return offsetof(CPUAlphaState, scratch[pr - 40]);

    case 251:
        return offsetof(CPUAlphaState, alarm_expire);
    }
    return 0;
}
static ExitStatus gen_mfpr(int ra, int regno)
{
    int data = cpu_pr_data(regno);

    /* In our emulated PALcode, these processor registers have no
       side effects from reading.  */
    if (ra == 31) {
        return NO_EXIT;
    }

    /* Special help for VMTIME and WALLTIME.  */
    if (regno == 250 || regno == 249) {
        void (*helper)(TCGv) = gen_helper_get_walltime;
        if (regno == 249) {
            helper = gen_helper_get_vmtime;
        }
        if (use_icount) {
            gen_io_start();
            helper(cpu_ir[ra]);
            gen_io_end();
            return EXIT_PC_STALE;
        } else {
            helper(cpu_ir[ra]);
            return NO_EXIT;
        }
    }

    /* The basic registers are data only, and unknown registers
       are read-zero, write-ignore.  */
    if (data == 0) {
        tcg_gen_movi_i64(cpu_ir[ra], 0);
    } else if (data & PR_BYTE) {
        tcg_gen_ld8u_i64(cpu_ir[ra], cpu_env, data & ~PR_BYTE);
    } else if (data & PR_LONG) {
        tcg_gen_ld32s_i64(cpu_ir[ra], cpu_env, data & ~PR_LONG);
    } else {
        tcg_gen_ld_i64(cpu_ir[ra], cpu_env, data);
    }
    return NO_EXIT;
}
static ExitStatus gen_mtpr(DisasContext *ctx, int rb, int regno)
{
    TCGv tmp;
    int data;

    if (rb == 31) {
        tmp = tcg_const_i64(0);
    } else {
        tmp = cpu_ir[rb];
    }

    switch (regno) {
    case 255:
        /* TBIA */
        gen_helper_tbia(cpu_env);
        break;

    case 254:
        /* TBIS */
        gen_helper_tbis(cpu_env, tmp);
        break;

    case 253:
        /* WAIT */
        tmp = tcg_const_i64(1);
        tcg_gen_st32_i64(tmp, cpu_env, -offsetof(AlphaCPU, env) +
                                       offsetof(CPUState, halted));
        return gen_excp(ctx, EXCP_HLT, 0);

    case 252:
        /* HALT */
        gen_helper_halt(tmp);
        return EXIT_PC_STALE;

    case 251:
        /* ALARM */
        gen_helper_set_alarm(cpu_env, tmp);
        break;

    case 7:
        /* PALBR */
        tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUAlphaState, palbr));
        /* Changing the PAL base register implies un-chaining all of the TBs
           that ended with a CALL_PAL.  Since the base register usually only
           changes during boot, flushing everything works well.  */
        gen_helper_tb_flush(cpu_env);
        return EXIT_PC_STALE;

    default:
        /* The basic registers are data only, and unknown registers
           are read-zero, write-ignore.  */
        data = cpu_pr_data(regno);
        if (data != 0) {
            if (data & PR_BYTE) {
                tcg_gen_st8_i64(tmp, cpu_env, data & ~PR_BYTE);
            } else if (data & PR_LONG) {
                tcg_gen_st32_i64(tmp, cpu_env, data & ~PR_LONG);
            } else {
                tcg_gen_st_i64(tmp, cpu_env, data);
            }
        }
        break;
    }

    if (rb == 31) {
        tcg_temp_free(tmp);
    }

    return NO_EXIT;
}
#endif /* !USER_ONLY */
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t palcode;
    int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
    int32_t disp12;
#endif
    uint16_t fn11;
    uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
    uint8_t lit;
    ExitStatus ret;

    /* Decode all instruction fields */
    opc = insn >> 26;
    ra = (insn >> 21) & 0x1F;
    rb = (insn >> 16) & 0x1F;
    rc = insn & 0x1F;
    real_islit = islit = (insn >> 12) & 1;
    if (rb == 31 && !islit) {
        islit = 1;
        lit = 0;
    } else {
        lit = (insn >> 13) & 0xFF;
    }
    palcode = insn & 0x03FFFFFF;
    disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
    disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
    disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
    fn11 = (insn >> 5) & 0x000007FF;
    fpfn = fn11 & 0x3F;
    fn7 = (insn >> 5) & 0x0000007F;
    LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
              opc, ra, rb, rc, disp16);
    ret = NO_EXIT;
    switch (opc) {
    case 0x00:
        /* CALL_PAL */
        ret = gen_call_pal(ctx, palcode);
        break;
    case 0x08:
        /* LDA */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16);
        }
        break;
    case 0x09:
        /* LDAH */
        if (likely(ra != 31)) {
            if (rb != 31)
                tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
            else
                tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
        }
        break;
    case 0x0A:
        /* LDBU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0B:
        /* LDQ_U */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
        break;
    case 0x0C:
        /* LDWU */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0D:
        /* STW */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0E:
        /* STB */
        if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
            gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
            break;
        }
        goto invalid_opc;
    case 0x0F:
        /* STQ_U */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
        break;
    case 0x10:
        switch (fn7) {
        case 0x00:
            /* ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit) {
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    } else {
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                        tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                    }
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x02:
            /* S4ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x09:
            /* SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else {
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x0B:
            /* S4SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else {
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x0F:
            /* CMPBGE */
            gen_cmpbge(ra, rb, rc, islit, lit);
            break;
        case 0x12:
            /* S8ADDL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x1B:
            /* S8SUBL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(tmp, tmp, lit);
                    else
                        tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
                    tcg_temp_free(tmp);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else {
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x1D:
            /* CMPULT */
            gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
            break;
        case 0x20:
            /* ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x22:
            /* S4ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x29:
            /* SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x2B:
            /* S4SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x2D:
            /* CMPEQ */
            gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* S8ADDQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x3B:
            /* S8SUBQ */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv tmp = tcg_temp_new();
                    tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
                    if (islit)
                        tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
                    else
                        tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
                    tcg_temp_free(tmp);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], -lit);
                else
                    tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x3D:
            /* CMPULE */
            gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
            break;
        case 0x40:
            /* ADDL/V */
            gen_addlv(ra, rb, rc, islit, lit);
            break;
        case 0x49:
            /* SUBL/V */
            gen_sublv(ra, rb, rc, islit, lit);
            break;
        case 0x4D:
            /* CMPLT */
            gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* ADDQ/V */
            gen_addqv(ra, rb, rc, islit, lit);
            break;
        case 0x69:
            /* SUBQ/V */
            gen_subqv(ra, rb, rc, islit, lit);
            break;
        case 0x6D:
            /* CMPLE */
            gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x11:
        switch (fn7) {
        case 0x00:
            /* AND */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x08:
            /* BIC */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x14:
            /* CMOVLBS */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
            break;
        case 0x16:
            /* CMOVLBC */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
            break;
        case 0x20:
            /* BIS */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x24:
            /* CMOVEQ */
            gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
            break;
        case 0x26:
            /* CMOVNE */
            gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x28:
            /* ORNOT */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                else
                    tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x40:
            /* XOR */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], lit);
                else
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x44:
            /* CMOVLT */
            gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x46:
            /* CMOVGE */
            gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x48:
            /* EQV */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
                    else
                        tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                } else if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], ~lit);
                else
                    tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x61:
            /* AMASK */
            if (likely(rc != 31)) {
                uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;

                if (islit) {
                    tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
                } else {
                    tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
                }
            }
            break;
        case 0x64:
            /* CMOVLE */
            gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
            break;
        case 0x66:
            /* CMOVGT */
            gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
            break;
        case 0x6C:
            /* IMPLVER */
            if (rc != 31) {
                tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x12:
        switch (fn7) {
        case 0x02:
            /* MSKBL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x06:
            /* EXTBL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x0B:
            /* INSBL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x01);
            break;
        case 0x12:
            /* MSKWL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x16:
            /* EXTWL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x1B:
            /* INSWL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x22:
            /* MSKLL */
            gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x26:
            /* EXTLL */
            gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x2B:
            /* INSLL */
            gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x30:
            /* ZAP */
            gen_zap(ra, rb, rc, islit, lit);
            break;
        case 0x31:
            /* ZAPNOT */
            gen_zapnot(ra, rb, rc, islit, lit);
            break;
        case 0x32:
            /* MSKQL */
            gen_msk_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x34:
            /* SRL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x36:
            /* EXTQL */
            gen_ext_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x39:
            /* SLL */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x3B:
            /* INSQL */
            gen_ins_l(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x3C:
            /* SRA */
            if (likely(rc != 31)) {
                if (ra != 31) {
                    if (islit)
                        tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
                    else {
                        TCGv shift = tcg_temp_new();
                        tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
                        tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
                        tcg_temp_free(shift);
                    }
                } else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x52:
            /* MSKWH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x57:
            /* INSWH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x5A:
            /* EXTWH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x03);
            break;
        case 0x62:
            /* MSKLH */
            gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x67:
            /* INSLH */
            gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x6A:
            /* EXTLH */
            gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
            break;
        case 0x72:
            /* MSKQH */
            gen_msk_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x77:
            /* INSQH */
            gen_ins_h(ra, rb, rc, islit, lit, 0xff);
            break;
        case 0x7A:
            /* EXTQH */
            gen_ext_h(ra, rb, rc, islit, lit, 0xff);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x13:
        switch (fn7) {
        case 0x00:
            /* MULL */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else {
                    if (islit)
                        tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                    else
                        tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                    tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
                }
            }
            break;
        case 0x20:
            /* MULQ */
            if (likely(rc != 31)) {
                if (ra == 31)
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                else if (islit)
                    tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
                else
                    tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
            }
            break;
        case 0x30:
            /* UMULH */
            {
                TCGv low;
                if (unlikely(rc == 31)){
                    break;
                }
                if (ra == 31) {
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
                    break;
                }
                low = tcg_temp_new();
                if (islit) {
                    tcg_gen_movi_tl(low, lit);
                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
                } else {
                    tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
                }
                tcg_temp_free(low);
            }
            break;
        case 0x40:
            /* MULL/V */
            gen_mullv(ra, rb, rc, islit, lit);
            break;
        case 0x60:
            /* MULQ/V */
            gen_mulqv(ra, rb, rc, islit, lit);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x14:
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x04:
            /* ITOFS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_s(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x0A:
            /* SQRTF */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtf(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x0B:
            /* SQRTS */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrts(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        case 0x14:
            /* ITOFF */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31) {
                    TCGv_i32 tmp = tcg_temp_new_i32();
                    tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
                    gen_helper_memory_to_f(cpu_fir[rc], tmp);
                    tcg_temp_free_i32(tmp);
                } else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x24:
            /* ITOFT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
                else
                    tcg_gen_movi_i64(cpu_fir[rc], 0);
            }
            break;
        case 0x2A:
            /* SQRTG */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtg(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x02B:
            /* SQRTT */
            if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
                gen_fsqrtt(ctx, rb, rc, fn11);
                break;
            }
            goto invalid_opc;
        default:
            goto invalid_opc;
        }
        break;
    case 0x15:
        /* VAX floating point */
        /* XXX: rounding mode and trap are ignored (!) */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDF */
            gen_faddf(ra, rb, rc);
            break;
        case 0x01:
            /* SUBF */
            gen_fsubf(ra, rb, rc);
            break;
        case 0x02:
            /* MULF */
            gen_fmulf(ra, rb, rc);
            break;
        case 0x03:
            /* DIVF */
            gen_fdivf(ra, rb, rc);
            break;
        case 0x20:
            /* ADDG */
            gen_faddg(ra, rb, rc);
            break;
        case 0x21:
            /* SUBG */
            gen_fsubg(ra, rb, rc);
            break;
        case 0x22:
            /* MULG */
            gen_fmulg(ra, rb, rc);
            break;
        case 0x23:
            /* DIVG */
            gen_fdivg(ra, rb, rc);
            break;
        case 0x25:
            /* CMPGEQ */
            gen_fcmpgeq(ra, rb, rc);
            break;
        case 0x26:
            /* CMPGLT */
            gen_fcmpglt(ra, rb, rc);
            break;
        case 0x27:
            /* CMPGLE */
            gen_fcmpgle(ra, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x16:
        /* IEEE floating-point */
        switch (fpfn) { /* fn11 & 0x3F */
        case 0x00:
            /* ADDS */
            gen_fadds(ctx, ra, rb, rc, fn11);
            break;
        case 0x01:
            /* SUBS */
            gen_fsubs(ctx, ra, rb, rc, fn11);
            break;
        case 0x02:
            /* MULS */
            gen_fmuls(ctx, ra, rb, rc, fn11);
            break;
        case 0x03:
            /* DIVS */
            gen_fdivs(ctx, ra, rb, rc, fn11);
            break;
        case 0x20:
            /* ADDT */
            gen_faddt(ctx, ra, rb, rc, fn11);
            break;
        case 0x21:
            /* SUBT */
            gen_fsubt(ctx, ra, rb, rc, fn11);
            break;
        case 0x22:
            /* MULT */
            gen_fmult(ctx, ra, rb, rc, fn11);
            break;
        case 0x23:
            /* DIVT */
            gen_fdivt(ctx, ra, rb, rc, fn11);
            break;
        case 0x24:
            /* CMPTUN */
            gen_fcmptun(ctx, ra, rb, rc, fn11);
            break;
        case 0x25:
            /* CMPTEQ */
            gen_fcmpteq(ctx, ra, rb, rc, fn11);
            break;
        case 0x26:
            /* CMPTLT */
            gen_fcmptlt(ctx, ra, rb, rc, fn11);
            break;
        case 0x27:
            /* CMPTLE */
            gen_fcmptle(ctx, ra, rb, rc, fn11);
            break;
        case 0x2C:
            if (fn11 == 0x2AC || fn11 == 0x6AC) {
                /* CVTST */
                gen_fcvtst(ctx, rb, rc, fn11);
            } else {
                /* CVTTS */
                gen_fcvtts(ctx, rb, rc, fn11);
            }
            break;
        case 0x2F:
            /* CVTTQ */
            gen_fcvttq(ctx, rb, rc, fn11);
            break;
        case 0x3C:
            /* CVTQS */
            gen_fcvtqs(ctx, rb, rc, fn11);
            break;
        case 0x3E:
            /* CVTQT */
            gen_fcvtqt(ctx, rb, rc, fn11);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x17:
        switch (fn11) {
        case 0x010:
            /* CVTLQ */
            gen_fcvtlq(rb, rc);
            break;
        case 0x020:
            if (likely(rc != 31)) {
                if (ra == rb) {
                    /* FMOV */
                    if (ra == 31)
                        tcg_gen_movi_i64(cpu_fir[rc], 0);
                    else
                        tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
                } else {
                    /* CPYS */
                    gen_fcpys(ra, rb, rc);
                }
            }
            break;
        case 0x021:
            /* CPYSN */
            gen_fcpysn(ra, rb, rc);
            break;
        case 0x022:
            /* CPYSE */
            gen_fcpyse(ra, rb, rc);
            break;
        case 0x024:
            /* MT_FPCR */
            if (likely(ra != 31))
                gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
            else {
                TCGv tmp = tcg_const_i64(0);
                gen_helper_store_fpcr(cpu_env, tmp);
                tcg_temp_free(tmp);
            }
            break;
        case 0x025:
            /* MF_FPCR */
            if (likely(ra != 31))
                gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
            break;
        case 0x02A:
            /* FCMOVEQ */
            gen_fcmov(TCG_COND_EQ, ra, rb, rc);
            break;
        case 0x02B:
            /* FCMOVNE */
            gen_fcmov(TCG_COND_NE, ra, rb, rc);
            break;
        case 0x02C:
            /* FCMOVLT */
            gen_fcmov(TCG_COND_LT, ra, rb, rc);
            break;
        case 0x02D:
            /* FCMOVGE */
            gen_fcmov(TCG_COND_GE, ra, rb, rc);
            break;
        case 0x02E:
            /* FCMOVLE */
            gen_fcmov(TCG_COND_LE, ra, rb, rc);
            break;
        case 0x02F:
            /* FCMOVGT */
            gen_fcmov(TCG_COND_GT, ra, rb, rc);
            break;
        case 0x030:
            /* CVTQL */
            gen_fcvtql(rb, rc);
            break;
        case 0x130:
            /* CVTQL/V */
        case 0x530:
            /* CVTQL/SV */
            /* ??? I'm pretty sure there's nothing that /sv needs to do that
               /v doesn't do.  The only thing I can think is that /sv is a
               valid instruction merely for completeness in the ISA.  */
            gen_fcvtql_v(ctx, rb, rc);
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x18:
        switch ((uint16_t)disp16) {
        case 0xC000:
            /* RPCC */
            if (ra != 31) {
                if (use_icount) {
                    gen_io_start();
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                    gen_io_end();
                    ret = EXIT_PC_STALE;
                } else {
                    gen_helper_load_pcc(cpu_ir[ra], cpu_env);
                }
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x19:
        /* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mfpr(ra, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1A:
        /* JMP, JSR, RET, JSR_COROUTINE.  These only differ by the branch
           prediction stack action, which of course we don't implement.  */
        if (rb != 31) {
            tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
        } else {
            tcg_gen_movi_i64(cpu_pc, 0);
        }
        if (ra != 31) {
            tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
        }
        ret = EXIT_PC_UPDATED;
        break;
    case 0x1B:
        /* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr;

            if (ra == 31) {
                break;
            }

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);
            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access (hw_ldl/p) */
                gen_helper_ldl_phys(cpu_ir[ra], addr);
                break;
            case 0x1:
                /* Quadword physical access (hw_ldq/p) */
                gen_helper_ldq_phys(cpu_ir[ra], addr);
                break;
            case 0x2:
                /* Longword physical access with lock (hw_ldl_l/p) */
                gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x3:
                /* Quadword physical access with lock (hw_ldq_l/p) */
                gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
                break;
            case 0x4:
                /* Longword virtual PTE fetch (hw_ldl/v) */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual PTE fetch (hw_ldq/v) */
                goto invalid_opc;
            case 0x6:
                /* Invalid */
                goto invalid_opc;
            case 0x7:
                /* Invalid */
                goto invalid_opc;
            case 0x8:
                /* Longword virtual access (hw_ldl) */
                goto invalid_opc;
            case 0x9:
                /* Quadword virtual access (hw_ldq) */
                goto invalid_opc;
            case 0xA:
                /* Longword virtual access with protection check (hw_ldl/w) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xB:
                /* Quadword virtual access with protection check (hw_ldq/w) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_KERNEL_IDX);
                break;
            case 0xC:
                /* Longword virtual access with alt access mode (hw_ldl/a)*/
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alt access mode (hw_ldq/a) */
                goto invalid_opc;
            case 0xE:
                /* Longword virtual access with alternate access mode and
                   protection checks (hw_ldl/wa) */
                tcg_gen_qemu_ld32s(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            case 0xF:
                /* Quadword virtual access with alternate access mode and
                   protection checks (hw_ldq/wa) */
                tcg_gen_qemu_ld64(cpu_ir[ra], addr, MMU_USER_IDX);
                break;
            }
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x1C:
        switch (fn7) {
        case 0x00:
            /* SEXTB */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (islit)
                    tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
                else
                    tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
            }
            break;
        case 0x01:
            /* SEXTW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
                    } else {
                        tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x30:
            /* CTPOP */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
                    } else {
                        gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x31:
            /* PERR */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_perr(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x32:
            /* CTLZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
                    } else {
                        gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x33:
            /* CTTZ */
            if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
                if (likely(rc != 31)) {
                    if (islit) {
                        tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
                    } else {
                        gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
                    }
                }
                break;
            }
            goto invalid_opc;
        case 0x34:
            /* UNPKBW */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbw(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x35:
            /* UNPKBL */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_unpkbl(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x36:
            /* PKWB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pkwb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x37:
            /* PKLB */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                if (real_islit || ra != 31) {
                    goto invalid_opc;
                }
                gen_pklb(rb, rc);
                break;
            }
            goto invalid_opc;
        case 0x38:
            /* MINSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x39:
            /* MINSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3A:
            /* MINUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3B:
            /* MINUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_minuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3C:
            /* MAXUB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxub8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3D:
            /* MAXUW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxuw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3E:
            /* MAXSB8 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsb8(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x3F:
            /* MAXSW4 */
            if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
                gen_maxsw4(ra, rb, rc, islit, lit);
                break;
            }
            goto invalid_opc;
        case 0x70:
            /* FTOIT */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (likely(rc != 31)) {
                if (ra != 31)
                    tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
                else
                    tcg_gen_movi_i64(cpu_ir[rc], 0);
            }
            break;
        case 0x78:
            /* FTOIS */
            if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
                goto invalid_opc;
            }
            if (rc != 31) {
                TCGv_i32 tmp1 = tcg_temp_new_i32();
                if (ra != 31)
                    gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
                else {
                    TCGv tmp2 = tcg_const_i64(0);
                    gen_helper_s_to_memory(tmp1, tmp2);
                    tcg_temp_free(tmp2);
                }
                tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
                tcg_temp_free_i32(tmp1);
            }
            break;
        default:
            goto invalid_opc;
        }
        break;
    case 0x1D:
        /* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            return gen_mtpr(ctx, rb, insn & 0xffff);
        }
#endif
        goto invalid_opc;
    case 0x1E:
        /* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            if (rb == 31) {
                /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
                   address from EXC_ADDR.  This turns out to be useful for our
                   emulation PALcode, so continue to accept it.  */
                TCGv tmp = tcg_temp_new();
                tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
                gen_helper_hw_ret(cpu_env, tmp);
                tcg_temp_free(tmp);
            } else {
                gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
            }
            ret = EXIT_PC_UPDATED;
            break;
        }
#endif
        goto invalid_opc;
    case 0x1F:
        /* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
        if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
            TCGv addr, val;

            addr = tcg_temp_new();
            if (rb != 31)
                tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
            else
                tcg_gen_movi_i64(addr, disp12);

            if (ra != 31)
                val = cpu_ir[ra];
            else {
                val = tcg_temp_new();
                tcg_gen_movi_i64(val, 0);
            }

            switch ((insn >> 12) & 0xF) {
            case 0x0:
                /* Longword physical access */
                gen_helper_stl_phys(addr, val);
                break;
            case 0x1:
                /* Quadword physical access */
                gen_helper_stq_phys(addr, val);
                break;
            case 0x2:
                /* Longword physical access with lock */
                gen_helper_stl_c_phys(val, cpu_env, addr, val);
                break;
            case 0x3:
                /* Quadword physical access with lock */
                gen_helper_stq_c_phys(val, cpu_env, addr, val);
                break;
            case 0x4:
                /* Longword virtual access */
                goto invalid_opc;
            case 0x5:
                /* Quadword virtual access */
                goto invalid_opc;
            case 0xC:
                /* Longword virtual access with alternate access mode */
                goto invalid_opc;
            case 0xD:
                /* Quadword virtual access with alternate access mode */
                goto invalid_opc;
            default:
                goto invalid_opc;
            }

            if (ra == 31)
                tcg_temp_free(val);
            tcg_temp_free(addr);
            break;
        }
#endif
        goto invalid_opc;
    case 0x20:
        /* LDF */
        gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
        break;
    case 0x21:
        /* LDG */
        gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
        break;
    case 0x22:
        /* LDS */
        gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
        break;
    case 0x23:
        /* LDT */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
        break;
    case 0x24:
        /* STF */
        gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
        break;
    case 0x25:
        /* STG */
        gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
        break;
    case 0x26:
        /* STS */
        gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
        break;
    case 0x27:
        /* STT */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
        break;
    case 0x28:
        /* LDL */
        gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
        break;
    case 0x29:
        /* LDQ */
        gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
        break;
    case 0x2A:
        /* LDL_L */
        gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2B:
        /* LDQ_L */
        gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
        break;
    case 0x2C:
        /* STL */
        gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
        break;
    case 0x2D:
        /* STQ */
        gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
        break;
    case 0x2E:
        /* STL_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
        break;
    case 0x2F:
        /* STQ_C */
        ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
        break;
    case 0x30:
        /* BR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x31: /* FBEQ */
        ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
        break;
    case 0x32: /* FBLT */
        ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
        break;
    case 0x33: /* FBLE */
        ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
        break;
    case 0x34:
        /* BSR */
        ret = gen_bdirect(ctx, ra, disp21);
        break;
    case 0x35: /* FBNE */
        ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
        break;
    case 0x36: /* FBGE */
        ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
        break;
    case 0x37: /* FBGT */
        ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
        break;
    case 0x38:
        /* BLBC */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
        break;
    case 0x39:
        /* BEQ */
        ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
        break;
    case 0x3A:
        /* BLT */
        ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
        break;
    case 0x3B:
        /* BLE */
        ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
        break;
    case 0x3C:
        /* BLBS */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
        break;
    case 0x3D:
        /* BNE */
        ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
        break;
    case 0x3E:
        /* BGE */
        ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
        break;
    case 0x3F:
        /* BGT */
        ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
        break;
    invalid_opc:
        ret = gen_invalid(ctx);
        break;
    }

    return ret;
}
static inline void gen_intermediate_code_internal(AlphaCPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUAlphaState *env = &cpu->env;
    DisasContext ctx, *ctxp = &ctx;
    target_ulong pc_start;
    target_ulong pc_mask;
    uint32_t insn;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj = -1;
    ExitStatus ret;
    int num_insns;
    int max_insns;

    pc_start = tb->pc;
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    ctx.tb = tb;
    ctx.pc = pc_start;
    ctx.mem_idx = cpu_mmu_index(env);
    ctx.implver = env->implver;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    /* ??? Every TB begins with unset rounding mode, to be initialized on
       the first fp insn of the TB.  Alternately we could define a proper
       default for every TB (e.g. QUAL_RM_N or QUAL_RM_D) and make sure
       to reset the FP_STATUS to that default at the end of any TB that
       changes the default.  We could even (gasp) dynamically figure out
       what default would be most efficient given the running program.  */
    ctx.tb_rm = -1;
    /* Similarly for flush-to-zero.  */
    ctx.tb_ftz = -1;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }

    if (in_superpage(&ctx, pc_start)) {
        pc_mask = (1ULL << 41) - 1;
    } else {
        pc_mask = ~TARGET_PAGE_MASK;
    }

    gen_tb_start();
    do {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == ctx.pc) {
                    gen_excp(&ctx, EXCP_DEBUG, 0);
                    break;
                }
            }
        }
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j) {
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
                }
            }
            tcg_ctx.gen_opc_pc[lj] = ctx.pc;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();
        insn = cpu_ldl_code(env, ctx.pc);
        num_insns++;

        if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
            tcg_gen_debug_insn_start(ctx.pc);
        }

        ctx.pc += 4;
        ret = translate_one(ctxp, insn);

        /* If we reach a page boundary, are single stepping,
           or exhaust instruction count, stop generation.  */
        if (ret == NO_EXIT
            && ((ctx.pc & pc_mask) == 0
                || tcg_ctx.gen_opc_ptr >= gen_opc_end
                || num_insns >= max_insns
                || singlestep
                || ctx.singlestep_enabled)) {
            ret = EXIT_PC_STALE;
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_PC_STALE:
        tcg_gen_movi_i64(cpu_pc, ctx.pc);
        /* FALLTHRU */
    case EXIT_PC_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG, 0);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j) {
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
        }
    } else {
        tb->size = ctx.pc - pc_start;
        tb->icount = num_insns;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(env, pc_start, ctx.pc - pc_start, 1);
        qemu_log("\n");
    }
#endif
}

void gen_intermediate_code (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc (CPUAlphaState *env, struct TranslationBlock *tb)
{
    gen_intermediate_code_internal(alpha_env_get_cpu(env), tb, true);
}

void restore_state_to_opc(CPUAlphaState *env, TranslationBlock *tb, int pc_pos)
{
    env->pc = tcg_ctx.gen_opc_pc[pc_pos];
}