4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
29 #include "exec/helper-gen.h"
31 #include "exec/translator.h"
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
40 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_flushw(E) qemu_build_not_reached()
46 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
47 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
48 # define gen_helper_restored(E) qemu_build_not_reached()
49 # define gen_helper_saved(E) qemu_build_not_reached()
50 # define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached()
51 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
52 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
53 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
54 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
55 # define gen_helper_udivx(D, E, A, B) qemu_build_not_reached()
56 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
57 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
58 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
59 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
60 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
61 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
65 /* Dynamic PC, must exit to main loop. */
67 /* Dynamic PC, one of two values according to jump_pc[T2]. */
69 /* Dynamic PC, may lookup next TB. */
70 #define DYNAMIC_PC_LOOKUP 3
72 #define DISAS_EXIT DISAS_TARGET_0
74 /* global register indexes */
75 static TCGv_ptr cpu_regwptr
;
76 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
77 static TCGv_i32 cpu_cc_op
;
78 static TCGv_i32 cpu_psr
;
79 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
80 static TCGv cpu_regs
[32];
85 static TCGv_i32 cpu_xcc
, cpu_fprs
;
88 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
89 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
91 /* Floating point registers */
92 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
94 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
96 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
97 # define env64_field_offsetof(X) env_field_offsetof(X)
99 # define env32_field_offsetof(X) env_field_offsetof(X)
100 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
103 typedef struct DisasDelayException
{
104 struct DisasDelayException
*next
;
107 /* Saved state at parent insn. */
110 } DisasDelayException
;
112 typedef struct DisasContext
{
113 DisasContextBase base
;
114 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
115 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
116 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
119 bool address_mask_32bit
;
120 #ifndef CONFIG_USER_ONLY
122 #ifdef TARGET_SPARC64
127 uint32_t cc_op
; /* current CC operation */
129 #ifdef TARGET_SPARC64
133 DisasDelayException
*delay_excp_list
;
142 // This function uses non-native bit order
143 #define GET_FIELD(X, FROM, TO) \
144 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
146 // This function uses the order in the manuals, i.e. bit 0 is 2^0
147 #define GET_FIELD_SP(X, FROM, TO) \
148 GET_FIELD(X, 31 - (TO), 31 - (FROM))
150 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
151 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
153 #ifdef TARGET_SPARC64
154 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
155 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
157 #define DFPREG(r) (r & 0x1e)
158 #define QFPREG(r) (r & 0x1c)
161 #define UA2005_HTRAP_MASK 0xff
162 #define V8_TRAP_MASK 0x7f
/*
 * Sign-extend the low LEN bits of X to a full signed int.
 * E.g. sign_extend(0xff, 8) == -1, sign_extend(0x7f, 8) == 127.
 * LEN must be in [1, 32].  Implemented with unsigned arithmetic so
 * that negative inputs never invoke signed-shift undefined behavior.
 */
static int sign_extend(int x, int len)
{
    uint32_t sign = 1u << (len - 1);
    uint32_t mask = (sign << 1) - 1;   /* low LEN bits; all-ones when len == 32 */
    uint32_t v = (uint32_t)x & mask;
    return (int)((v ^ sign) - sign);
}
170 #define IS_IMM (insn & (1<<13))
172 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
174 #if defined(TARGET_SPARC64)
175 int bit
= (rd
< 32) ? 1 : 2;
176 /* If we know we've already set this bit within the TB,
177 we can avoid setting it again. */
178 if (!(dc
->fprs_dirty
& bit
)) {
179 dc
->fprs_dirty
|= bit
;
180 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
185 /* floating point registers moves */
186 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
188 TCGv_i32 ret
= tcg_temp_new_i32();
190 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
192 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
197 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
199 TCGv_i64 t
= tcg_temp_new_i64();
201 tcg_gen_extu_i32_i64(t
, v
);
202 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
203 (dst
& 1 ? 0 : 32), 32);
204 gen_update_fprs_dirty(dc
, dst
);
207 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
209 return tcg_temp_new_i32();
212 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
215 return cpu_fpr
[src
/ 2];
218 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
221 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
222 gen_update_fprs_dirty(dc
, dst
);
225 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
227 return cpu_fpr
[DFPREG(dst
) / 2];
230 static void gen_op_load_fpr_QT0(unsigned int src
)
232 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
233 offsetof(CPU_QuadU
, ll
.upper
));
234 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
235 offsetof(CPU_QuadU
, ll
.lower
));
238 static void gen_op_load_fpr_QT1(unsigned int src
)
240 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
241 offsetof(CPU_QuadU
, ll
.upper
));
242 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
243 offsetof(CPU_QuadU
, ll
.lower
));
246 static void gen_op_store_QT0_fpr(unsigned int dst
)
248 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
249 offsetof(CPU_QuadU
, ll
.upper
));
250 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
251 offsetof(CPU_QuadU
, ll
.lower
));
254 static void gen_store_fpr_Q(DisasContext
*dc
, unsigned int dst
,
255 TCGv_i64 v1
, TCGv_i64 v2
)
259 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v1
);
260 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2 + 1], v2
);
261 gen_update_fprs_dirty(dc
, dst
);
264 #ifdef TARGET_SPARC64
265 static TCGv_i64
gen_load_fpr_Q0(DisasContext
*dc
, unsigned int src
)
268 return cpu_fpr
[src
/ 2];
271 static TCGv_i64
gen_load_fpr_Q1(DisasContext
*dc
, unsigned int src
)
274 return cpu_fpr
[src
/ 2 + 1];
277 static void gen_move_Q(DisasContext
*dc
, unsigned int rd
, unsigned int rs
)
282 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
283 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
284 gen_update_fprs_dirty(dc
, rd
);
289 #ifdef CONFIG_USER_ONLY
290 #define supervisor(dc) 0
291 #define hypervisor(dc) 0
293 #ifdef TARGET_SPARC64
294 #define hypervisor(dc) (dc->hypervisor)
295 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
297 #define supervisor(dc) (dc->supervisor)
298 #define hypervisor(dc) 0
302 #if !defined(TARGET_SPARC64)
303 # define AM_CHECK(dc) false
304 #elif defined(TARGET_ABI32)
305 # define AM_CHECK(dc) true
306 #elif defined(CONFIG_USER_ONLY)
307 # define AM_CHECK(dc) false
309 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
312 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
315 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
319 static target_ulong
address_mask_i(DisasContext
*dc
, target_ulong addr
)
321 return AM_CHECK(dc
) ? (uint32_t)addr
: addr
;
324 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
328 return cpu_regs
[reg
];
330 TCGv t
= tcg_temp_new();
331 tcg_gen_movi_tl(t
, 0);
336 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
340 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
344 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
348 return cpu_regs
[reg
];
350 return tcg_temp_new();
354 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
356 return translator_use_goto_tb(&s
->base
, pc
) &&
357 translator_use_goto_tb(&s
->base
, npc
);
360 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
361 target_ulong pc
, target_ulong npc
)
363 if (use_goto_tb(s
, pc
, npc
)) {
364 /* jump to same page: we can use a direct jump */
365 tcg_gen_goto_tb(tb_num
);
366 tcg_gen_movi_tl(cpu_pc
, pc
);
367 tcg_gen_movi_tl(cpu_npc
, npc
);
368 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
370 /* jump to another page: we can use an indirect jump */
371 tcg_gen_movi_tl(cpu_pc
, pc
);
372 tcg_gen_movi_tl(cpu_npc
, npc
);
373 tcg_gen_lookup_and_goto_ptr();
378 static void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
380 tcg_gen_extu_i32_tl(reg
, src
);
381 tcg_gen_extract_tl(reg
, reg
, PSR_NEG_SHIFT
, 1);
384 static void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
386 tcg_gen_extu_i32_tl(reg
, src
);
387 tcg_gen_extract_tl(reg
, reg
, PSR_ZERO_SHIFT
, 1);
390 static void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
392 tcg_gen_extu_i32_tl(reg
, src
);
393 tcg_gen_extract_tl(reg
, reg
, PSR_OVF_SHIFT
, 1);
396 static void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
398 tcg_gen_extu_i32_tl(reg
, src
);
399 tcg_gen_extract_tl(reg
, reg
, PSR_CARRY_SHIFT
, 1);
402 static void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
404 tcg_gen_mov_tl(cpu_cc_src
, src1
);
405 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
406 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
407 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
410 static TCGv_i32
gen_add32_carry32(void)
412 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
414 /* Carry is computed from a previous add: (dst < src) */
415 #if TARGET_LONG_BITS == 64
416 cc_src1_32
= tcg_temp_new_i32();
417 cc_src2_32
= tcg_temp_new_i32();
418 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_dst
);
419 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src
);
421 cc_src1_32
= cpu_cc_dst
;
422 cc_src2_32
= cpu_cc_src
;
425 carry_32
= tcg_temp_new_i32();
426 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
431 static TCGv_i32
gen_sub32_carry32(void)
433 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
435 /* Carry is computed from a previous borrow: (src1 < src2) */
436 #if TARGET_LONG_BITS == 64
437 cc_src1_32
= tcg_temp_new_i32();
438 cc_src2_32
= tcg_temp_new_i32();
439 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_src
);
440 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src2
);
442 cc_src1_32
= cpu_cc_src
;
443 cc_src2_32
= cpu_cc_src2
;
446 carry_32
= tcg_temp_new_i32();
447 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
452 static void gen_op_addc_int(TCGv dst
, TCGv src1
, TCGv src2
,
453 TCGv_i32 carry_32
, bool update_cc
)
455 tcg_gen_add_tl(dst
, src1
, src2
);
457 #ifdef TARGET_SPARC64
458 TCGv carry
= tcg_temp_new();
459 tcg_gen_extu_i32_tl(carry
, carry_32
);
460 tcg_gen_add_tl(dst
, dst
, carry
);
462 tcg_gen_add_i32(dst
, dst
, carry_32
);
466 tcg_debug_assert(dst
== cpu_cc_dst
);
467 tcg_gen_mov_tl(cpu_cc_src
, src1
);
468 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
472 static void gen_op_addc_int_add(TCGv dst
, TCGv src1
, TCGv src2
, bool update_cc
)
476 if (TARGET_LONG_BITS
== 64) {
477 gen_op_addc_int(dst
, src1
, src2
, gen_add32_carry32(), update_cc
);
482 * We can re-use the host's hardware carry generation by using
483 * an ADD2 opcode. We discard the low part of the output.
484 * Ideally we'd combine this operation with the add that
485 * generated the carry in the first place.
487 discard
= tcg_temp_new();
488 tcg_gen_add2_tl(discard
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
491 tcg_debug_assert(dst
== cpu_cc_dst
);
492 tcg_gen_mov_tl(cpu_cc_src
, src1
);
493 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
497 static void gen_op_addc_add(TCGv dst
, TCGv src1
, TCGv src2
)
499 gen_op_addc_int_add(dst
, src1
, src2
, false);
502 static void gen_op_addccc_add(TCGv dst
, TCGv src1
, TCGv src2
)
504 gen_op_addc_int_add(dst
, src1
, src2
, true);
507 static void gen_op_addc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
509 gen_op_addc_int(dst
, src1
, src2
, gen_sub32_carry32(), false);
512 static void gen_op_addccc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
514 gen_op_addc_int(dst
, src1
, src2
, gen_sub32_carry32(), true);
517 static void gen_op_addc_int_generic(TCGv dst
, TCGv src1
, TCGv src2
,
520 TCGv_i32 carry_32
= tcg_temp_new_i32();
521 gen_helper_compute_C_icc(carry_32
, tcg_env
);
522 gen_op_addc_int(dst
, src1
, src2
, carry_32
, update_cc
);
525 static void gen_op_addc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
527 gen_op_addc_int_generic(dst
, src1
, src2
, false);
530 static void gen_op_addccc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
532 gen_op_addc_int_generic(dst
, src1
, src2
, true);
535 static void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
537 tcg_gen_mov_tl(cpu_cc_src
, src1
);
538 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
539 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
540 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
543 static void gen_op_subc_int(TCGv dst
, TCGv src1
, TCGv src2
,
544 TCGv_i32 carry_32
, bool update_cc
)
548 #if TARGET_LONG_BITS == 64
549 carry
= tcg_temp_new();
550 tcg_gen_extu_i32_i64(carry
, carry_32
);
555 tcg_gen_sub_tl(dst
, src1
, src2
);
556 tcg_gen_sub_tl(dst
, dst
, carry
);
559 tcg_debug_assert(dst
== cpu_cc_dst
);
560 tcg_gen_mov_tl(cpu_cc_src
, src1
);
561 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
565 static void gen_op_subc_add(TCGv dst
, TCGv src1
, TCGv src2
)
567 gen_op_subc_int(dst
, src1
, src2
, gen_add32_carry32(), false);
570 static void gen_op_subccc_add(TCGv dst
, TCGv src1
, TCGv src2
)
572 gen_op_subc_int(dst
, src1
, src2
, gen_add32_carry32(), true);
575 static void gen_op_subc_int_sub(TCGv dst
, TCGv src1
, TCGv src2
, bool update_cc
)
579 if (TARGET_LONG_BITS
== 64) {
580 gen_op_subc_int(dst
, src1
, src2
, gen_sub32_carry32(), update_cc
);
585 * We can re-use the host's hardware carry generation by using
586 * a SUB2 opcode. We discard the low part of the output.
588 discard
= tcg_temp_new();
589 tcg_gen_sub2_tl(discard
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
592 tcg_debug_assert(dst
== cpu_cc_dst
);
593 tcg_gen_mov_tl(cpu_cc_src
, src1
);
594 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
598 static void gen_op_subc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
600 gen_op_subc_int_sub(dst
, src1
, src2
, false);
603 static void gen_op_subccc_sub(TCGv dst
, TCGv src1
, TCGv src2
)
605 gen_op_subc_int_sub(dst
, src1
, src2
, true);
608 static void gen_op_subc_int_generic(TCGv dst
, TCGv src1
, TCGv src2
,
611 TCGv_i32 carry_32
= tcg_temp_new_i32();
613 gen_helper_compute_C_icc(carry_32
, tcg_env
);
614 gen_op_subc_int(dst
, src1
, src2
, carry_32
, update_cc
);
617 static void gen_op_subc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
619 gen_op_subc_int_generic(dst
, src1
, src2
, false);
622 static void gen_op_subccc_generic(TCGv dst
, TCGv src1
, TCGv src2
)
624 gen_op_subc_int_generic(dst
, src1
, src2
, true);
627 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
629 TCGv r_temp
, zero
, t0
;
631 r_temp
= tcg_temp_new();
638 zero
= tcg_constant_tl(0);
639 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
640 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
641 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
642 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
646 // env->y = (b2 << 31) | (env->y >> 1);
647 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
648 tcg_gen_deposit_tl(cpu_y
, t0
, cpu_cc_src
, 31, 1);
651 gen_mov_reg_N(t0
, cpu_psr
);
652 gen_mov_reg_V(r_temp
, cpu_psr
);
653 tcg_gen_xor_tl(t0
, t0
, r_temp
);
655 // T0 = (b1 << 31) | (T0 >> 1);
657 tcg_gen_shli_tl(t0
, t0
, 31);
658 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
659 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
661 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
663 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
666 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
668 #if TARGET_LONG_BITS == 32
670 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
672 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
675 TCGv t0
= tcg_temp_new_i64();
676 TCGv t1
= tcg_temp_new_i64();
679 tcg_gen_ext32s_i64(t0
, src1
);
680 tcg_gen_ext32s_i64(t1
, src2
);
682 tcg_gen_ext32u_i64(t0
, src1
);
683 tcg_gen_ext32u_i64(t1
, src2
);
686 tcg_gen_mul_i64(dst
, t0
, t1
);
687 tcg_gen_shri_i64(cpu_y
, dst
, 32);
691 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
693 /* zero-extend truncated operands before multiplication */
694 gen_op_multiply(dst
, src1
, src2
, 0);
697 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
699 /* sign-extend truncated operands before multiplication */
700 gen_op_multiply(dst
, src1
, src2
, 1);
703 static void gen_op_udivx(TCGv dst
, TCGv src1
, TCGv src2
)
705 gen_helper_udivx(dst
, tcg_env
, src1
, src2
);
708 static void gen_op_sdivx(TCGv dst
, TCGv src1
, TCGv src2
)
710 gen_helper_sdivx(dst
, tcg_env
, src1
, src2
);
713 static void gen_op_udiv(TCGv dst
, TCGv src1
, TCGv src2
)
715 gen_helper_udiv(dst
, tcg_env
, src1
, src2
);
718 static void gen_op_sdiv(TCGv dst
, TCGv src1
, TCGv src2
)
720 gen_helper_sdiv(dst
, tcg_env
, src1
, src2
);
723 static void gen_op_udivcc(TCGv dst
, TCGv src1
, TCGv src2
)
725 gen_helper_udiv_cc(dst
, tcg_env
, src1
, src2
);
728 static void gen_op_sdivcc(TCGv dst
, TCGv src1
, TCGv src2
)
730 gen_helper_sdiv_cc(dst
, tcg_env
, src1
, src2
);
733 static void gen_op_taddcctv(TCGv dst
, TCGv src1
, TCGv src2
)
735 gen_helper_taddcctv(dst
, tcg_env
, src1
, src2
);
738 static void gen_op_tsubcctv(TCGv dst
, TCGv src1
, TCGv src2
)
740 gen_helper_tsubcctv(dst
, tcg_env
, src1
, src2
);
744 static void gen_op_eval_ba(TCGv dst
)
746 tcg_gen_movi_tl(dst
, 1);
750 static void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
752 gen_mov_reg_Z(dst
, src
);
756 static void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
758 TCGv t0
= tcg_temp_new();
759 gen_mov_reg_N(t0
, src
);
760 gen_mov_reg_V(dst
, src
);
761 tcg_gen_xor_tl(dst
, dst
, t0
);
762 gen_mov_reg_Z(t0
, src
);
763 tcg_gen_or_tl(dst
, dst
, t0
);
767 static void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
769 TCGv t0
= tcg_temp_new();
770 gen_mov_reg_V(t0
, src
);
771 gen_mov_reg_N(dst
, src
);
772 tcg_gen_xor_tl(dst
, dst
, t0
);
776 static void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
778 TCGv t0
= tcg_temp_new();
779 gen_mov_reg_Z(t0
, src
);
780 gen_mov_reg_C(dst
, src
);
781 tcg_gen_or_tl(dst
, dst
, t0
);
785 static void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
787 gen_mov_reg_C(dst
, src
);
791 static void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
793 gen_mov_reg_V(dst
, src
);
797 static void gen_op_eval_bn(TCGv dst
)
799 tcg_gen_movi_tl(dst
, 0);
803 static void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
805 gen_mov_reg_N(dst
, src
);
809 static void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
811 gen_mov_reg_Z(dst
, src
);
812 tcg_gen_xori_tl(dst
, dst
, 0x1);
816 static void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
818 gen_op_eval_ble(dst
, src
);
819 tcg_gen_xori_tl(dst
, dst
, 0x1);
823 static void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
825 gen_op_eval_bl(dst
, src
);
826 tcg_gen_xori_tl(dst
, dst
, 0x1);
830 static void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
832 gen_op_eval_bleu(dst
, src
);
833 tcg_gen_xori_tl(dst
, dst
, 0x1);
837 static void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
839 gen_mov_reg_C(dst
, src
);
840 tcg_gen_xori_tl(dst
, dst
, 0x1);
844 static void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
846 gen_mov_reg_N(dst
, src
);
847 tcg_gen_xori_tl(dst
, dst
, 0x1);
851 static void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
853 gen_mov_reg_V(dst
, src
);
854 tcg_gen_xori_tl(dst
, dst
, 0x1);
858 FPSR bit field FCC1 | FCC0:
864 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
865 unsigned int fcc_offset
)
867 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
868 tcg_gen_andi_tl(reg
, reg
, 0x1);
871 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
873 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
874 tcg_gen_andi_tl(reg
, reg
, 0x1);
878 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
880 TCGv t0
= tcg_temp_new();
881 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
882 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
883 tcg_gen_or_tl(dst
, dst
, t0
);
886 // 1 or 2: FCC0 ^ FCC1
887 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
889 TCGv t0
= tcg_temp_new();
890 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
891 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
892 tcg_gen_xor_tl(dst
, dst
, t0
);
896 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
898 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
902 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
904 TCGv t0
= tcg_temp_new();
905 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
906 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
907 tcg_gen_andc_tl(dst
, dst
, t0
);
911 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
913 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
917 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
919 TCGv t0
= tcg_temp_new();
920 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
921 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
922 tcg_gen_andc_tl(dst
, t0
, dst
);
926 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
928 TCGv t0
= tcg_temp_new();
929 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
930 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
931 tcg_gen_and_tl(dst
, dst
, t0
);
935 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
937 TCGv t0
= tcg_temp_new();
938 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
939 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
940 tcg_gen_or_tl(dst
, dst
, t0
);
941 tcg_gen_xori_tl(dst
, dst
, 0x1);
944 // 0 or 3: !(FCC0 ^ FCC1)
945 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
947 TCGv t0
= tcg_temp_new();
948 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
949 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
950 tcg_gen_xor_tl(dst
, dst
, t0
);
951 tcg_gen_xori_tl(dst
, dst
, 0x1);
955 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
957 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
958 tcg_gen_xori_tl(dst
, dst
, 0x1);
961 // !1: !(FCC0 & !FCC1)
962 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
964 TCGv t0
= tcg_temp_new();
965 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
966 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
967 tcg_gen_andc_tl(dst
, dst
, t0
);
968 tcg_gen_xori_tl(dst
, dst
, 0x1);
972 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
974 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
975 tcg_gen_xori_tl(dst
, dst
, 0x1);
978 // !2: !(!FCC0 & FCC1)
979 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
981 TCGv t0
= tcg_temp_new();
982 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
983 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
984 tcg_gen_andc_tl(dst
, t0
, dst
);
985 tcg_gen_xori_tl(dst
, dst
, 0x1);
988 // !3: !(FCC0 & FCC1)
989 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
991 TCGv t0
= tcg_temp_new();
992 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
993 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
994 tcg_gen_and_tl(dst
, dst
, t0
);
995 tcg_gen_xori_tl(dst
, dst
, 0x1);
998 static void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
999 target_ulong pc2
, TCGv r_cond
)
1001 TCGLabel
*l1
= gen_new_label();
1003 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
1005 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
1008 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
1011 static void gen_generic_branch(DisasContext
*dc
)
1013 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
1014 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
1015 TCGv zero
= tcg_constant_tl(0);
1017 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
1020 /* call this function before using the condition register as it may
1021 have been set for a jump */
1022 static void flush_cond(DisasContext
*dc
)
1024 if (dc
->npc
== JUMP_PC
) {
1025 gen_generic_branch(dc
);
1026 dc
->npc
= DYNAMIC_PC_LOOKUP
;
1030 static void save_npc(DisasContext
*dc
)
1035 gen_generic_branch(dc
);
1036 dc
->npc
= DYNAMIC_PC_LOOKUP
;
1039 case DYNAMIC_PC_LOOKUP
:
1042 g_assert_not_reached();
1045 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
1049 static void update_psr(DisasContext
*dc
)
1051 if (dc
->cc_op
!= CC_OP_FLAGS
) {
1052 dc
->cc_op
= CC_OP_FLAGS
;
1053 gen_helper_compute_psr(tcg_env
);
1057 static void save_state(DisasContext
*dc
)
1059 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1063 static void gen_exception(DisasContext
*dc
, int which
)
1066 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(which
));
1067 dc
->base
.is_jmp
= DISAS_NORETURN
;
1070 static TCGLabel
*delay_exceptionv(DisasContext
*dc
, TCGv_i32 excp
)
1072 DisasDelayException
*e
= g_new0(DisasDelayException
, 1);
1074 e
->next
= dc
->delay_excp_list
;
1075 dc
->delay_excp_list
= e
;
1077 e
->lab
= gen_new_label();
1080 /* Caller must have used flush_cond before branch. */
1081 assert(e
->npc
!= JUMP_PC
);
1087 static TCGLabel
*delay_exception(DisasContext
*dc
, int excp
)
1089 return delay_exceptionv(dc
, tcg_constant_i32(excp
));
1092 static void gen_check_align(DisasContext
*dc
, TCGv addr
, int mask
)
1094 TCGv t
= tcg_temp_new();
1097 tcg_gen_andi_tl(t
, addr
, mask
);
1100 lab
= delay_exception(dc
, TT_UNALIGNED
);
1101 tcg_gen_brcondi_tl(TCG_COND_NE
, t
, 0, lab
);
1104 static void gen_mov_pc_npc(DisasContext
*dc
)
1109 gen_generic_branch(dc
);
1110 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1111 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1114 case DYNAMIC_PC_LOOKUP
:
1115 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1119 g_assert_not_reached();
1126 static void gen_op_next_insn(void)
1128 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1129 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1132 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1135 static int subcc_cond
[16] = {
1151 -1, /* no overflow */
1154 static int logic_cond
[16] = {
1156 TCG_COND_EQ
, /* eq: Z */
1157 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1158 TCG_COND_LT
, /* lt: N ^ V -> N */
1159 TCG_COND_EQ
, /* leu: C | Z -> Z */
1160 TCG_COND_NEVER
, /* ltu: C -> 0 */
1161 TCG_COND_LT
, /* neg: N */
1162 TCG_COND_NEVER
, /* vs: V -> 0 */
1164 TCG_COND_NE
, /* ne: !Z */
1165 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1166 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1167 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1168 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1169 TCG_COND_GE
, /* pos: !N */
1170 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1176 #ifdef TARGET_SPARC64
1186 switch (dc
->cc_op
) {
1188 cmp
->cond
= logic_cond
[cond
];
1190 cmp
->is_bool
= false;
1191 cmp
->c2
= tcg_constant_tl(0);
1192 #ifdef TARGET_SPARC64
1194 cmp
->c1
= tcg_temp_new();
1195 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1199 cmp
->c1
= cpu_cc_dst
;
1206 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1207 goto do_compare_dst_0
;
1209 case 7: /* overflow */
1210 case 15: /* !overflow */
1214 cmp
->cond
= subcc_cond
[cond
];
1215 cmp
->is_bool
= false;
1216 #ifdef TARGET_SPARC64
1218 /* Note that sign-extension works for unsigned compares as
1219 long as both operands are sign-extended. */
1220 cmp
->c1
= tcg_temp_new();
1221 cmp
->c2
= tcg_temp_new();
1222 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1223 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1227 cmp
->c1
= cpu_cc_src
;
1228 cmp
->c2
= cpu_cc_src2
;
1235 gen_helper_compute_psr(tcg_env
);
1236 dc
->cc_op
= CC_OP_FLAGS
;
1240 /* We're going to generate a boolean result. */
1241 cmp
->cond
= TCG_COND_NE
;
1242 cmp
->is_bool
= true;
1243 cmp
->c1
= r_dst
= tcg_temp_new();
1244 cmp
->c2
= tcg_constant_tl(0);
1248 gen_op_eval_bn(r_dst
);
1251 gen_op_eval_be(r_dst
, r_src
);
1254 gen_op_eval_ble(r_dst
, r_src
);
1257 gen_op_eval_bl(r_dst
, r_src
);
1260 gen_op_eval_bleu(r_dst
, r_src
);
1263 gen_op_eval_bcs(r_dst
, r_src
);
1266 gen_op_eval_bneg(r_dst
, r_src
);
1269 gen_op_eval_bvs(r_dst
, r_src
);
1272 gen_op_eval_ba(r_dst
);
1275 gen_op_eval_bne(r_dst
, r_src
);
1278 gen_op_eval_bg(r_dst
, r_src
);
1281 gen_op_eval_bge(r_dst
, r_src
);
1284 gen_op_eval_bgu(r_dst
, r_src
);
1287 gen_op_eval_bcc(r_dst
, r_src
);
1290 gen_op_eval_bpos(r_dst
, r_src
);
1293 gen_op_eval_bvc(r_dst
, r_src
);
1300 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1302 unsigned int offset
;
1305 /* For now we still generate a straight boolean result. */
1306 cmp
->cond
= TCG_COND_NE
;
1307 cmp
->is_bool
= true;
1308 cmp
->c1
= r_dst
= tcg_temp_new();
1309 cmp
->c2
= tcg_constant_tl(0);
1329 gen_op_eval_bn(r_dst
);
1332 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1335 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1338 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1341 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1344 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1347 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1350 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1353 gen_op_eval_ba(r_dst
);
1356 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1359 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1362 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1365 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1368 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1371 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1374 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1380 static const TCGCond gen_tcg_cond_reg
[8] = {
1381 TCG_COND_NEVER
, /* reserved */
1385 TCG_COND_NEVER
, /* reserved */
1391 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1393 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1394 cmp
->is_bool
= false;
1396 cmp
->c2
= tcg_constant_tl(0);
1399 #ifdef TARGET_SPARC64
1400 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1404 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1407 gen_helper_fcmps_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1410 gen_helper_fcmps_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1413 gen_helper_fcmps_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1418 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1422 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1425 gen_helper_fcmpd_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1428 gen_helper_fcmpd_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1431 gen_helper_fcmpd_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1436 static void gen_op_fcmpq(int fccno
)
1440 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1443 gen_helper_fcmpq_fcc1(cpu_fsr
, tcg_env
);
1446 gen_helper_fcmpq_fcc2(cpu_fsr
, tcg_env
);
1449 gen_helper_fcmpq_fcc3(cpu_fsr
, tcg_env
);
1454 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1458 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1461 gen_helper_fcmpes_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1464 gen_helper_fcmpes_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1467 gen_helper_fcmpes_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1472 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1476 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1479 gen_helper_fcmped_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1482 gen_helper_fcmped_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1485 gen_helper_fcmped_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1490 static void gen_op_fcmpeq(int fccno
)
1494 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1497 gen_helper_fcmpeq_fcc1(cpu_fsr
, tcg_env
);
1500 gen_helper_fcmpeq_fcc2(cpu_fsr
, tcg_env
);
1503 gen_helper_fcmpeq_fcc3(cpu_fsr
, tcg_env
);
1510 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1512 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1515 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1517 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1520 static void gen_op_fcmpq(int fccno
)
1522 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1525 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1527 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1530 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1532 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1535 static void gen_op_fcmpeq(int fccno
)
1537 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1541 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1543 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1544 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1545 gen_exception(dc
, TT_FP_EXCP
);
1548 static int gen_trap_ifnofpu(DisasContext
*dc
)
1550 #if !defined(CONFIG_USER_ONLY)
1551 if (!dc
->fpu_enabled
) {
1552 gen_exception(dc
, TT_NFPU_INSN
);
1559 static void gen_op_clear_ieee_excp_and_FTT(void)
1561 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1564 static void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1565 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1569 src
= gen_load_fpr_F(dc
, rs
);
1570 dst
= gen_dest_fpr_F(dc
);
1572 gen(dst
, tcg_env
, src
);
1573 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1575 gen_store_fpr_F(dc
, rd
, dst
);
1578 static void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1579 void (*gen
)(TCGv_i32
, TCGv_i32
))
1583 src
= gen_load_fpr_F(dc
, rs
);
1584 dst
= gen_dest_fpr_F(dc
);
1588 gen_store_fpr_F(dc
, rd
, dst
);
1591 static void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1592 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1594 TCGv_i32 dst
, src1
, src2
;
1596 src1
= gen_load_fpr_F(dc
, rs1
);
1597 src2
= gen_load_fpr_F(dc
, rs2
);
1598 dst
= gen_dest_fpr_F(dc
);
1600 gen(dst
, tcg_env
, src1
, src2
);
1601 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1603 gen_store_fpr_F(dc
, rd
, dst
);
1606 #ifdef TARGET_SPARC64
1607 static void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1608 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1610 TCGv_i32 dst
, src1
, src2
;
1612 src1
= gen_load_fpr_F(dc
, rs1
);
1613 src2
= gen_load_fpr_F(dc
, rs2
);
1614 dst
= gen_dest_fpr_F(dc
);
1616 gen(dst
, src1
, src2
);
1618 gen_store_fpr_F(dc
, rd
, dst
);
1622 static void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1623 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1627 src
= gen_load_fpr_D(dc
, rs
);
1628 dst
= gen_dest_fpr_D(dc
, rd
);
1630 gen(dst
, tcg_env
, src
);
1631 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1633 gen_store_fpr_D(dc
, rd
, dst
);
1636 #ifdef TARGET_SPARC64
1637 static void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1638 void (*gen
)(TCGv_i64
, TCGv_i64
))
1642 src
= gen_load_fpr_D(dc
, rs
);
1643 dst
= gen_dest_fpr_D(dc
, rd
);
1647 gen_store_fpr_D(dc
, rd
, dst
);
1651 static void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1652 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1654 TCGv_i64 dst
, src1
, src2
;
1656 src1
= gen_load_fpr_D(dc
, rs1
);
1657 src2
= gen_load_fpr_D(dc
, rs2
);
1658 dst
= gen_dest_fpr_D(dc
, rd
);
1660 gen(dst
, tcg_env
, src1
, src2
);
1661 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1663 gen_store_fpr_D(dc
, rd
, dst
);
1666 #ifdef TARGET_SPARC64
1667 static void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1668 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1670 TCGv_i64 dst
, src1
, src2
;
1672 src1
= gen_load_fpr_D(dc
, rs1
);
1673 src2
= gen_load_fpr_D(dc
, rs2
);
1674 dst
= gen_dest_fpr_D(dc
, rd
);
1676 gen(dst
, src1
, src2
);
1678 gen_store_fpr_D(dc
, rd
, dst
);
1681 static void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1682 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1684 TCGv_i64 dst
, src1
, src2
;
1686 src1
= gen_load_fpr_D(dc
, rs1
);
1687 src2
= gen_load_fpr_D(dc
, rs2
);
1688 dst
= gen_dest_fpr_D(dc
, rd
);
1690 gen(dst
, cpu_gsr
, src1
, src2
);
1692 gen_store_fpr_D(dc
, rd
, dst
);
1695 static void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1696 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1698 TCGv_i64 dst
, src0
, src1
, src2
;
1700 src1
= gen_load_fpr_D(dc
, rs1
);
1701 src2
= gen_load_fpr_D(dc
, rs2
);
1702 src0
= gen_load_fpr_D(dc
, rd
);
1703 dst
= gen_dest_fpr_D(dc
, rd
);
1705 gen(dst
, src0
, src1
, src2
);
1707 gen_store_fpr_D(dc
, rd
, dst
);
1711 static void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1712 void (*gen
)(TCGv_ptr
))
1714 gen_op_load_fpr_QT1(QFPREG(rs
));
1717 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1719 gen_op_store_QT0_fpr(QFPREG(rd
));
1720 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1723 #ifdef TARGET_SPARC64
1724 static void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1725 void (*gen
)(TCGv_ptr
))
1727 gen_op_load_fpr_QT1(QFPREG(rs
));
1731 gen_op_store_QT0_fpr(QFPREG(rd
));
1732 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1736 static void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1737 void (*gen
)(TCGv_ptr
))
1739 gen_op_load_fpr_QT0(QFPREG(rs1
));
1740 gen_op_load_fpr_QT1(QFPREG(rs2
));
1743 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1745 gen_op_store_QT0_fpr(QFPREG(rd
));
1746 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1749 static void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1750 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1753 TCGv_i32 src1
, src2
;
1755 src1
= gen_load_fpr_F(dc
, rs1
);
1756 src2
= gen_load_fpr_F(dc
, rs2
);
1757 dst
= gen_dest_fpr_D(dc
, rd
);
1759 gen(dst
, tcg_env
, src1
, src2
);
1760 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1762 gen_store_fpr_D(dc
, rd
, dst
);
1765 static void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1766 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1768 TCGv_i64 src1
, src2
;
1770 src1
= gen_load_fpr_D(dc
, rs1
);
1771 src2
= gen_load_fpr_D(dc
, rs2
);
1773 gen(tcg_env
, src1
, src2
);
1774 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1776 gen_op_store_QT0_fpr(QFPREG(rd
));
1777 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1780 #ifdef TARGET_SPARC64
1781 static void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1782 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1787 src
= gen_load_fpr_F(dc
, rs
);
1788 dst
= gen_dest_fpr_D(dc
, rd
);
1790 gen(dst
, tcg_env
, src
);
1791 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1793 gen_store_fpr_D(dc
, rd
, dst
);
1797 static void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1798 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1803 src
= gen_load_fpr_F(dc
, rs
);
1804 dst
= gen_dest_fpr_D(dc
, rd
);
1806 gen(dst
, tcg_env
, src
);
1808 gen_store_fpr_D(dc
, rd
, dst
);
1811 static void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1812 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1817 src
= gen_load_fpr_D(dc
, rs
);
1818 dst
= gen_dest_fpr_F(dc
);
1820 gen(dst
, tcg_env
, src
);
1821 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1823 gen_store_fpr_F(dc
, rd
, dst
);
1826 static void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1827 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1831 gen_op_load_fpr_QT1(QFPREG(rs
));
1832 dst
= gen_dest_fpr_F(dc
);
1835 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1837 gen_store_fpr_F(dc
, rd
, dst
);
1840 static void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1841 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1845 gen_op_load_fpr_QT1(QFPREG(rs
));
1846 dst
= gen_dest_fpr_D(dc
, rd
);
1849 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1851 gen_store_fpr_D(dc
, rd
, dst
);
1854 static void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1855 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1859 src
= gen_load_fpr_F(dc
, rs
);
1863 gen_op_store_QT0_fpr(QFPREG(rd
));
1864 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1867 static void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1868 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1872 src
= gen_load_fpr_D(dc
, rs
);
1876 gen_op_store_QT0_fpr(QFPREG(rd
));
1877 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1880 static void gen_swap(DisasContext
*dc
, TCGv dst
, TCGv src
,
1881 TCGv addr
, int mmu_idx
, MemOp memop
)
1883 gen_address_mask(dc
, addr
);
1884 tcg_gen_atomic_xchg_tl(dst
, addr
, src
, mmu_idx
, memop
| MO_ALIGN
);
1887 static void gen_ldstub(DisasContext
*dc
, TCGv dst
, TCGv addr
, int mmu_idx
)
1889 TCGv m1
= tcg_constant_tl(0xff);
1890 gen_address_mask(dc
, addr
);
1891 tcg_gen_atomic_xchg_tl(dst
, addr
, m1
, mmu_idx
, MO_UB
);
1895 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
1914 static DisasASI
get_asi(DisasContext
*dc
, int insn
, MemOp memop
)
1916 int asi
= GET_FIELD(insn
, 19, 26);
1917 ASIType type
= GET_ASI_HELPER
;
1918 int mem_idx
= dc
->mem_idx
;
1920 #ifndef TARGET_SPARC64
1921 /* Before v9, all asis are immediate and privileged. */
1923 gen_exception(dc
, TT_ILL_INSN
);
1924 type
= GET_ASI_EXCP
;
1925 } else if (supervisor(dc
)
1926 /* Note that LEON accepts ASI_USERDATA in user mode, for
1927 use with CASA. Also note that previous versions of
1928 QEMU allowed (and old versions of gcc emitted) ASI_P
1929 for LEON, which is incorrect. */
1930 || (asi
== ASI_USERDATA
1931 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1933 case ASI_USERDATA
: /* User data access */
1934 mem_idx
= MMU_USER_IDX
;
1935 type
= GET_ASI_DIRECT
;
1937 case ASI_KERNELDATA
: /* Supervisor data access */
1938 mem_idx
= MMU_KERNEL_IDX
;
1939 type
= GET_ASI_DIRECT
;
1941 case ASI_M_BYPASS
: /* MMU passthrough */
1942 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1943 mem_idx
= MMU_PHYS_IDX
;
1944 type
= GET_ASI_DIRECT
;
1946 case ASI_M_BCOPY
: /* Block copy, sta access */
1947 mem_idx
= MMU_KERNEL_IDX
;
1948 type
= GET_ASI_BCOPY
;
1950 case ASI_M_BFILL
: /* Block fill, stda access */
1951 mem_idx
= MMU_KERNEL_IDX
;
1952 type
= GET_ASI_BFILL
;
1956 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1957 * permissions check in get_physical_address(..).
1959 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1961 gen_exception(dc
, TT_PRIV_INSN
);
1962 type
= GET_ASI_EXCP
;
1968 /* With v9, all asis below 0x80 are privileged. */
1969 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1970 down that bit into DisasContext. For the moment that's ok,
1971 since the direct implementations below doesn't have any ASIs
1972 in the restricted [0x30, 0x7f] range, and the check will be
1973 done properly in the helper. */
1974 if (!supervisor(dc
) && asi
< 0x80) {
1975 gen_exception(dc
, TT_PRIV_ACT
);
1976 type
= GET_ASI_EXCP
;
1979 case ASI_REAL
: /* Bypass */
1980 case ASI_REAL_IO
: /* Bypass, non-cacheable */
1981 case ASI_REAL_L
: /* Bypass LE */
1982 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
1983 case ASI_TWINX_REAL
: /* Real address, twinx */
1984 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
1985 case ASI_QUAD_LDD_PHYS
:
1986 case ASI_QUAD_LDD_PHYS_L
:
1987 mem_idx
= MMU_PHYS_IDX
;
1989 case ASI_N
: /* Nucleus */
1990 case ASI_NL
: /* Nucleus LE */
1993 case ASI_NUCLEUS_QUAD_LDD
:
1994 case ASI_NUCLEUS_QUAD_LDD_L
:
1995 if (hypervisor(dc
)) {
1996 mem_idx
= MMU_PHYS_IDX
;
1998 mem_idx
= MMU_NUCLEUS_IDX
;
2001 case ASI_AIUP
: /* As if user primary */
2002 case ASI_AIUPL
: /* As if user primary LE */
2003 case ASI_TWINX_AIUP
:
2004 case ASI_TWINX_AIUP_L
:
2005 case ASI_BLK_AIUP_4V
:
2006 case ASI_BLK_AIUP_L_4V
:
2009 mem_idx
= MMU_USER_IDX
;
2011 case ASI_AIUS
: /* As if user secondary */
2012 case ASI_AIUSL
: /* As if user secondary LE */
2013 case ASI_TWINX_AIUS
:
2014 case ASI_TWINX_AIUS_L
:
2015 case ASI_BLK_AIUS_4V
:
2016 case ASI_BLK_AIUS_L_4V
:
2019 mem_idx
= MMU_USER_SECONDARY_IDX
;
2021 case ASI_S
: /* Secondary */
2022 case ASI_SL
: /* Secondary LE */
2025 case ASI_BLK_COMMIT_S
:
2032 if (mem_idx
== MMU_USER_IDX
) {
2033 mem_idx
= MMU_USER_SECONDARY_IDX
;
2034 } else if (mem_idx
== MMU_KERNEL_IDX
) {
2035 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
2038 case ASI_P
: /* Primary */
2039 case ASI_PL
: /* Primary LE */
2042 case ASI_BLK_COMMIT_P
:
2066 type
= GET_ASI_DIRECT
;
2068 case ASI_TWINX_REAL
:
2069 case ASI_TWINX_REAL_L
:
2072 case ASI_TWINX_AIUP
:
2073 case ASI_TWINX_AIUP_L
:
2074 case ASI_TWINX_AIUS
:
2075 case ASI_TWINX_AIUS_L
:
2080 case ASI_QUAD_LDD_PHYS
:
2081 case ASI_QUAD_LDD_PHYS_L
:
2082 case ASI_NUCLEUS_QUAD_LDD
:
2083 case ASI_NUCLEUS_QUAD_LDD_L
:
2084 type
= GET_ASI_DTWINX
;
2086 case ASI_BLK_COMMIT_P
:
2087 case ASI_BLK_COMMIT_S
:
2088 case ASI_BLK_AIUP_4V
:
2089 case ASI_BLK_AIUP_L_4V
:
2092 case ASI_BLK_AIUS_4V
:
2093 case ASI_BLK_AIUS_L_4V
:
2100 type
= GET_ASI_BLOCK
;
2107 type
= GET_ASI_SHORT
;
2114 type
= GET_ASI_SHORT
;
2117 /* The little-endian asis all have bit 3 set. */
2124 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
2127 static void gen_ld_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
,
2128 int insn
, MemOp memop
)
2130 DisasASI da
= get_asi(dc
, insn
, memop
);
2135 case GET_ASI_DTWINX
: /* Reserved for ldda. */
2136 gen_exception(dc
, TT_ILL_INSN
);
2138 case GET_ASI_DIRECT
:
2139 gen_address_mask(dc
, addr
);
2140 tcg_gen_qemu_ld_tl(dst
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2144 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2145 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2148 #ifdef TARGET_SPARC64
2149 gen_helper_ld_asi(dst
, tcg_env
, addr
, r_asi
, r_mop
);
2152 TCGv_i64 t64
= tcg_temp_new_i64();
2153 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2154 tcg_gen_trunc_i64_tl(dst
, t64
);
2162 static void gen_st_asi(DisasContext
*dc
, TCGv src
, TCGv addr
,
2163 int insn
, MemOp memop
)
2165 DisasASI da
= get_asi(dc
, insn
, memop
);
2170 case GET_ASI_DTWINX
: /* Reserved for stda. */
2171 #ifndef TARGET_SPARC64
2172 gen_exception(dc
, TT_ILL_INSN
);
2175 if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
2176 /* Pre OpenSPARC CPUs don't have these */
2177 gen_exception(dc
, TT_ILL_INSN
);
2180 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2181 * are ST_BLKINIT_ ASIs */
2184 case GET_ASI_DIRECT
:
2185 gen_address_mask(dc
, addr
);
2186 tcg_gen_qemu_st_tl(src
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2188 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2190 /* Copy 32 bytes from the address in SRC to ADDR. */
2191 /* ??? The original qemu code suggests 4-byte alignment, dropping
2192 the low bits, but the only place I can see this used is in the
2193 Linux kernel with 32 byte alignment, which would make more sense
2194 as a cacheline-style operation. */
2196 TCGv saddr
= tcg_temp_new();
2197 TCGv daddr
= tcg_temp_new();
2198 TCGv four
= tcg_constant_tl(4);
2199 TCGv_i32 tmp
= tcg_temp_new_i32();
2202 tcg_gen_andi_tl(saddr
, src
, -4);
2203 tcg_gen_andi_tl(daddr
, addr
, -4);
2204 for (i
= 0; i
< 32; i
+= 4) {
2205 /* Since the loads and stores are paired, allow the
2206 copy to happen in the host endianness. */
2207 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
.mem_idx
, MO_UL
);
2208 tcg_gen_qemu_st_i32(tmp
, daddr
, da
.mem_idx
, MO_UL
);
2209 tcg_gen_add_tl(saddr
, saddr
, four
);
2210 tcg_gen_add_tl(daddr
, daddr
, four
);
2217 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2218 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2221 #ifdef TARGET_SPARC64
2222 gen_helper_st_asi(tcg_env
, addr
, src
, r_asi
, r_mop
);
2225 TCGv_i64 t64
= tcg_temp_new_i64();
2226 tcg_gen_extu_tl_i64(t64
, src
);
2227 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2231 /* A write to a TLB register may alter page maps. End the TB. */
2232 dc
->npc
= DYNAMIC_PC
;
2238 static void gen_swap_asi(DisasContext
*dc
, TCGv dst
, TCGv src
,
2239 TCGv addr
, int insn
)
2241 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2246 case GET_ASI_DIRECT
:
2247 gen_swap(dc
, dst
, src
, addr
, da
.mem_idx
, da
.memop
);
2250 /* ??? Should be DAE_invalid_asi. */
2251 gen_exception(dc
, TT_DATA_ACCESS
);
2256 static void gen_cas_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2259 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2265 case GET_ASI_DIRECT
:
2266 oldv
= tcg_temp_new();
2267 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2268 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2269 gen_store_gpr(dc
, rd
, oldv
);
2272 /* ??? Should be DAE_invalid_asi. */
2273 gen_exception(dc
, TT_DATA_ACCESS
);
2278 static void gen_ldstub_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
, int insn
)
2280 DisasASI da
= get_asi(dc
, insn
, MO_UB
);
2285 case GET_ASI_DIRECT
:
2286 gen_ldstub(dc
, dst
, addr
, da
.mem_idx
);
2289 /* ??? In theory, this should be raise DAE_invalid_asi.
2290 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2291 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2292 gen_helper_exit_atomic(tcg_env
);
2294 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2295 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
2299 t64
= tcg_temp_new_i64();
2300 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2302 s64
= tcg_constant_i64(0xff);
2303 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
2305 tcg_gen_trunc_i64_tl(dst
, t64
);
2308 dc
->npc
= DYNAMIC_PC
;
2315 #ifdef TARGET_SPARC64
2316 static void gen_ldf_asi(DisasContext
*dc
, TCGv addr
,
2317 int insn
, int size
, int rd
)
2319 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2327 case GET_ASI_DIRECT
:
2328 gen_address_mask(dc
, addr
);
2331 d32
= gen_dest_fpr_F(dc
);
2332 tcg_gen_qemu_ld_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2333 gen_store_fpr_F(dc
, rd
, d32
);
2336 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2337 da
.memop
| MO_ALIGN_4
);
2340 d64
= tcg_temp_new_i64();
2341 tcg_gen_qemu_ld_i64(d64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_4
);
2342 tcg_gen_addi_tl(addr
, addr
, 8);
2343 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
,
2344 da
.memop
| MO_ALIGN_4
);
2345 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2348 g_assert_not_reached();
2353 /* Valid for lddfa on aligned registers only. */
2354 if (size
== 8 && (rd
& 7) == 0) {
2359 gen_address_mask(dc
, addr
);
2361 /* The first operation checks required alignment. */
2362 memop
= da
.memop
| MO_ALIGN_64
;
2363 eight
= tcg_constant_tl(8);
2364 for (i
= 0; ; ++i
) {
2365 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2370 tcg_gen_add_tl(addr
, addr
, eight
);
2374 gen_exception(dc
, TT_ILL_INSN
);
2379 /* Valid for lddfa only. */
2381 gen_address_mask(dc
, addr
);
2382 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2383 da
.memop
| MO_ALIGN
);
2385 gen_exception(dc
, TT_ILL_INSN
);
2391 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2392 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
| MO_ALIGN
);
2395 /* According to the table in the UA2011 manual, the only
2396 other asis that are valid for ldfa/lddfa/ldqfa are
2397 the NO_FAULT asis. We still need a helper for these,
2398 but we can just use the integer asi helper for them. */
2401 d64
= tcg_temp_new_i64();
2402 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2403 d32
= gen_dest_fpr_F(dc
);
2404 tcg_gen_extrl_i64_i32(d32
, d64
);
2405 gen_store_fpr_F(dc
, rd
, d32
);
2408 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
, r_asi
, r_mop
);
2411 d64
= tcg_temp_new_i64();
2412 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2413 tcg_gen_addi_tl(addr
, addr
, 8);
2414 gen_helper_ld_asi(cpu_fpr
[rd
/2+1], tcg_env
, addr
, r_asi
, r_mop
);
2415 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2418 g_assert_not_reached();
2425 static void gen_stf_asi(DisasContext
*dc
, TCGv addr
,
2426 int insn
, int size
, int rd
)
2428 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2435 case GET_ASI_DIRECT
:
2436 gen_address_mask(dc
, addr
);
2439 d32
= gen_load_fpr_F(dc
, rd
);
2440 tcg_gen_qemu_st_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2443 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2444 da
.memop
| MO_ALIGN_4
);
2447 /* Only 4-byte alignment required. However, it is legal for the
2448 cpu to signal the alignment fault, and the OS trap handler is
2449 required to fix it up. Requiring 16-byte alignment here avoids
2450 having to probe the second page before performing the first
2452 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2453 da
.memop
| MO_ALIGN_16
);
2454 tcg_gen_addi_tl(addr
, addr
, 8);
2455 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
, da
.memop
);
2458 g_assert_not_reached();
2463 /* Valid for stdfa on aligned registers only. */
2464 if (size
== 8 && (rd
& 7) == 0) {
2469 gen_address_mask(dc
, addr
);
2471 /* The first operation checks required alignment. */
2472 memop
= da
.memop
| MO_ALIGN_64
;
2473 eight
= tcg_constant_tl(8);
2474 for (i
= 0; ; ++i
) {
2475 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2480 tcg_gen_add_tl(addr
, addr
, eight
);
2484 gen_exception(dc
, TT_ILL_INSN
);
2489 /* Valid for stdfa only. */
2491 gen_address_mask(dc
, addr
);
2492 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2493 da
.memop
| MO_ALIGN
);
2495 gen_exception(dc
, TT_ILL_INSN
);
2500 /* According to the table in the UA2011 manual, the only
2501 other asis that are valid for ldfa/lddfa/ldqfa are
2502 the PST* asis, which aren't currently handled. */
2503 gen_exception(dc
, TT_ILL_INSN
);
2508 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2510 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2511 TCGv_i64 hi
= gen_dest_gpr(dc
, rd
);
2512 TCGv_i64 lo
= gen_dest_gpr(dc
, rd
+ 1);
2518 case GET_ASI_DTWINX
:
2519 gen_address_mask(dc
, addr
);
2520 tcg_gen_qemu_ld_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2521 tcg_gen_addi_tl(addr
, addr
, 8);
2522 tcg_gen_qemu_ld_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2525 case GET_ASI_DIRECT
:
2527 TCGv_i64 tmp
= tcg_temp_new_i64();
2529 gen_address_mask(dc
, addr
);
2530 tcg_gen_qemu_ld_i64(tmp
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2532 /* Note that LE ldda acts as if each 32-bit register
2533 result is byte swapped. Having just performed one
2534 64-bit bswap, we need now to swap the writebacks. */
2535 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2536 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2538 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2544 /* ??? In theory we've handled all of the ASIs that are valid
2545 for ldda, and this should raise DAE_invalid_asi. However,
2546 real hardware allows others. This can be seen with e.g.
2547 FreeBSD 10.3 wrt ASI_IC_TAG. */
2549 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2550 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2551 TCGv_i64 tmp
= tcg_temp_new_i64();
2554 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
2557 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2558 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2560 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2566 gen_store_gpr(dc
, rd
, hi
);
2567 gen_store_gpr(dc
, rd
+ 1, lo
);
2570 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2573 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2574 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2580 case GET_ASI_DTWINX
:
2581 gen_address_mask(dc
, addr
);
2582 tcg_gen_qemu_st_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2583 tcg_gen_addi_tl(addr
, addr
, 8);
2584 tcg_gen_qemu_st_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2587 case GET_ASI_DIRECT
:
2589 TCGv_i64 t64
= tcg_temp_new_i64();
2591 /* Note that LE stda acts as if each 32-bit register result is
2592 byte swapped. We will perform one 64-bit LE store, so now
2593 we must swap the order of the construction. */
2594 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2595 tcg_gen_concat32_i64(t64
, lo
, hi
);
2597 tcg_gen_concat32_i64(t64
, hi
, lo
);
2599 gen_address_mask(dc
, addr
);
2600 tcg_gen_qemu_st_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2605 /* ??? In theory we've handled all of the ASIs that are valid
2606 for stda, and this should raise DAE_invalid_asi. */
2608 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2609 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2610 TCGv_i64 t64
= tcg_temp_new_i64();
2613 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2614 tcg_gen_concat32_i64(t64
, lo
, hi
);
2616 tcg_gen_concat32_i64(t64
, hi
, lo
);
2620 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2626 static void gen_casx_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2629 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2635 case GET_ASI_DIRECT
:
2636 oldv
= tcg_temp_new();
2637 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2638 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2639 gen_store_gpr(dc
, rd
, oldv
);
2642 /* ??? Should be DAE_invalid_asi. */
2643 gen_exception(dc
, TT_DATA_ACCESS
);
2648 #elif !defined(CONFIG_USER_ONLY)
2649 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2651 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2652 whereby "rd + 1" elicits "error: array subscript is above array".
2653 Since we have already asserted that rd is even, the semantics
2655 TCGv lo
= gen_dest_gpr(dc
, rd
| 1);
2656 TCGv hi
= gen_dest_gpr(dc
, rd
);
2657 TCGv_i64 t64
= tcg_temp_new_i64();
2658 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2663 case GET_ASI_DIRECT
:
2664 gen_address_mask(dc
, addr
);
2665 tcg_gen_qemu_ld_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2669 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2670 TCGv_i32 r_mop
= tcg_constant_i32(MO_UQ
);
2673 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2678 tcg_gen_extr_i64_i32(lo
, hi
, t64
);
2679 gen_store_gpr(dc
, rd
| 1, lo
);
2680 gen_store_gpr(dc
, rd
, hi
);
2683 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2686 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2687 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2688 TCGv_i64 t64
= tcg_temp_new_i64();
2690 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2695 case GET_ASI_DIRECT
:
2696 gen_address_mask(dc
, addr
);
2697 tcg_gen_qemu_st_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2700 /* Store 32 bytes of T64 to ADDR. */
2701 /* ??? The original qemu code suggests 8-byte alignment, dropping
2702 the low bits, but the only place I can see this used is in the
2703 Linux kernel with 32 byte alignment, which would make more sense
2704 as a cacheline-style operation. */
2706 TCGv d_addr
= tcg_temp_new();
2707 TCGv eight
= tcg_constant_tl(8);
2710 tcg_gen_andi_tl(d_addr
, addr
, -8);
2711 for (i
= 0; i
< 32; i
+= 8) {
2712 tcg_gen_qemu_st_i64(t64
, d_addr
, da
.mem_idx
, da
.memop
);
2713 tcg_gen_add_tl(d_addr
, d_addr
, eight
);
2719 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2720 TCGv_i32 r_mop
= tcg_constant_i32(MO_UQ
);
2723 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2730 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2732 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2733 return gen_load_gpr(dc
, rs1
);
2736 static TCGv
get_src2(DisasContext
*dc
, unsigned int insn
)
2738 if (IS_IMM
) { /* immediate */
2739 target_long simm
= GET_FIELDs(insn
, 19, 31);
2740 TCGv t
= tcg_temp_new();
2741 tcg_gen_movi_tl(t
, simm
);
2743 } else { /* register */
2744 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2745 return gen_load_gpr(dc
, rs2
);
2749 #ifdef TARGET_SPARC64
2750 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2752 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2754 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2755 or fold the comparison down to 32 bits and use movcond_i32. Choose
2757 c32
= tcg_temp_new_i32();
2759 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2761 TCGv_i64 c64
= tcg_temp_new_i64();
2762 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2763 tcg_gen_extrl_i64_i32(c32
, c64
);
2766 s1
= gen_load_fpr_F(dc
, rs
);
2767 s2
= gen_load_fpr_F(dc
, rd
);
2768 dst
= gen_dest_fpr_F(dc
);
2769 zero
= tcg_constant_i32(0);
2771 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2773 gen_store_fpr_F(dc
, rd
, dst
);
2776 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2778 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2779 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2780 gen_load_fpr_D(dc
, rs
),
2781 gen_load_fpr_D(dc
, rd
));
2782 gen_store_fpr_D(dc
, rd
, dst
);
2785 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2787 int qd
= QFPREG(rd
);
2788 int qs
= QFPREG(rs
);
2790 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2791 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2792 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2793 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2795 gen_update_fprs_dirty(dc
, qd
);
2798 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
)
2800 TCGv_i32 r_tl
= tcg_temp_new_i32();
2802 /* load env->tl into r_tl */
2803 tcg_gen_ld_i32(r_tl
, tcg_env
, offsetof(CPUSPARCState
, tl
));
2805 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2806 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2808 /* calculate offset to current trap state from env->ts, reuse r_tl */
2809 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2810 tcg_gen_addi_ptr(r_tsptr
, tcg_env
, offsetof(CPUSPARCState
, ts
));
2812 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2814 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2815 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2816 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2820 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2821 int width
, bool cc
, bool left
)
2824 uint64_t amask
, tabl
, tabr
;
2825 int shift
, imask
, omask
;
2828 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2829 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2830 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2831 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2832 dc
->cc_op
= CC_OP_SUB
;
2835 /* Theory of operation: there are two tables, left and right (not to
2836 be confused with the left and right versions of the opcode). These
2837 are indexed by the low 3 bits of the inputs. To make things "easy",
2838 these tables are loaded into two constants, TABL and TABR below.
2839 The operation index = (input & imask) << shift calculates the index
2840 into the constant, while val = (table >> index) & omask calculates
2841 the value we're looking for. */
2848 tabl
= 0x80c0e0f0f8fcfeffULL
;
2849 tabr
= 0xff7f3f1f0f070301ULL
;
2851 tabl
= 0x0103070f1f3f7fffULL
;
2852 tabr
= 0xfffefcf8f0e0c080ULL
;
2872 tabl
= (2 << 2) | 3;
2873 tabr
= (3 << 2) | 1;
2875 tabl
= (1 << 2) | 3;
2876 tabr
= (3 << 2) | 2;
2883 lo1
= tcg_temp_new();
2884 lo2
= tcg_temp_new();
2885 tcg_gen_andi_tl(lo1
, s1
, imask
);
2886 tcg_gen_andi_tl(lo2
, s2
, imask
);
2887 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2888 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2890 tcg_gen_shr_tl(lo1
, tcg_constant_tl(tabl
), lo1
);
2891 tcg_gen_shr_tl(lo2
, tcg_constant_tl(tabr
), lo2
);
2892 tcg_gen_andi_tl(lo1
, lo1
, omask
);
2893 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2897 amask
&= 0xffffffffULL
;
2899 tcg_gen_andi_tl(s1
, s1
, amask
);
2900 tcg_gen_andi_tl(s2
, s2
, amask
);
2902 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
2903 tcg_gen_and_tl(lo2
, lo2
, lo1
);
2904 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, s1
, s2
, lo1
, lo2
);
2907 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2909 TCGv tmp
= tcg_temp_new();
2911 tcg_gen_add_tl(tmp
, s1
, s2
);
2912 tcg_gen_andi_tl(dst
, tmp
, -8);
2914 tcg_gen_neg_tl(tmp
, tmp
);
2916 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2919 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2923 t1
= tcg_temp_new();
2924 t2
= tcg_temp_new();
2925 shift
= tcg_temp_new();
2927 tcg_gen_andi_tl(shift
, gsr
, 7);
2928 tcg_gen_shli_tl(shift
, shift
, 3);
2929 tcg_gen_shl_tl(t1
, s1
, shift
);
2931 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2932 shift of (up to 63) followed by a constant shift of 1. */
2933 tcg_gen_xori_tl(shift
, shift
, 63);
2934 tcg_gen_shr_tl(t2
, s2
, shift
);
2935 tcg_gen_shri_tl(t2
, t2
, 1);
2937 tcg_gen_or_tl(dst
, t1
, t2
);
2941 /* Include the auto-generated decoder. */
2942 #include "decode-insns.c.inc"
2944 #define TRANS(NAME, AVAIL, FUNC, ...) \
2945 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2946 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2948 #define avail_ALL(C) true
2949 #ifdef TARGET_SPARC64
2950 # define avail_32(C) false
2951 # define avail_ASR17(C) false
2952 # define avail_DIV(C) true
2953 # define avail_MUL(C) true
2954 # define avail_POWERDOWN(C) false
2955 # define avail_64(C) true
2956 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2957 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2959 # define avail_32(C) true
2960 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2961 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2962 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2963 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2964 # define avail_64(C) false
2965 # define avail_GL(C) false
2966 # define avail_HYPV(C) false
2969 /* Default case for non jump instructions. */
2970 static bool advance_pc(DisasContext
*dc
)
2975 case DYNAMIC_PC_LOOKUP
:
2980 /* we can do a static jump */
2981 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
2982 dc
->base
.is_jmp
= DISAS_NORETURN
;
2985 g_assert_not_reached();
2989 dc
->npc
= dc
->npc
+ 4;
2995 * Major opcodes 00 and 01 -- branches, call, and sethi
2998 static bool advance_jump_uncond_never(DisasContext
*dc
, bool annul
)
3001 dc
->pc
= dc
->npc
+ 4;
3002 dc
->npc
= dc
->pc
+ 4;
3005 dc
->npc
= dc
->pc
+ 4;
3010 static bool advance_jump_uncond_always(DisasContext
*dc
, bool annul
,
3019 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
3024 static bool advance_jump_cond(DisasContext
*dc
, DisasCompare
*cmp
,
3025 bool annul
, target_ulong dest
)
3027 target_ulong npc
= dc
->npc
;
3030 TCGLabel
*l1
= gen_new_label();
3032 tcg_gen_brcond_tl(tcg_invert_cond(cmp
->cond
), cmp
->c1
, cmp
->c2
, l1
);
3033 gen_goto_tb(dc
, 0, npc
, dest
);
3035 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
3037 dc
->base
.is_jmp
= DISAS_NORETURN
;
3042 case DYNAMIC_PC_LOOKUP
:
3043 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
3044 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
3045 tcg_gen_movcond_tl(cmp
->cond
, cpu_npc
,
3047 tcg_constant_tl(dest
), cpu_npc
);
3051 g_assert_not_reached();
3055 dc
->jump_pc
[0] = dest
;
3056 dc
->jump_pc
[1] = npc
+ 4;
3059 tcg_gen_mov_tl(cpu_cond
, cmp
->c1
);
3061 tcg_gen_setcond_tl(cmp
->cond
, cpu_cond
, cmp
->c1
, cmp
->c2
);
3068 static bool raise_priv(DisasContext
*dc
)
3070 gen_exception(dc
, TT_PRIV_INSN
);
3074 static bool do_bpcc(DisasContext
*dc
, arg_bcc
*a
)
3076 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
3081 return advance_jump_uncond_never(dc
, a
->a
);
3083 return advance_jump_uncond_always(dc
, a
->a
, target
);
3087 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
3088 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
3092 TRANS(Bicc
, ALL
, do_bpcc
, a
)
3093 TRANS(BPcc
, 64, do_bpcc
, a
)
3095 static bool do_fbpfcc(DisasContext
*dc
, arg_bcc
*a
)
3097 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
3100 if (gen_trap_ifnofpu(dc
)) {
3105 return advance_jump_uncond_never(dc
, a
->a
);
3107 return advance_jump_uncond_always(dc
, a
->a
, target
);
3111 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
3112 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
3116 TRANS(FBPfcc
, 64, do_fbpfcc
, a
)
3117 TRANS(FBfcc
, ALL
, do_fbpfcc
, a
)
3119 static bool trans_BPr(DisasContext
*dc
, arg_BPr
*a
)
3121 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
3124 if (!avail_64(dc
)) {
3127 if (gen_tcg_cond_reg
[a
->cond
] == TCG_COND_NEVER
) {
3132 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
3133 return advance_jump_cond(dc
, &cmp
, a
->a
, target
);
3136 static bool trans_CALL(DisasContext
*dc
, arg_CALL
*a
)
3138 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
3140 gen_store_gpr(dc
, 15, tcg_constant_tl(dc
->pc
));
3146 static bool trans_NCP(DisasContext
*dc
, arg_NCP
*a
)
3149 * For sparc32, always generate the no-coprocessor exception.
3150 * For sparc64, always generate illegal instruction.
3152 #ifdef TARGET_SPARC64
3155 gen_exception(dc
, TT_NCP_INSN
);
3160 static bool trans_SETHI(DisasContext
*dc
, arg_SETHI
*a
)
3162 /* Special-case %g0 because that's the canonical nop. */
3164 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl((uint32_t)a
->i
<< 10));
3166 return advance_pc(dc
);
3170 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
3173 static bool do_tcc(DisasContext
*dc
, int cond
, int cc
,
3174 int rs1
, bool imm
, int rs2_or_imm
)
3176 int mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
3177 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
3184 return advance_pc(dc
);
3188 * Immediate traps are the most common case. Since this value is
3189 * live across the branch, it really pays to evaluate the constant.
3191 if (rs1
== 0 && (imm
|| rs2_or_imm
== 0)) {
3192 trap
= tcg_constant_i32((rs2_or_imm
& mask
) + TT_TRAP
);
3194 trap
= tcg_temp_new_i32();
3195 tcg_gen_trunc_tl_i32(trap
, gen_load_gpr(dc
, rs1
));
3197 tcg_gen_addi_i32(trap
, trap
, rs2_or_imm
);
3199 TCGv_i32 t2
= tcg_temp_new_i32();
3200 tcg_gen_trunc_tl_i32(t2
, gen_load_gpr(dc
, rs2_or_imm
));
3201 tcg_gen_add_i32(trap
, trap
, t2
);
3203 tcg_gen_andi_i32(trap
, trap
, mask
);
3204 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
3210 gen_helper_raise_exception(tcg_env
, trap
);
3211 dc
->base
.is_jmp
= DISAS_NORETURN
;
3215 /* Conditional trap. */
3217 lab
= delay_exceptionv(dc
, trap
);
3218 gen_compare(&cmp
, cc
, cond
, dc
);
3219 tcg_gen_brcond_tl(cmp
.cond
, cmp
.c1
, cmp
.c2
, lab
);
3221 return advance_pc(dc
);
3224 static bool trans_Tcc_r(DisasContext
*dc
, arg_Tcc_r
*a
)
3226 if (avail_32(dc
) && a
->cc
) {
3229 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, false, a
->rs2
);
3232 static bool trans_Tcc_i_v7(DisasContext
*dc
, arg_Tcc_i_v7
*a
)
3237 return do_tcc(dc
, a
->cond
, 0, a
->rs1
, true, a
->i
);
3240 static bool trans_Tcc_i_v9(DisasContext
*dc
, arg_Tcc_i_v9
*a
)
3245 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, true, a
->i
);
3248 static bool trans_STBAR(DisasContext
*dc
, arg_STBAR
*a
)
3250 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
3251 return advance_pc(dc
);
3254 static bool trans_MEMBAR(DisasContext
*dc
, arg_MEMBAR
*a
)
3260 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
3261 tcg_gen_mb(a
->mmask
| TCG_BAR_SC
);
3264 /* For #Sync, etc, end the TB to recognize interrupts. */
3265 dc
->base
.is_jmp
= DISAS_EXIT
;
3267 return advance_pc(dc
);
3270 static bool do_rd_special(DisasContext
*dc
, bool priv
, int rd
,
3271 TCGv (*func
)(DisasContext
*, TCGv
))
3274 return raise_priv(dc
);
3276 gen_store_gpr(dc
, rd
, func(dc
, gen_dest_gpr(dc
, rd
)));
3277 return advance_pc(dc
);
3280 static TCGv
do_rdy(DisasContext
*dc
, TCGv dst
)
3285 static bool trans_RDY(DisasContext
*dc
, arg_RDY
*a
)
3288 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
3289 * 32-bit cpus like sparcv7, which ignores the rs1 field.
3290 * This matches after all other ASR, so Leon3 Asr17 is handled first.
3292 if (avail_64(dc
) && a
->rs1
!= 0) {
3295 return do_rd_special(dc
, true, a
->rd
, do_rdy
);
3298 static TCGv
do_rd_leon3_config(DisasContext
*dc
, TCGv dst
)
3303 * TODO: There are many more fields to be filled,
3304 * some of which are writable.
3306 val
= dc
->def
->nwindows
- 1; /* [4:0] NWIN */
3307 val
|= 1 << 8; /* [8] V8 */
3309 return tcg_constant_tl(val
);
3312 TRANS(RDASR17
, ASR17
, do_rd_special
, true, a
->rd
, do_rd_leon3_config
)
3314 static TCGv
do_rdccr(DisasContext
*dc
, TCGv dst
)
3317 gen_helper_rdccr(dst
, tcg_env
);
3321 TRANS(RDCCR
, 64, do_rd_special
, true, a
->rd
, do_rdccr
)
3323 static TCGv
do_rdasi(DisasContext
*dc
, TCGv dst
)
3325 #ifdef TARGET_SPARC64
3326 return tcg_constant_tl(dc
->asi
);
3328 qemu_build_not_reached();
3332 TRANS(RDASI
, 64, do_rd_special
, true, a
->rd
, do_rdasi
)
3334 static TCGv
do_rdtick(DisasContext
*dc
, TCGv dst
)
3336 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3338 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3339 if (translator_io_start(&dc
->base
)) {
3340 dc
->base
.is_jmp
= DISAS_EXIT
;
3342 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
3343 tcg_constant_i32(dc
->mem_idx
));
3347 /* TODO: non-priv access only allowed when enabled. */
3348 TRANS(RDTICK
, 64, do_rd_special
, true, a
->rd
, do_rdtick
)
3350 static TCGv
do_rdpc(DisasContext
*dc
, TCGv dst
)
3352 return tcg_constant_tl(address_mask_i(dc
, dc
->pc
));
3355 TRANS(RDPC
, 64, do_rd_special
, true, a
->rd
, do_rdpc
)
3357 static TCGv
do_rdfprs(DisasContext
*dc
, TCGv dst
)
3359 tcg_gen_ext_i32_tl(dst
, cpu_fprs
);
3363 TRANS(RDFPRS
, 64, do_rd_special
, true, a
->rd
, do_rdfprs
)
3365 static TCGv
do_rdgsr(DisasContext
*dc
, TCGv dst
)
3367 gen_trap_ifnofpu(dc
);
3371 TRANS(RDGSR
, 64, do_rd_special
, true, a
->rd
, do_rdgsr
)
3373 static TCGv
do_rdsoftint(DisasContext
*dc
, TCGv dst
)
3375 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(softint
));
3379 TRANS(RDSOFTINT
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdsoftint
)
3381 static TCGv
do_rdtick_cmpr(DisasContext
*dc
, TCGv dst
)
3383 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3387 /* TODO: non-priv access only allowed when enabled. */
3388 TRANS(RDTICK_CMPR
, 64, do_rd_special
, true, a
->rd
, do_rdtick_cmpr
)
3390 static TCGv
do_rdstick(DisasContext
*dc
, TCGv dst
)
3392 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3394 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3395 if (translator_io_start(&dc
->base
)) {
3396 dc
->base
.is_jmp
= DISAS_EXIT
;
3398 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
3399 tcg_constant_i32(dc
->mem_idx
));
3403 /* TODO: non-priv access only allowed when enabled. */
3404 TRANS(RDSTICK
, 64, do_rd_special
, true, a
->rd
, do_rdstick
)
3406 static TCGv
do_rdstick_cmpr(DisasContext
*dc
, TCGv dst
)
3408 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3412 /* TODO: supervisor access only allowed when enabled by hypervisor. */
3413 TRANS(RDSTICK_CMPR
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdstick_cmpr
)
3416 * UltraSPARC-T1 Strand status.
3417 * HYPV check maybe not enough, UA2005 & UA2007 describe
3418 * this ASR as impl. dep
3420 static TCGv
do_rdstrand_status(DisasContext
*dc
, TCGv dst
)
3422 return tcg_constant_tl(1);
3425 TRANS(RDSTRAND_STATUS
, HYPV
, do_rd_special
, true, a
->rd
, do_rdstrand_status
)
3427 static TCGv
do_rdpsr(DisasContext
*dc
, TCGv dst
)
3430 gen_helper_rdpsr(dst
, tcg_env
);
3434 TRANS(RDPSR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpsr
)
3436 static TCGv
do_rdhpstate(DisasContext
*dc
, TCGv dst
)
3438 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hpstate
));
3442 TRANS(RDHPR_hpstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhpstate
)
3444 static TCGv
do_rdhtstate(DisasContext
*dc
, TCGv dst
)
3446 TCGv_i32 tl
= tcg_temp_new_i32();
3447 TCGv_ptr tp
= tcg_temp_new_ptr();
3449 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
3450 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
3451 tcg_gen_shli_i32(tl
, tl
, 3);
3452 tcg_gen_ext_i32_ptr(tp
, tl
);
3453 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
3455 tcg_gen_ld_tl(dst
, tp
, env64_field_offsetof(htstate
));
3459 TRANS(RDHPR_htstate
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtstate
)
3461 static TCGv
do_rdhintp(DisasContext
*dc
, TCGv dst
)
3463 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hintp
));
3467 TRANS(RDHPR_hintp
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhintp
)
3469 static TCGv
do_rdhtba(DisasContext
*dc
, TCGv dst
)
3471 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(htba
));
3475 TRANS(RDHPR_htba
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhtba
)
3477 static TCGv
do_rdhver(DisasContext
*dc
, TCGv dst
)
3479 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hver
));
3483 TRANS(RDHPR_hver
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdhver
)
3485 static TCGv
do_rdhstick_cmpr(DisasContext
*dc
, TCGv dst
)
3487 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
3491 TRANS(RDHPR_hstick_cmpr
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
,
3494 static TCGv
do_rdwim(DisasContext
*dc
, TCGv dst
)
3496 tcg_gen_ld_tl(dst
, tcg_env
, env32_field_offsetof(wim
));
3500 TRANS(RDWIM
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwim
)
3502 static TCGv
do_rdtpc(DisasContext
*dc
, TCGv dst
)
3504 #ifdef TARGET_SPARC64
3505 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3507 gen_load_trap_state_at_tl(r_tsptr
);
3508 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tpc
));
3511 qemu_build_not_reached();
3515 TRANS(RDPR_tpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtpc
)
3517 static TCGv
do_rdtnpc(DisasContext
*dc
, TCGv dst
)
3519 #ifdef TARGET_SPARC64
3520 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3522 gen_load_trap_state_at_tl(r_tsptr
);
3523 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tnpc
));
3526 qemu_build_not_reached();
3530 TRANS(RDPR_tnpc
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtnpc
)
3532 static TCGv
do_rdtstate(DisasContext
*dc
, TCGv dst
)
3534 #ifdef TARGET_SPARC64
3535 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3537 gen_load_trap_state_at_tl(r_tsptr
);
3538 tcg_gen_ld_tl(dst
, r_tsptr
, offsetof(trap_state
, tstate
));
3541 qemu_build_not_reached();
3545 TRANS(RDPR_tstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtstate
)
3547 static TCGv
do_rdtt(DisasContext
*dc
, TCGv dst
)
3549 #ifdef TARGET_SPARC64
3550 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3552 gen_load_trap_state_at_tl(r_tsptr
);
3553 tcg_gen_ld32s_tl(dst
, r_tsptr
, offsetof(trap_state
, tt
));
3556 qemu_build_not_reached();
3560 TRANS(RDPR_tt
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtt
)
3561 TRANS(RDPR_tick
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtick
)
3563 static TCGv
do_rdtba(DisasContext
*dc
, TCGv dst
)
3568 TRANS(RDTBR
, 32, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
3569 TRANS(RDPR_tba
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtba
)
3571 static TCGv
do_rdpstate(DisasContext
*dc
, TCGv dst
)
3573 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(pstate
));
3577 TRANS(RDPR_pstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpstate
)
3579 static TCGv
do_rdtl(DisasContext
*dc
, TCGv dst
)
3581 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(tl
));
3585 TRANS(RDPR_tl
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdtl
)
3587 static TCGv
do_rdpil(DisasContext
*dc
, TCGv dst
)
3589 tcg_gen_ld32s_tl(dst
, tcg_env
, env_field_offsetof(psrpil
));
3593 TRANS(RDPR_pil
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdpil
)
3595 static TCGv
do_rdcwp(DisasContext
*dc
, TCGv dst
)
3597 gen_helper_rdcwp(dst
, tcg_env
);
3601 TRANS(RDPR_cwp
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcwp
)
3603 static TCGv
do_rdcansave(DisasContext
*dc
, TCGv dst
)
3605 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cansave
));
3609 TRANS(RDPR_cansave
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcansave
)
3611 static TCGv
do_rdcanrestore(DisasContext
*dc
, TCGv dst
)
3613 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(canrestore
));
3617 TRANS(RDPR_canrestore
, 64, do_rd_special
, supervisor(dc
), a
->rd
,
3620 static TCGv
do_rdcleanwin(DisasContext
*dc
, TCGv dst
)
3622 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(cleanwin
));
3626 TRANS(RDPR_cleanwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdcleanwin
)
3628 static TCGv
do_rdotherwin(DisasContext
*dc
, TCGv dst
)
3630 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(otherwin
));
3634 TRANS(RDPR_otherwin
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdotherwin
)
3636 static TCGv
do_rdwstate(DisasContext
*dc
, TCGv dst
)
3638 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(wstate
));
3642 TRANS(RDPR_wstate
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdwstate
)
3644 static TCGv
do_rdgl(DisasContext
*dc
, TCGv dst
)
3646 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(gl
));
3650 TRANS(RDPR_gl
, GL
, do_rd_special
, supervisor(dc
), a
->rd
, do_rdgl
)
3652 /* UA2005 strand status */
3653 static TCGv
do_rdssr(DisasContext
*dc
, TCGv dst
)
3655 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(ssr
));
3659 TRANS(RDPR_strand_status
, HYPV
, do_rd_special
, hypervisor(dc
), a
->rd
, do_rdssr
)
3661 static TCGv
do_rdver(DisasContext
*dc
, TCGv dst
)
3663 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(version
));
3667 TRANS(RDPR_ver
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdver
)
3669 static bool trans_FLUSHW(DisasContext
*dc
, arg_FLUSHW
*a
)
3672 gen_helper_flushw(tcg_env
);
3673 return advance_pc(dc
);
3678 static bool do_wr_special(DisasContext
*dc
, arg_r_r_ri
*a
, bool priv
,
3679 void (*func
)(DisasContext
*, TCGv
))
3683 /* For simplicity, we under-decoded the rs2 form. */
3684 if (!a
->imm
&& (a
->rs2_or_imm
& ~0x1f)) {
3688 return raise_priv(dc
);
3691 if (a
->rs1
== 0 && (a
->imm
|| a
->rs2_or_imm
== 0)) {
3692 src
= tcg_constant_tl(a
->rs2_or_imm
);
3694 TCGv src1
= gen_load_gpr(dc
, a
->rs1
);
3695 if (a
->rs2_or_imm
== 0) {
3698 src
= tcg_temp_new();
3700 tcg_gen_xori_tl(src
, src1
, a
->rs2_or_imm
);
3702 tcg_gen_xor_tl(src
, src1
, gen_load_gpr(dc
, a
->rs2_or_imm
));
3707 return advance_pc(dc
);
3710 static void do_wry(DisasContext
*dc
, TCGv src
)
3712 tcg_gen_ext32u_tl(cpu_y
, src
);
3715 TRANS(WRY
, ALL
, do_wr_special
, a
, true, do_wry
)
3717 static void do_wrccr(DisasContext
*dc
, TCGv src
)
3719 gen_helper_wrccr(tcg_env
, src
);
3722 TRANS(WRCCR
, 64, do_wr_special
, a
, true, do_wrccr
)
3724 static void do_wrasi(DisasContext
*dc
, TCGv src
)
3726 TCGv tmp
= tcg_temp_new();
3728 tcg_gen_ext8u_tl(tmp
, src
);
3729 tcg_gen_st32_tl(tmp
, tcg_env
, env64_field_offsetof(asi
));
3730 /* End TB to notice changed ASI. */
3731 dc
->base
.is_jmp
= DISAS_EXIT
;
3734 TRANS(WRASI
, 64, do_wr_special
, a
, true, do_wrasi
)
3736 static void do_wrfprs(DisasContext
*dc
, TCGv src
)
3738 #ifdef TARGET_SPARC64
3739 tcg_gen_trunc_tl_i32(cpu_fprs
, src
);
3741 dc
->base
.is_jmp
= DISAS_EXIT
;
3743 qemu_build_not_reached();
3747 TRANS(WRFPRS
, 64, do_wr_special
, a
, true, do_wrfprs
)
3749 static void do_wrgsr(DisasContext
*dc
, TCGv src
)
3751 gen_trap_ifnofpu(dc
);
3752 tcg_gen_mov_tl(cpu_gsr
, src
);
3755 TRANS(WRGSR
, 64, do_wr_special
, a
, true, do_wrgsr
)
3757 static void do_wrsoftint_set(DisasContext
*dc
, TCGv src
)
3759 gen_helper_set_softint(tcg_env
, src
);
3762 TRANS(WRSOFTINT_SET
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_set
)
3764 static void do_wrsoftint_clr(DisasContext
*dc
, TCGv src
)
3766 gen_helper_clear_softint(tcg_env
, src
);
3769 TRANS(WRSOFTINT_CLR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint_clr
)
3771 static void do_wrsoftint(DisasContext
*dc
, TCGv src
)
3773 gen_helper_write_softint(tcg_env
, src
);
3776 TRANS(WRSOFTINT
, 64, do_wr_special
, a
, supervisor(dc
), do_wrsoftint
)
3778 static void do_wrtick_cmpr(DisasContext
*dc
, TCGv src
)
3780 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3782 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(tick_cmpr
));
3783 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3784 translator_io_start(&dc
->base
);
3785 gen_helper_tick_set_limit(r_tickptr
, src
);
3786 /* End TB to handle timer interrupt */
3787 dc
->base
.is_jmp
= DISAS_EXIT
;
3790 TRANS(WRTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick_cmpr
)
3792 static void do_wrstick(DisasContext
*dc
, TCGv src
)
3794 #ifdef TARGET_SPARC64
3795 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3797 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, offsetof(CPUSPARCState
, stick
));
3798 translator_io_start(&dc
->base
);
3799 gen_helper_tick_set_count(r_tickptr
, src
);
3800 /* End TB to handle timer interrupt */
3801 dc
->base
.is_jmp
= DISAS_EXIT
;
3803 qemu_build_not_reached();
3807 TRANS(WRSTICK
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick
)
3809 static void do_wrstick_cmpr(DisasContext
*dc
, TCGv src
)
3811 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3813 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(stick_cmpr
));
3814 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
3815 translator_io_start(&dc
->base
);
3816 gen_helper_tick_set_limit(r_tickptr
, src
);
3817 /* End TB to handle timer interrupt */
3818 dc
->base
.is_jmp
= DISAS_EXIT
;
3821 TRANS(WRSTICK_CMPR
, 64, do_wr_special
, a
, supervisor(dc
), do_wrstick_cmpr
)
3823 static void do_wrpowerdown(DisasContext
*dc
, TCGv src
)
3826 gen_helper_power_down(tcg_env
);
3829 TRANS(WRPOWERDOWN
, POWERDOWN
, do_wr_special
, a
, supervisor(dc
), do_wrpowerdown
)
3831 static void do_wrpsr(DisasContext
*dc
, TCGv src
)
3833 gen_helper_wrpsr(tcg_env
, src
);
3834 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3835 dc
->cc_op
= CC_OP_FLAGS
;
3836 dc
->base
.is_jmp
= DISAS_EXIT
;
3839 TRANS(WRPSR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrpsr
)
3841 static void do_wrwim(DisasContext
*dc
, TCGv src
)
3843 target_ulong mask
= MAKE_64BIT_MASK(0, dc
->def
->nwindows
);
3844 TCGv tmp
= tcg_temp_new();
3846 tcg_gen_andi_tl(tmp
, src
, mask
);
3847 tcg_gen_st_tl(tmp
, tcg_env
, env32_field_offsetof(wim
));
3850 TRANS(WRWIM
, 32, do_wr_special
, a
, supervisor(dc
), do_wrwim
)
3852 static void do_wrtpc(DisasContext
*dc
, TCGv src
)
3854 #ifdef TARGET_SPARC64
3855 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3857 gen_load_trap_state_at_tl(r_tsptr
);
3858 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tpc
));
3860 qemu_build_not_reached();
3864 TRANS(WRPR_tpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtpc
)
3866 static void do_wrtnpc(DisasContext
*dc
, TCGv src
)
3868 #ifdef TARGET_SPARC64
3869 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3871 gen_load_trap_state_at_tl(r_tsptr
);
3872 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tnpc
));
3874 qemu_build_not_reached();
3878 TRANS(WRPR_tnpc
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtnpc
)
3880 static void do_wrtstate(DisasContext
*dc
, TCGv src
)
3882 #ifdef TARGET_SPARC64
3883 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3885 gen_load_trap_state_at_tl(r_tsptr
);
3886 tcg_gen_st_tl(src
, r_tsptr
, offsetof(trap_state
, tstate
));
3888 qemu_build_not_reached();
3892 TRANS(WRPR_tstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtstate
)
3894 static void do_wrtt(DisasContext
*dc
, TCGv src
)
3896 #ifdef TARGET_SPARC64
3897 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3899 gen_load_trap_state_at_tl(r_tsptr
);
3900 tcg_gen_st32_tl(src
, r_tsptr
, offsetof(trap_state
, tt
));
3902 qemu_build_not_reached();
3906 TRANS(WRPR_tt
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtt
)
3908 static void do_wrtick(DisasContext
*dc
, TCGv src
)
3910 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
3912 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
3913 translator_io_start(&dc
->base
);
3914 gen_helper_tick_set_count(r_tickptr
, src
);
3915 /* End TB to handle timer interrupt */
3916 dc
->base
.is_jmp
= DISAS_EXIT
;
3919 TRANS(WRPR_tick
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtick
)
3921 static void do_wrtba(DisasContext
*dc
, TCGv src
)
3923 tcg_gen_mov_tl(cpu_tbr
, src
);
3926 TRANS(WRPR_tba
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
3928 static void do_wrpstate(DisasContext
*dc
, TCGv src
)
3931 if (translator_io_start(&dc
->base
)) {
3932 dc
->base
.is_jmp
= DISAS_EXIT
;
3934 gen_helper_wrpstate(tcg_env
, src
);
3935 dc
->npc
= DYNAMIC_PC
;
3938 TRANS(WRPR_pstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpstate
)
3940 static void do_wrtl(DisasContext
*dc
, TCGv src
)
3943 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(tl
));
3944 dc
->npc
= DYNAMIC_PC
;
3947 TRANS(WRPR_tl
, 64, do_wr_special
, a
, supervisor(dc
), do_wrtl
)
3949 static void do_wrpil(DisasContext
*dc
, TCGv src
)
3951 if (translator_io_start(&dc
->base
)) {
3952 dc
->base
.is_jmp
= DISAS_EXIT
;
3954 gen_helper_wrpil(tcg_env
, src
);
3957 TRANS(WRPR_pil
, 64, do_wr_special
, a
, supervisor(dc
), do_wrpil
)
3959 static void do_wrcwp(DisasContext
*dc
, TCGv src
)
3961 gen_helper_wrcwp(tcg_env
, src
);
3964 TRANS(WRPR_cwp
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcwp
)
3966 static void do_wrcansave(DisasContext
*dc
, TCGv src
)
3968 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cansave
));
3971 TRANS(WRPR_cansave
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcansave
)
3973 static void do_wrcanrestore(DisasContext
*dc
, TCGv src
)
3975 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(canrestore
));
3978 TRANS(WRPR_canrestore
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcanrestore
)
3980 static void do_wrcleanwin(DisasContext
*dc
, TCGv src
)
3982 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(cleanwin
));
3985 TRANS(WRPR_cleanwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrcleanwin
)
3987 static void do_wrotherwin(DisasContext
*dc
, TCGv src
)
3989 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(otherwin
));
3992 TRANS(WRPR_otherwin
, 64, do_wr_special
, a
, supervisor(dc
), do_wrotherwin
)
3994 static void do_wrwstate(DisasContext
*dc
, TCGv src
)
3996 tcg_gen_st32_tl(src
, tcg_env
, env64_field_offsetof(wstate
));
3999 TRANS(WRPR_wstate
, 64, do_wr_special
, a
, supervisor(dc
), do_wrwstate
)
4001 static void do_wrgl(DisasContext
*dc
, TCGv src
)
4003 gen_helper_wrgl(tcg_env
, src
);
4006 TRANS(WRPR_gl
, GL
, do_wr_special
, a
, supervisor(dc
), do_wrgl
)
4008 /* UA2005 strand status */
4009 static void do_wrssr(DisasContext
*dc
, TCGv src
)
4011 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(ssr
));
4014 TRANS(WRPR_strand_status
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrssr
)
4016 TRANS(WRTBR
, 32, do_wr_special
, a
, supervisor(dc
), do_wrtba
)
4018 static void do_wrhpstate(DisasContext
*dc
, TCGv src
)
4020 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hpstate
));
4021 dc
->base
.is_jmp
= DISAS_EXIT
;
4024 TRANS(WRHPR_hpstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhpstate
)
4026 static void do_wrhtstate(DisasContext
*dc
, TCGv src
)
4028 TCGv_i32 tl
= tcg_temp_new_i32();
4029 TCGv_ptr tp
= tcg_temp_new_ptr();
4031 tcg_gen_ld_i32(tl
, tcg_env
, env64_field_offsetof(tl
));
4032 tcg_gen_andi_i32(tl
, tl
, MAXTL_MASK
);
4033 tcg_gen_shli_i32(tl
, tl
, 3);
4034 tcg_gen_ext_i32_ptr(tp
, tl
);
4035 tcg_gen_add_ptr(tp
, tp
, tcg_env
);
4037 tcg_gen_st_tl(src
, tp
, env64_field_offsetof(htstate
));
4040 TRANS(WRHPR_htstate
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtstate
)
4042 static void do_wrhintp(DisasContext
*dc
, TCGv src
)
4044 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hintp
));
4047 TRANS(WRHPR_hintp
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhintp
)
4049 static void do_wrhtba(DisasContext
*dc
, TCGv src
)
4051 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(htba
));
4054 TRANS(WRHPR_htba
, HYPV
, do_wr_special
, a
, hypervisor(dc
), do_wrhtba
)
4056 static void do_wrhstick_cmpr(DisasContext
*dc
, TCGv src
)
4058 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
4060 tcg_gen_st_tl(src
, tcg_env
, env64_field_offsetof(hstick_cmpr
));
4061 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(hstick
));
4062 translator_io_start(&dc
->base
);
4063 gen_helper_tick_set_limit(r_tickptr
, src
);
4064 /* End TB to handle timer interrupt */
4065 dc
->base
.is_jmp
= DISAS_EXIT
;
4068 TRANS(WRHPR_hstick_cmpr
, HYPV
, do_wr_special
, a
, hypervisor(dc
),
4071 static bool do_saved_restored(DisasContext
*dc
, bool saved
)
4073 if (!supervisor(dc
)) {
4074 return raise_priv(dc
);
4077 gen_helper_saved(tcg_env
);
4079 gen_helper_restored(tcg_env
);
4081 return advance_pc(dc
);
4084 TRANS(SAVED
, 64, do_saved_restored
, true)
4085 TRANS(RESTORED
, 64, do_saved_restored
, false)
4087 static bool trans_NOP_v7(DisasContext
*dc
, arg_NOP_v7
*a
)
4090 * TODO: Need a feature bit for sparcv8.
4091 * In the meantime, treat all 32-bit cpus like sparcv7.
4094 return advance_pc(dc
);
4099 static bool do_arith_int(DisasContext
*dc
, arg_r_r_ri_cc
*a
, int cc_op
,
4100 void (*func
)(TCGv
, TCGv
, TCGv
),
4101 void (*funci
)(TCGv
, TCGv
, target_long
))
4105 /* For simplicity, we under-decoded the rs2 form. */
4106 if (!a
->imm
&& a
->rs2_or_imm
& ~0x1f) {
4113 dst
= gen_dest_gpr(dc
, a
->rd
);
4115 src1
= gen_load_gpr(dc
, a
->rs1
);
4117 if (a
->imm
|| a
->rs2_or_imm
== 0) {
4119 funci(dst
, src1
, a
->rs2_or_imm
);
4121 func(dst
, src1
, tcg_constant_tl(a
->rs2_or_imm
));
4124 func(dst
, src1
, cpu_regs
[a
->rs2_or_imm
]);
4126 gen_store_gpr(dc
, a
->rd
, dst
);
4129 tcg_gen_movi_i32(cpu_cc_op
, cc_op
);
4132 return advance_pc(dc
);
4135 static bool do_arith(DisasContext
*dc
, arg_r_r_ri_cc
*a
, int cc_op
,
4136 void (*func
)(TCGv
, TCGv
, TCGv
),
4137 void (*funci
)(TCGv
, TCGv
, target_long
),
4138 void (*func_cc
)(TCGv
, TCGv
, TCGv
))
4142 return do_arith_int(dc
, a
, cc_op
, func_cc
, NULL
);
4144 return do_arith_int(dc
, a
, cc_op
, func
, funci
);
4147 static bool do_logic(DisasContext
*dc
, arg_r_r_ri_cc
*a
,
4148 void (*func
)(TCGv
, TCGv
, TCGv
),
4149 void (*funci
)(TCGv
, TCGv
, target_long
))
4151 return do_arith_int(dc
, a
, CC_OP_LOGIC
, func
, funci
);
4154 TRANS(ADD
, ALL
, do_arith
, a
, CC_OP_ADD
,
4155 tcg_gen_add_tl
, tcg_gen_addi_tl
, gen_op_add_cc
)
4156 TRANS(SUB
, ALL
, do_arith
, a
, CC_OP_SUB
,
4157 tcg_gen_sub_tl
, tcg_gen_subi_tl
, gen_op_sub_cc
)
4159 TRANS(TADDcc
, ALL
, do_arith
, a
, CC_OP_TADD
, NULL
, NULL
, gen_op_add_cc
)
4160 TRANS(TSUBcc
, ALL
, do_arith
, a
, CC_OP_TSUB
, NULL
, NULL
, gen_op_sub_cc
)
4161 TRANS(TADDccTV
, ALL
, do_arith
, a
, CC_OP_TADDTV
, NULL
, NULL
, gen_op_taddcctv
)
4162 TRANS(TSUBccTV
, ALL
, do_arith
, a
, CC_OP_TSUBTV
, NULL
, NULL
, gen_op_tsubcctv
)
4164 TRANS(AND
, ALL
, do_logic
, a
, tcg_gen_and_tl
, tcg_gen_andi_tl
)
4165 TRANS(XOR
, ALL
, do_logic
, a
, tcg_gen_xor_tl
, tcg_gen_xori_tl
)
4166 TRANS(ANDN
, ALL
, do_logic
, a
, tcg_gen_andc_tl
, NULL
)
4167 TRANS(ORN
, ALL
, do_logic
, a
, tcg_gen_orc_tl
, NULL
)
4168 TRANS(XORN
, ALL
, do_logic
, a
, tcg_gen_eqv_tl
, NULL
)
4170 TRANS(MULX
, 64, do_arith
, a
, -1, tcg_gen_mul_tl
, tcg_gen_muli_tl
, NULL
)
4171 TRANS(UMUL
, MUL
, do_logic
, a
, gen_op_umul
, NULL
)
4172 TRANS(SMUL
, MUL
, do_logic
, a
, gen_op_smul
, NULL
)
4174 TRANS(UDIVX
, 64, do_arith
, a
, -1, gen_op_udivx
, NULL
, NULL
)
4175 TRANS(SDIVX
, 64, do_arith
, a
, -1, gen_op_sdivx
, NULL
, NULL
)
4176 TRANS(UDIV
, DIV
, do_arith
, a
, CC_OP_DIV
, gen_op_udiv
, NULL
, gen_op_udivcc
)
4177 TRANS(SDIV
, DIV
, do_arith
, a
, CC_OP_DIV
, gen_op_sdiv
, NULL
, gen_op_sdivcc
)
4179 static bool trans_OR(DisasContext
*dc
, arg_r_r_ri_cc
*a
)
4181 /* OR with %g0 is the canonical alias for MOV. */
4182 if (!a
->cc
&& a
->rs1
== 0) {
4183 if (a
->imm
|| a
->rs2_or_imm
== 0) {
4184 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl(a
->rs2_or_imm
));
4185 } else if (a
->rs2_or_imm
& ~0x1f) {
4186 /* For simplicity, we under-decoded the rs2 form. */
4189 gen_store_gpr(dc
, a
->rd
, cpu_regs
[a
->rs2_or_imm
]);
4191 return advance_pc(dc
);
4193 return do_logic(dc
, a
, tcg_gen_or_tl
, tcg_gen_ori_tl
);
/*
 * ADDC/ADDCcc: add with carry-in.  The generator chosen depends on how
 * the current condition codes were produced, so that the carry bit can
 * be recovered cheaply from the known CC state.
 * NOTE(review): the switch case labels below were lost in extraction and
 * are reconstructed from the visible branch bodies — confirm against the
 * full source.
 */
static bool trans_ADDC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain ADD. */
        return do_arith(dc, a, CC_OP_ADD,
                        tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_add_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        /* Carry comes from a previous addition. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_add, NULL, gen_op_addccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        /* Carry comes from a previous subtraction. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_sub, NULL, gen_op_addccc_sub);
    default:
        /* CC state unknown at translation time: extract carry at runtime. */
        return do_arith(dc, a, CC_OP_ADDX,
                        gen_op_addc_generic, NULL, gen_op_addccc_generic);
    }
}
/*
 * SUBC/SUBCcc: subtract with borrow-in.  Mirrors trans_ADDC: the
 * generator is specialized on how the live condition codes were set.
 * NOTE(review): case labels reconstructed (lost in extraction) — confirm
 * against the full source.
 */
static bool trans_SUBC(DisasContext *dc, arg_r_r_ri_cc *a)
{
    switch (dc->cc_op) {
    case CC_OP_DIV:
    case CC_OP_LOGIC:
        /* Carry is known to be zero. Fall back to plain SUB. */
        return do_arith(dc, a, CC_OP_SUB,
                        tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_sub_cc);
    case CC_OP_ADD:
    case CC_OP_TADD:
    case CC_OP_TADDTV:
        /* Borrow comes from a previous addition. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_add, NULL, gen_op_subccc_add);
    case CC_OP_SUB:
    case CC_OP_TSUB:
    case CC_OP_TSUBTV:
        /* Borrow comes from a previous subtraction. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_sub, NULL, gen_op_subccc_sub);
    default:
        /* CC state unknown at translation time: extract borrow at runtime. */
        return do_arith(dc, a, CC_OP_SUBX,
                        gen_op_subc_generic, NULL, gen_op_subccc_generic);
    }
}
/*
 * MULScc: one multiply step, always setting the condition codes.
 * NOTE(review): gen_op_mulscc consumes the live PSR state; the upstream
 * function body may contain a preamble (e.g. an update_psr(dc) call)
 * that was lost in extraction — confirm against the full source.
 */
static bool trans_MULScc(DisasContext *dc, arg_r_r_ri_cc *a)
{
    return do_arith(dc, a, CC_OP_ADD, NULL, NULL, gen_op_mulscc);
}
/*
 * Shift by register amount.  l selects left shift, u selects unsigned
 * (logical) right shift; (l=false, u=false) is arithmetic right shift.
 * a->x selects the 64-bit form (sparc64 only).
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* The hardware masks the shift count to 6 (64-bit) or 5 bits. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit shift: the result is zero-extended. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* 32-bit logical shift: discard the high input bits first. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* 32-bit arithmetic shift: sign-extend the input first. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
/* Register-count shifts: (left?, unsigned?) select the three variants. */
TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
/*
 * Shift by immediate amount.  Same (l, u) encoding as do_shift_r.
 * With a compile-time count the 32-bit forms can be folded into a
 * single deposit/extract/sextract operation.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Full-width shift: TL is already the operand width. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /*
         * 32-bit shift on a 64-bit TL: shift and extend in one op.
         * deposit_z = shift left + zero high bits; extract/sextract
         * = shift right with zero/sign extension of the 32-bit field.
         */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
/* Immediate-count shifts: (left?, unsigned?) select the three variants. */
TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
4327 static TCGv
gen_rs2_or_imm(DisasContext
*dc
, bool imm
, int rs2_or_imm
)
4329 /* For simplicity, we under-decoded the rs2 form. */
4330 if (!imm
&& rs2_or_imm
& ~0x1f) {
4333 if (imm
|| rs2_or_imm
== 0) {
4334 return tcg_constant_tl(rs2_or_imm
);
4336 return cpu_regs
[rs2_or_imm
];
/*
 * Shared tail of MOVcc/MOVfcc/MOVR: rd = cmp ? src2 : rd.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    /* Load the current value: movcond must preserve it when cmp fails. */
    TCGv dst = gen_load_gpr(dc, rd);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, cmp->c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
4349 static bool trans_MOVcc(DisasContext
*dc
, arg_MOVcc
*a
)
4351 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4357 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
4358 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4361 static bool trans_MOVfcc(DisasContext
*dc
, arg_MOVfcc
*a
)
4363 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4369 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
4370 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
4373 static bool trans_MOVR(DisasContext
*dc
, arg_MOVR
*a
)
4375 TCGv src2
= gen_rs2_or_imm(dc
, a
->imm
, a
->rs2_or_imm
);
4381 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
4382 return do_mov_cond(dc
, &cmp
, a
->rd
, src2
);
/*
 * Legacy-decoder feature gates: bail out of disas_sparc_legacy() when
 * the CPU model lacks FEATURE.  Usable only inside that function, since
 * they jump to its local error labels.
 * NOTE(review): the goto targets were lost in extraction and are
 * reconstructed (illegal_insn / nfpu_insn) — confirm against the full
 * source.
 */
#define CHECK_IU_FEATURE(dc, FEATURE)                      \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto illegal_insn;
#define CHECK_FPU_FEATURE(dc, FEATURE)                     \
    if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE))  \
        goto nfpu_insn;
4392 /* before an instruction, dc->pc must be static */
4393 static void disas_sparc_legacy(DisasContext
*dc
, unsigned int insn
)
4395 unsigned int opc
, rs1
, rs2
, rd
;
4396 TCGv cpu_src1
, cpu_src2
;
4397 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
4398 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
4401 opc
= GET_FIELD(insn
, 0, 1);
4402 rd
= GET_FIELD(insn
, 2, 6);
4406 goto illegal_insn
; /* in decodetree */
4408 g_assert_not_reached(); /* in decodetree */
4409 case 2: /* FPU & Logical Operations */
4411 unsigned int xop
__attribute__((unused
)) = GET_FIELD(insn
, 7, 12);
4412 TCGv cpu_dst
__attribute__((unused
)) = tcg_temp_new();
4413 TCGv cpu_tmp0
__attribute__((unused
));
4415 if (xop
== 0x34) { /* FPU Operations */
4416 if (gen_trap_ifnofpu(dc
)) {
4419 gen_op_clear_ieee_excp_and_FTT();
4420 rs1
= GET_FIELD(insn
, 13, 17);
4421 rs2
= GET_FIELD(insn
, 27, 31);
4422 xop
= GET_FIELD(insn
, 18, 26);
4425 case 0x1: /* fmovs */
4426 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4427 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4429 case 0x5: /* fnegs */
4430 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
4432 case 0x9: /* fabss */
4433 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
4435 case 0x29: /* fsqrts */
4436 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
4438 case 0x2a: /* fsqrtd */
4439 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
4441 case 0x2b: /* fsqrtq */
4442 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4443 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
4445 case 0x41: /* fadds */
4446 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
4448 case 0x42: /* faddd */
4449 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
4451 case 0x43: /* faddq */
4452 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4453 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
4455 case 0x45: /* fsubs */
4456 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
4458 case 0x46: /* fsubd */
4459 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
4461 case 0x47: /* fsubq */
4462 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4463 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
4465 case 0x49: /* fmuls */
4466 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
4468 case 0x4a: /* fmuld */
4469 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
4471 case 0x4b: /* fmulq */
4472 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4473 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
4475 case 0x4d: /* fdivs */
4476 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
4478 case 0x4e: /* fdivd */
4479 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
4481 case 0x4f: /* fdivq */
4482 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4483 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
4485 case 0x69: /* fsmuld */
4486 CHECK_FPU_FEATURE(dc
, FSMULD
);
4487 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
4489 case 0x6e: /* fdmulq */
4490 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4491 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
4493 case 0xc4: /* fitos */
4494 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
4496 case 0xc6: /* fdtos */
4497 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
4499 case 0xc7: /* fqtos */
4500 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4501 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
4503 case 0xc8: /* fitod */
4504 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
4506 case 0xc9: /* fstod */
4507 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
4509 case 0xcb: /* fqtod */
4510 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4511 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
4513 case 0xcc: /* fitoq */
4514 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4515 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
4517 case 0xcd: /* fstoq */
4518 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4519 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
4521 case 0xce: /* fdtoq */
4522 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4523 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
4525 case 0xd1: /* fstoi */
4526 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
4528 case 0xd2: /* fdtoi */
4529 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
4531 case 0xd3: /* fqtoi */
4532 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4533 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
4535 #ifdef TARGET_SPARC64
4536 case 0x2: /* V9 fmovd */
4537 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4538 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4540 case 0x3: /* V9 fmovq */
4541 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4542 gen_move_Q(dc
, rd
, rs2
);
4544 case 0x6: /* V9 fnegd */
4545 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
4547 case 0x7: /* V9 fnegq */
4548 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4549 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
4551 case 0xa: /* V9 fabsd */
4552 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
4554 case 0xb: /* V9 fabsq */
4555 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4556 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
4558 case 0x81: /* V9 fstox */
4559 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
4561 case 0x82: /* V9 fdtox */
4562 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
4564 case 0x83: /* V9 fqtox */
4565 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4566 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
4568 case 0x84: /* V9 fxtos */
4569 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
4571 case 0x88: /* V9 fxtod */
4572 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
4574 case 0x8c: /* V9 fxtoq */
4575 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4576 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
4582 } else if (xop
== 0x35) { /* FPU Operations */
4583 #ifdef TARGET_SPARC64
4586 if (gen_trap_ifnofpu(dc
)) {
4589 gen_op_clear_ieee_excp_and_FTT();
4590 rs1
= GET_FIELD(insn
, 13, 17);
4591 rs2
= GET_FIELD(insn
, 27, 31);
4592 xop
= GET_FIELD(insn
, 18, 26);
4594 #ifdef TARGET_SPARC64
4598 cond = GET_FIELD_SP(insn, 10, 12); \
4599 cpu_src1 = get_src1(dc, insn); \
4600 gen_compare_reg(&cmp, cond, cpu_src1); \
4601 gen_fmov##sz(dc, &cmp, rd, rs2); \
4604 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
4607 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
4610 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
4611 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4618 #ifdef TARGET_SPARC64
4619 #define FMOVCC(fcc, sz) \
4622 cond = GET_FIELD_SP(insn, 14, 17); \
4623 gen_fcompare(&cmp, fcc, cond); \
4624 gen_fmov##sz(dc, &cmp, rd, rs2); \
4627 case 0x001: /* V9 fmovscc %fcc0 */
4630 case 0x002: /* V9 fmovdcc %fcc0 */
4633 case 0x003: /* V9 fmovqcc %fcc0 */
4634 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4637 case 0x041: /* V9 fmovscc %fcc1 */
4640 case 0x042: /* V9 fmovdcc %fcc1 */
4643 case 0x043: /* V9 fmovqcc %fcc1 */
4644 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4647 case 0x081: /* V9 fmovscc %fcc2 */
4650 case 0x082: /* V9 fmovdcc %fcc2 */
4653 case 0x083: /* V9 fmovqcc %fcc2 */
4654 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4657 case 0x0c1: /* V9 fmovscc %fcc3 */
4660 case 0x0c2: /* V9 fmovdcc %fcc3 */
4663 case 0x0c3: /* V9 fmovqcc %fcc3 */
4664 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4668 #define FMOVCC(xcc, sz) \
4671 cond = GET_FIELD_SP(insn, 14, 17); \
4672 gen_compare(&cmp, xcc, cond, dc); \
4673 gen_fmov##sz(dc, &cmp, rd, rs2); \
4676 case 0x101: /* V9 fmovscc %icc */
4679 case 0x102: /* V9 fmovdcc %icc */
4682 case 0x103: /* V9 fmovqcc %icc */
4683 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4686 case 0x181: /* V9 fmovscc %xcc */
4689 case 0x182: /* V9 fmovdcc %xcc */
4692 case 0x183: /* V9 fmovqcc %xcc */
4693 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4698 case 0x51: /* fcmps, V9 %fcc */
4699 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4700 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
4701 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
4703 case 0x52: /* fcmpd, V9 %fcc */
4704 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4705 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4706 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
4708 case 0x53: /* fcmpq, V9 %fcc */
4709 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4710 gen_op_load_fpr_QT0(QFPREG(rs1
));
4711 gen_op_load_fpr_QT1(QFPREG(rs2
));
4712 gen_op_fcmpq(rd
& 3);
4714 case 0x55: /* fcmpes, V9 %fcc */
4715 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4716 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
4717 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
4719 case 0x56: /* fcmped, V9 %fcc */
4720 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4721 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4722 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
4724 case 0x57: /* fcmpeq, V9 %fcc */
4725 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4726 gen_op_load_fpr_QT0(QFPREG(rs1
));
4727 gen_op_load_fpr_QT1(QFPREG(rs2
));
4728 gen_op_fcmpeq(rd
& 3);
4733 } else if (xop
< 0x36) {
4737 cpu_src1
= get_src1(dc
, insn
);
4738 cpu_src2
= get_src2(dc
, insn
);
4740 case 0x20: /* taddcc */
4741 case 0x21: /* tsubcc */
4742 case 0x22: /* taddcctv */
4743 case 0x23: /* tsubcctv */
4744 case 0x24: /* mulscc */
4745 case 0x25: /* sll */
4746 case 0x26: /* srl */
4747 case 0x27: /* sra */
4748 goto illegal_insn
; /* in decodetree */
4750 goto illegal_insn
; /* WRASR in decodetree */
4752 goto illegal_insn
; /* WRPR in decodetree */
4753 case 0x33: /* wrtbr, UA2005 wrhpr */
4754 goto illegal_insn
; /* WRTBR, WRHPR in decodetree */
4755 #ifdef TARGET_SPARC64
4756 case 0x2c: /* V9 movcc */
4757 case 0x2f: /* V9 movr */
4758 goto illegal_insn
; /* in decodetree */
4759 case 0x2e: /* V9 popc */
4760 tcg_gen_ctpop_tl(cpu_dst
, cpu_src2
);
4761 gen_store_gpr(dc
, rd
, cpu_dst
);
4768 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4769 #ifdef TARGET_SPARC64
4770 int opf
= GET_FIELD_SP(insn
, 5, 13);
4771 rs1
= GET_FIELD(insn
, 13, 17);
4772 rs2
= GET_FIELD(insn
, 27, 31);
4773 if (gen_trap_ifnofpu(dc
)) {
4778 case 0x000: /* VIS I edge8cc */
4779 CHECK_FPU_FEATURE(dc
, VIS1
);
4780 cpu_src1
= gen_load_gpr(dc
, rs1
);
4781 cpu_src2
= gen_load_gpr(dc
, rs2
);
4782 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4783 gen_store_gpr(dc
, rd
, cpu_dst
);
4785 case 0x001: /* VIS II edge8n */
4786 CHECK_FPU_FEATURE(dc
, VIS2
);
4787 cpu_src1
= gen_load_gpr(dc
, rs1
);
4788 cpu_src2
= gen_load_gpr(dc
, rs2
);
4789 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4790 gen_store_gpr(dc
, rd
, cpu_dst
);
4792 case 0x002: /* VIS I edge8lcc */
4793 CHECK_FPU_FEATURE(dc
, VIS1
);
4794 cpu_src1
= gen_load_gpr(dc
, rs1
);
4795 cpu_src2
= gen_load_gpr(dc
, rs2
);
4796 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4797 gen_store_gpr(dc
, rd
, cpu_dst
);
4799 case 0x003: /* VIS II edge8ln */
4800 CHECK_FPU_FEATURE(dc
, VIS2
);
4801 cpu_src1
= gen_load_gpr(dc
, rs1
);
4802 cpu_src2
= gen_load_gpr(dc
, rs2
);
4803 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4804 gen_store_gpr(dc
, rd
, cpu_dst
);
4806 case 0x004: /* VIS I edge16cc */
4807 CHECK_FPU_FEATURE(dc
, VIS1
);
4808 cpu_src1
= gen_load_gpr(dc
, rs1
);
4809 cpu_src2
= gen_load_gpr(dc
, rs2
);
4810 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4811 gen_store_gpr(dc
, rd
, cpu_dst
);
4813 case 0x005: /* VIS II edge16n */
4814 CHECK_FPU_FEATURE(dc
, VIS2
);
4815 cpu_src1
= gen_load_gpr(dc
, rs1
);
4816 cpu_src2
= gen_load_gpr(dc
, rs2
);
4817 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4818 gen_store_gpr(dc
, rd
, cpu_dst
);
4820 case 0x006: /* VIS I edge16lcc */
4821 CHECK_FPU_FEATURE(dc
, VIS1
);
4822 cpu_src1
= gen_load_gpr(dc
, rs1
);
4823 cpu_src2
= gen_load_gpr(dc
, rs2
);
4824 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4825 gen_store_gpr(dc
, rd
, cpu_dst
);
4827 case 0x007: /* VIS II edge16ln */
4828 CHECK_FPU_FEATURE(dc
, VIS2
);
4829 cpu_src1
= gen_load_gpr(dc
, rs1
);
4830 cpu_src2
= gen_load_gpr(dc
, rs2
);
4831 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4832 gen_store_gpr(dc
, rd
, cpu_dst
);
4834 case 0x008: /* VIS I edge32cc */
4835 CHECK_FPU_FEATURE(dc
, VIS1
);
4836 cpu_src1
= gen_load_gpr(dc
, rs1
);
4837 cpu_src2
= gen_load_gpr(dc
, rs2
);
4838 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4839 gen_store_gpr(dc
, rd
, cpu_dst
);
4841 case 0x009: /* VIS II edge32n */
4842 CHECK_FPU_FEATURE(dc
, VIS2
);
4843 cpu_src1
= gen_load_gpr(dc
, rs1
);
4844 cpu_src2
= gen_load_gpr(dc
, rs2
);
4845 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4846 gen_store_gpr(dc
, rd
, cpu_dst
);
4848 case 0x00a: /* VIS I edge32lcc */
4849 CHECK_FPU_FEATURE(dc
, VIS1
);
4850 cpu_src1
= gen_load_gpr(dc
, rs1
);
4851 cpu_src2
= gen_load_gpr(dc
, rs2
);
4852 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4853 gen_store_gpr(dc
, rd
, cpu_dst
);
4855 case 0x00b: /* VIS II edge32ln */
4856 CHECK_FPU_FEATURE(dc
, VIS2
);
4857 cpu_src1
= gen_load_gpr(dc
, rs1
);
4858 cpu_src2
= gen_load_gpr(dc
, rs2
);
4859 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4860 gen_store_gpr(dc
, rd
, cpu_dst
);
4862 case 0x010: /* VIS I array8 */
4863 CHECK_FPU_FEATURE(dc
, VIS1
);
4864 cpu_src1
= gen_load_gpr(dc
, rs1
);
4865 cpu_src2
= gen_load_gpr(dc
, rs2
);
4866 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4867 gen_store_gpr(dc
, rd
, cpu_dst
);
4869 case 0x012: /* VIS I array16 */
4870 CHECK_FPU_FEATURE(dc
, VIS1
);
4871 cpu_src1
= gen_load_gpr(dc
, rs1
);
4872 cpu_src2
= gen_load_gpr(dc
, rs2
);
4873 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4874 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4875 gen_store_gpr(dc
, rd
, cpu_dst
);
4877 case 0x014: /* VIS I array32 */
4878 CHECK_FPU_FEATURE(dc
, VIS1
);
4879 cpu_src1
= gen_load_gpr(dc
, rs1
);
4880 cpu_src2
= gen_load_gpr(dc
, rs2
);
4881 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4882 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4883 gen_store_gpr(dc
, rd
, cpu_dst
);
4885 case 0x018: /* VIS I alignaddr */
4886 CHECK_FPU_FEATURE(dc
, VIS1
);
4887 cpu_src1
= gen_load_gpr(dc
, rs1
);
4888 cpu_src2
= gen_load_gpr(dc
, rs2
);
4889 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4890 gen_store_gpr(dc
, rd
, cpu_dst
);
4892 case 0x01a: /* VIS I alignaddrl */
4893 CHECK_FPU_FEATURE(dc
, VIS1
);
4894 cpu_src1
= gen_load_gpr(dc
, rs1
);
4895 cpu_src2
= gen_load_gpr(dc
, rs2
);
4896 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4897 gen_store_gpr(dc
, rd
, cpu_dst
);
4899 case 0x019: /* VIS II bmask */
4900 CHECK_FPU_FEATURE(dc
, VIS2
);
4901 cpu_src1
= gen_load_gpr(dc
, rs1
);
4902 cpu_src2
= gen_load_gpr(dc
, rs2
);
4903 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4904 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4905 gen_store_gpr(dc
, rd
, cpu_dst
);
4907 case 0x020: /* VIS I fcmple16 */
4908 CHECK_FPU_FEATURE(dc
, VIS1
);
4909 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4910 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4911 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4912 gen_store_gpr(dc
, rd
, cpu_dst
);
4914 case 0x022: /* VIS I fcmpne16 */
4915 CHECK_FPU_FEATURE(dc
, VIS1
);
4916 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4917 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4918 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4919 gen_store_gpr(dc
, rd
, cpu_dst
);
4921 case 0x024: /* VIS I fcmple32 */
4922 CHECK_FPU_FEATURE(dc
, VIS1
);
4923 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4924 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4925 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4926 gen_store_gpr(dc
, rd
, cpu_dst
);
4928 case 0x026: /* VIS I fcmpne32 */
4929 CHECK_FPU_FEATURE(dc
, VIS1
);
4930 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4931 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4932 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4933 gen_store_gpr(dc
, rd
, cpu_dst
);
4935 case 0x028: /* VIS I fcmpgt16 */
4936 CHECK_FPU_FEATURE(dc
, VIS1
);
4937 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4938 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4939 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4940 gen_store_gpr(dc
, rd
, cpu_dst
);
4942 case 0x02a: /* VIS I fcmpeq16 */
4943 CHECK_FPU_FEATURE(dc
, VIS1
);
4944 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4945 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4946 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4947 gen_store_gpr(dc
, rd
, cpu_dst
);
4949 case 0x02c: /* VIS I fcmpgt32 */
4950 CHECK_FPU_FEATURE(dc
, VIS1
);
4951 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4952 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4953 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4954 gen_store_gpr(dc
, rd
, cpu_dst
);
4956 case 0x02e: /* VIS I fcmpeq32 */
4957 CHECK_FPU_FEATURE(dc
, VIS1
);
4958 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4959 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4960 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4961 gen_store_gpr(dc
, rd
, cpu_dst
);
4963 case 0x031: /* VIS I fmul8x16 */
4964 CHECK_FPU_FEATURE(dc
, VIS1
);
4965 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4967 case 0x033: /* VIS I fmul8x16au */
4968 CHECK_FPU_FEATURE(dc
, VIS1
);
4969 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4971 case 0x035: /* VIS I fmul8x16al */
4972 CHECK_FPU_FEATURE(dc
, VIS1
);
4973 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4975 case 0x036: /* VIS I fmul8sux16 */
4976 CHECK_FPU_FEATURE(dc
, VIS1
);
4977 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4979 case 0x037: /* VIS I fmul8ulx16 */
4980 CHECK_FPU_FEATURE(dc
, VIS1
);
4981 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4983 case 0x038: /* VIS I fmuld8sux16 */
4984 CHECK_FPU_FEATURE(dc
, VIS1
);
4985 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4987 case 0x039: /* VIS I fmuld8ulx16 */
4988 CHECK_FPU_FEATURE(dc
, VIS1
);
4989 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4991 case 0x03a: /* VIS I fpack32 */
4992 CHECK_FPU_FEATURE(dc
, VIS1
);
4993 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4995 case 0x03b: /* VIS I fpack16 */
4996 CHECK_FPU_FEATURE(dc
, VIS1
);
4997 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4998 cpu_dst_32
= gen_dest_fpr_F(dc
);
4999 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5000 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5002 case 0x03d: /* VIS I fpackfix */
5003 CHECK_FPU_FEATURE(dc
, VIS1
);
5004 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5005 cpu_dst_32
= gen_dest_fpr_F(dc
);
5006 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
5007 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5009 case 0x03e: /* VIS I pdist */
5010 CHECK_FPU_FEATURE(dc
, VIS1
);
5011 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
5013 case 0x048: /* VIS I faligndata */
5014 CHECK_FPU_FEATURE(dc
, VIS1
);
5015 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
5017 case 0x04b: /* VIS I fpmerge */
5018 CHECK_FPU_FEATURE(dc
, VIS1
);
5019 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
5021 case 0x04c: /* VIS II bshuffle */
5022 CHECK_FPU_FEATURE(dc
, VIS2
);
5023 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
5025 case 0x04d: /* VIS I fexpand */
5026 CHECK_FPU_FEATURE(dc
, VIS1
);
5027 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
5029 case 0x050: /* VIS I fpadd16 */
5030 CHECK_FPU_FEATURE(dc
, VIS1
);
5031 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
5033 case 0x051: /* VIS I fpadd16s */
5034 CHECK_FPU_FEATURE(dc
, VIS1
);
5035 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
5037 case 0x052: /* VIS I fpadd32 */
5038 CHECK_FPU_FEATURE(dc
, VIS1
);
5039 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
5041 case 0x053: /* VIS I fpadd32s */
5042 CHECK_FPU_FEATURE(dc
, VIS1
);
5043 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
5045 case 0x054: /* VIS I fpsub16 */
5046 CHECK_FPU_FEATURE(dc
, VIS1
);
5047 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
5049 case 0x055: /* VIS I fpsub16s */
5050 CHECK_FPU_FEATURE(dc
, VIS1
);
5051 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
5053 case 0x056: /* VIS I fpsub32 */
5054 CHECK_FPU_FEATURE(dc
, VIS1
);
5055 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
5057 case 0x057: /* VIS I fpsub32s */
5058 CHECK_FPU_FEATURE(dc
, VIS1
);
5059 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
5061 case 0x060: /* VIS I fzero */
5062 CHECK_FPU_FEATURE(dc
, VIS1
);
5063 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5064 tcg_gen_movi_i64(cpu_dst_64
, 0);
5065 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5067 case 0x061: /* VIS I fzeros */
5068 CHECK_FPU_FEATURE(dc
, VIS1
);
5069 cpu_dst_32
= gen_dest_fpr_F(dc
);
5070 tcg_gen_movi_i32(cpu_dst_32
, 0);
5071 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5073 case 0x062: /* VIS I fnor */
5074 CHECK_FPU_FEATURE(dc
, VIS1
);
5075 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
5077 case 0x063: /* VIS I fnors */
5078 CHECK_FPU_FEATURE(dc
, VIS1
);
5079 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
5081 case 0x064: /* VIS I fandnot2 */
5082 CHECK_FPU_FEATURE(dc
, VIS1
);
5083 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
5085 case 0x065: /* VIS I fandnot2s */
5086 CHECK_FPU_FEATURE(dc
, VIS1
);
5087 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
5089 case 0x066: /* VIS I fnot2 */
5090 CHECK_FPU_FEATURE(dc
, VIS1
);
5091 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
5093 case 0x067: /* VIS I fnot2s */
5094 CHECK_FPU_FEATURE(dc
, VIS1
);
5095 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
5097 case 0x068: /* VIS I fandnot1 */
5098 CHECK_FPU_FEATURE(dc
, VIS1
);
5099 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
5101 case 0x069: /* VIS I fandnot1s */
5102 CHECK_FPU_FEATURE(dc
, VIS1
);
5103 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
5105 case 0x06a: /* VIS I fnot1 */
5106 CHECK_FPU_FEATURE(dc
, VIS1
);
5107 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
5109 case 0x06b: /* VIS I fnot1s */
5110 CHECK_FPU_FEATURE(dc
, VIS1
);
5111 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
5113 case 0x06c: /* VIS I fxor */
5114 CHECK_FPU_FEATURE(dc
, VIS1
);
5115 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
5117 case 0x06d: /* VIS I fxors */
5118 CHECK_FPU_FEATURE(dc
, VIS1
);
5119 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
5121 case 0x06e: /* VIS I fnand */
5122 CHECK_FPU_FEATURE(dc
, VIS1
);
5123 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
5125 case 0x06f: /* VIS I fnands */
5126 CHECK_FPU_FEATURE(dc
, VIS1
);
5127 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
5129 case 0x070: /* VIS I fand */
5130 CHECK_FPU_FEATURE(dc
, VIS1
);
5131 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
5133 case 0x071: /* VIS I fands */
5134 CHECK_FPU_FEATURE(dc
, VIS1
);
5135 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
5137 case 0x072: /* VIS I fxnor */
5138 CHECK_FPU_FEATURE(dc
, VIS1
);
5139 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
5141 case 0x073: /* VIS I fxnors */
5142 CHECK_FPU_FEATURE(dc
, VIS1
);
5143 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
5145 case 0x074: /* VIS I fsrc1 */
5146 CHECK_FPU_FEATURE(dc
, VIS1
);
5147 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
5148 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
5150 case 0x075: /* VIS I fsrc1s */
5151 CHECK_FPU_FEATURE(dc
, VIS1
);
5152 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
5153 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
5155 case 0x076: /* VIS I fornot2 */
5156 CHECK_FPU_FEATURE(dc
, VIS1
);
5157 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
5159 case 0x077: /* VIS I fornot2s */
5160 CHECK_FPU_FEATURE(dc
, VIS1
);
5161 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
5163 case 0x078: /* VIS I fsrc2 */
5164 CHECK_FPU_FEATURE(dc
, VIS1
);
5165 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
5166 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
5168 case 0x079: /* VIS I fsrc2s */
5169 CHECK_FPU_FEATURE(dc
, VIS1
);
5170 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
5171 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
5173 case 0x07a: /* VIS I fornot1 */
5174 CHECK_FPU_FEATURE(dc
, VIS1
);
5175 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
5177 case 0x07b: /* VIS I fornot1s */
5178 CHECK_FPU_FEATURE(dc
, VIS1
);
5179 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
5181 case 0x07c: /* VIS I for */
5182 CHECK_FPU_FEATURE(dc
, VIS1
);
5183 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
5185 case 0x07d: /* VIS I fors */
5186 CHECK_FPU_FEATURE(dc
, VIS1
);
5187 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
5189 case 0x07e: /* VIS I fone */
5190 CHECK_FPU_FEATURE(dc
, VIS1
);
5191 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5192 tcg_gen_movi_i64(cpu_dst_64
, -1);
5193 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5195 case 0x07f: /* VIS I fones */
5196 CHECK_FPU_FEATURE(dc
, VIS1
);
5197 cpu_dst_32
= gen_dest_fpr_F(dc
);
5198 tcg_gen_movi_i32(cpu_dst_32
, -1);
5199 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5201 case 0x080: /* VIS I shutdown */
5202 case 0x081: /* VIS II siam */
5211 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
5212 #ifdef TARGET_SPARC64
5217 #ifdef TARGET_SPARC64
5218 } else if (xop
== 0x39) { /* V9 return */
5220 cpu_src1
= get_src1(dc
, insn
);
5221 cpu_tmp0
= tcg_temp_new();
5222 if (IS_IMM
) { /* immediate */
5223 simm
= GET_FIELDs(insn
, 19, 31);
5224 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
5225 } else { /* register */
5226 rs2
= GET_FIELD(insn
, 27, 31);
5228 cpu_src2
= gen_load_gpr(dc
, rs2
);
5229 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
5231 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
5234 gen_check_align(dc
, cpu_tmp0
, 3);
5235 gen_helper_restore(tcg_env
);
5237 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5238 dc
->npc
= DYNAMIC_PC_LOOKUP
;
5242 cpu_src1
= get_src1(dc
, insn
);
5243 cpu_tmp0
= tcg_temp_new();
5244 if (IS_IMM
) { /* immediate */
5245 simm
= GET_FIELDs(insn
, 19, 31);
5246 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
5247 } else { /* register */
5248 rs2
= GET_FIELD(insn
, 27, 31);
5250 cpu_src2
= gen_load_gpr(dc
, rs2
);
5251 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
5253 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
5257 case 0x38: /* jmpl */
5259 gen_check_align(dc
, cpu_tmp0
, 3);
5260 gen_store_gpr(dc
, rd
, tcg_constant_tl(dc
->pc
));
5262 gen_address_mask(dc
, cpu_tmp0
);
5263 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5264 dc
->npc
= DYNAMIC_PC_LOOKUP
;
5267 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5268 case 0x39: /* rett, V9 return */
5270 if (!supervisor(dc
))
5272 gen_check_align(dc
, cpu_tmp0
, 3);
5274 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5275 dc
->npc
= DYNAMIC_PC
;
5276 gen_helper_rett(tcg_env
);
5280 case 0x3b: /* flush */
5283 case 0x3c: /* save */
5284 gen_helper_save(tcg_env
);
5285 gen_store_gpr(dc
, rd
, cpu_tmp0
);
5287 case 0x3d: /* restore */
5288 gen_helper_restore(tcg_env
);
5289 gen_store_gpr(dc
, rd
, cpu_tmp0
);
5291 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5292 case 0x3e: /* V9 done/retry */
5296 if (!supervisor(dc
))
5298 dc
->npc
= DYNAMIC_PC
;
5299 dc
->pc
= DYNAMIC_PC
;
5300 translator_io_start(&dc
->base
);
5301 gen_helper_done(tcg_env
);
5304 if (!supervisor(dc
))
5306 dc
->npc
= DYNAMIC_PC
;
5307 dc
->pc
= DYNAMIC_PC
;
5308 translator_io_start(&dc
->base
);
5309 gen_helper_retry(tcg_env
);
5324 case 3: /* load/store instructions */
5326 unsigned int xop
= GET_FIELD(insn
, 7, 12);
5327 /* ??? gen_address_mask prevents us from using a source
5328 register directly. Always generate a temporary. */
5329 TCGv cpu_addr
= tcg_temp_new();
5331 tcg_gen_mov_tl(cpu_addr
, get_src1(dc
, insn
));
5332 if (xop
== 0x3c || xop
== 0x3e) {
5333 /* V9 casa/casxa : no offset */
5334 } else if (IS_IMM
) { /* immediate */
5335 simm
= GET_FIELDs(insn
, 19, 31);
5337 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, simm
);
5339 } else { /* register */
5340 rs2
= GET_FIELD(insn
, 27, 31);
5342 tcg_gen_add_tl(cpu_addr
, cpu_addr
, gen_load_gpr(dc
, rs2
));
5345 if (xop
< 4 || (xop
> 7 && xop
< 0x14 && xop
!= 0x0e) ||
5346 (xop
> 0x17 && xop
<= 0x1d ) ||
5347 (xop
> 0x2c && xop
<= 0x33) || xop
== 0x1f || xop
== 0x3d) {
5348 TCGv cpu_val
= gen_dest_gpr(dc
, rd
);
5351 case 0x0: /* ld, V9 lduw, load unsigned word */
5352 gen_address_mask(dc
, cpu_addr
);
5353 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5354 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5356 case 0x1: /* ldub, load unsigned byte */
5357 gen_address_mask(dc
, cpu_addr
);
5358 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5359 dc
->mem_idx
, MO_UB
);
5361 case 0x2: /* lduh, load unsigned halfword */
5362 gen_address_mask(dc
, cpu_addr
);
5363 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5364 dc
->mem_idx
, MO_TEUW
| MO_ALIGN
);
5366 case 0x3: /* ldd, load double word */
5372 gen_address_mask(dc
, cpu_addr
);
5373 t64
= tcg_temp_new_i64();
5374 tcg_gen_qemu_ld_i64(t64
, cpu_addr
,
5375 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5376 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
5377 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
5378 gen_store_gpr(dc
, rd
+ 1, cpu_val
);
5379 tcg_gen_shri_i64(t64
, t64
, 32);
5380 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
5381 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
5384 case 0x9: /* ldsb, load signed byte */
5385 gen_address_mask(dc
, cpu_addr
);
5386 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
, dc
->mem_idx
, MO_SB
);
5388 case 0xa: /* ldsh, load signed halfword */
5389 gen_address_mask(dc
, cpu_addr
);
5390 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5391 dc
->mem_idx
, MO_TESW
| MO_ALIGN
);
5393 case 0xd: /* ldstub */
5394 gen_ldstub(dc
, cpu_val
, cpu_addr
, dc
->mem_idx
);
5397 /* swap, swap register with memory. Also atomically */
5398 cpu_src1
= gen_load_gpr(dc
, rd
);
5399 gen_swap(dc
, cpu_val
, cpu_src1
, cpu_addr
,
5400 dc
->mem_idx
, MO_TEUL
);
5402 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5403 case 0x10: /* lda, V9 lduwa, load word alternate */
5404 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUL
);
5406 case 0x11: /* lduba, load unsigned byte alternate */
5407 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_UB
);
5409 case 0x12: /* lduha, load unsigned halfword alternate */
5410 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUW
);
5412 case 0x13: /* ldda, load double word alternate */
5416 gen_ldda_asi(dc
, cpu_addr
, insn
, rd
);
5418 case 0x19: /* ldsba, load signed byte alternate */
5419 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_SB
);
5421 case 0x1a: /* ldsha, load signed halfword alternate */
5422 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TESW
);
5424 case 0x1d: /* ldstuba -- XXX: should be atomically */
5425 gen_ldstub_asi(dc
, cpu_val
, cpu_addr
, insn
);
5427 case 0x1f: /* swapa, swap reg with alt. memory. Also
5429 cpu_src1
= gen_load_gpr(dc
, rd
);
5430 gen_swap_asi(dc
, cpu_val
, cpu_src1
, cpu_addr
, insn
);
5433 #ifndef TARGET_SPARC64
5434 case 0x30: /* ldc */
5435 case 0x31: /* ldcsr */
5436 case 0x33: /* lddc */
5440 #ifdef TARGET_SPARC64
5441 case 0x08: /* V9 ldsw */
5442 gen_address_mask(dc
, cpu_addr
);
5443 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5444 dc
->mem_idx
, MO_TESL
| MO_ALIGN
);
5446 case 0x0b: /* V9 ldx */
5447 gen_address_mask(dc
, cpu_addr
);
5448 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5449 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5451 case 0x18: /* V9 ldswa */
5452 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TESL
);
5454 case 0x1b: /* V9 ldxa */
5455 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUQ
);
5457 case 0x2d: /* V9 prefetch, no effect */
5459 case 0x30: /* V9 ldfa */
5460 if (gen_trap_ifnofpu(dc
)) {
5463 gen_ldf_asi(dc
, cpu_addr
, insn
, 4, rd
);
5464 gen_update_fprs_dirty(dc
, rd
);
5466 case 0x33: /* V9 lddfa */
5467 if (gen_trap_ifnofpu(dc
)) {
5470 gen_ldf_asi(dc
, cpu_addr
, insn
, 8, DFPREG(rd
));
5471 gen_update_fprs_dirty(dc
, DFPREG(rd
));
5473 case 0x3d: /* V9 prefetcha, no effect */
5475 case 0x32: /* V9 ldqfa */
5476 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5477 if (gen_trap_ifnofpu(dc
)) {
5480 gen_ldf_asi(dc
, cpu_addr
, insn
, 16, QFPREG(rd
));
5481 gen_update_fprs_dirty(dc
, QFPREG(rd
));
5487 gen_store_gpr(dc
, rd
, cpu_val
);
5488 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5491 } else if (xop
>= 0x20 && xop
< 0x24) {
5492 if (gen_trap_ifnofpu(dc
)) {
5496 case 0x20: /* ldf, load fpreg */
5497 gen_address_mask(dc
, cpu_addr
);
5498 cpu_dst_32
= gen_dest_fpr_F(dc
);
5499 tcg_gen_qemu_ld_i32(cpu_dst_32
, cpu_addr
,
5500 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5501 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5503 case 0x21: /* ldfsr, V9 ldxfsr */
5504 #ifdef TARGET_SPARC64
5505 gen_address_mask(dc
, cpu_addr
);
5507 TCGv_i64 t64
= tcg_temp_new_i64();
5508 tcg_gen_qemu_ld_i64(t64
, cpu_addr
,
5509 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5510 gen_helper_ldxfsr(cpu_fsr
, tcg_env
, cpu_fsr
, t64
);
5514 cpu_dst_32
= tcg_temp_new_i32();
5515 tcg_gen_qemu_ld_i32(cpu_dst_32
, cpu_addr
,
5516 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5517 gen_helper_ldfsr(cpu_fsr
, tcg_env
, cpu_fsr
, cpu_dst_32
);
5519 case 0x22: /* ldqf, load quad fpreg */
5520 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5521 gen_address_mask(dc
, cpu_addr
);
5522 cpu_src1_64
= tcg_temp_new_i64();
5523 tcg_gen_qemu_ld_i64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
,
5524 MO_TEUQ
| MO_ALIGN_4
);
5525 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, 8);
5526 cpu_src2_64
= tcg_temp_new_i64();
5527 tcg_gen_qemu_ld_i64(cpu_src2_64
, cpu_addr
, dc
->mem_idx
,
5528 MO_TEUQ
| MO_ALIGN_4
);
5529 gen_store_fpr_Q(dc
, rd
, cpu_src1_64
, cpu_src2_64
);
5531 case 0x23: /* lddf, load double fpreg */
5532 gen_address_mask(dc
, cpu_addr
);
5533 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5534 tcg_gen_qemu_ld_i64(cpu_dst_64
, cpu_addr
, dc
->mem_idx
,
5535 MO_TEUQ
| MO_ALIGN_4
);
5536 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5541 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
5542 xop
== 0xe || xop
== 0x1e) {
5543 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
5546 case 0x4: /* st, store word */
5547 gen_address_mask(dc
, cpu_addr
);
5548 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5549 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5551 case 0x5: /* stb, store byte */
5552 gen_address_mask(dc
, cpu_addr
);
5553 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
, dc
->mem_idx
, MO_UB
);
5555 case 0x6: /* sth, store halfword */
5556 gen_address_mask(dc
, cpu_addr
);
5557 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5558 dc
->mem_idx
, MO_TEUW
| MO_ALIGN
);
5560 case 0x7: /* std, store double word */
5567 gen_address_mask(dc
, cpu_addr
);
5568 lo
= gen_load_gpr(dc
, rd
+ 1);
5569 t64
= tcg_temp_new_i64();
5570 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
5571 tcg_gen_qemu_st_i64(t64
, cpu_addr
,
5572 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5575 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5576 case 0x14: /* sta, V9 stwa, store word alternate */
5577 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUL
);
5579 case 0x15: /* stba, store byte alternate */
5580 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_UB
);
5582 case 0x16: /* stha, store halfword alternate */
5583 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUW
);
5585 case 0x17: /* stda, store double word alternate */
5589 gen_stda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
5592 #ifdef TARGET_SPARC64
5593 case 0x0e: /* V9 stx */
5594 gen_address_mask(dc
, cpu_addr
);
5595 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5596 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5598 case 0x1e: /* V9 stxa */
5599 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUQ
);
5605 } else if (xop
> 0x23 && xop
< 0x28) {
5606 if (gen_trap_ifnofpu(dc
)) {
5610 case 0x24: /* stf, store fpreg */
5611 gen_address_mask(dc
, cpu_addr
);
5612 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
5613 tcg_gen_qemu_st_i32(cpu_src1_32
, cpu_addr
,
5614 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5616 case 0x25: /* stfsr, V9 stxfsr */
5618 #ifdef TARGET_SPARC64
5619 gen_address_mask(dc
, cpu_addr
);
5621 tcg_gen_qemu_st_tl(cpu_fsr
, cpu_addr
,
5622 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5626 tcg_gen_qemu_st_tl(cpu_fsr
, cpu_addr
,
5627 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5631 #ifdef TARGET_SPARC64
5632 /* V9 stqf, store quad fpreg */
5633 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5634 gen_address_mask(dc
, cpu_addr
);
5635 /* ??? While stqf only requires 4-byte alignment, it is
5636 legal for the cpu to signal the unaligned exception.
5637 The OS trap handler is then required to fix it up.
5638 For qemu, this avoids having to probe the second page
5639 before performing the first write. */
5640 cpu_src1_64
= gen_load_fpr_Q0(dc
, rd
);
5641 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
,
5642 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN_16
);
5643 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, 8);
5644 cpu_src2_64
= gen_load_fpr_Q1(dc
, rd
);
5645 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
,
5646 dc
->mem_idx
, MO_TEUQ
);
5648 #else /* !TARGET_SPARC64 */
5649 /* stdfq, store floating point queue */
5650 #if defined(CONFIG_USER_ONLY)
5653 if (!supervisor(dc
))
5655 if (gen_trap_ifnofpu(dc
)) {
5661 case 0x27: /* stdf, store double fpreg */
5662 gen_address_mask(dc
, cpu_addr
);
5663 cpu_src1_64
= gen_load_fpr_D(dc
, rd
);
5664 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
,
5665 MO_TEUQ
| MO_ALIGN_4
);
5670 } else if (xop
> 0x33 && xop
< 0x3f) {
5672 #ifdef TARGET_SPARC64
5673 case 0x34: /* V9 stfa */
5674 if (gen_trap_ifnofpu(dc
)) {
5677 gen_stf_asi(dc
, cpu_addr
, insn
, 4, rd
);
5679 case 0x36: /* V9 stqfa */
5681 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5682 if (gen_trap_ifnofpu(dc
)) {
5685 gen_stf_asi(dc
, cpu_addr
, insn
, 16, QFPREG(rd
));
5688 case 0x37: /* V9 stdfa */
5689 if (gen_trap_ifnofpu(dc
)) {
5692 gen_stf_asi(dc
, cpu_addr
, insn
, 8, DFPREG(rd
));
5694 case 0x3e: /* V9 casxa */
5695 rs2
= GET_FIELD(insn
, 27, 31);
5696 cpu_src2
= gen_load_gpr(dc
, rs2
);
5697 gen_casx_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5700 case 0x34: /* stc */
5701 case 0x35: /* stcsr */
5702 case 0x36: /* stdcq */
5703 case 0x37: /* stdc */
5706 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5707 case 0x3c: /* V9 or LEON3 casa */
5708 #ifndef TARGET_SPARC64
5709 CHECK_IU_FEATURE(dc
, CASA
);
5711 rs2
= GET_FIELD(insn
, 27, 31);
5712 cpu_src2
= gen_load_gpr(dc
, rs2
);
5713 gen_cas_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5729 gen_exception(dc
, TT_ILL_INSN
);
5731 #if !defined(CONFIG_USER_ONLY)
5733 gen_exception(dc
, TT_PRIV_INSN
);
5737 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
5739 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5741 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
5744 #ifndef TARGET_SPARC64
5746 gen_exception(dc
, TT_NCP_INSN
);
5751 static void sparc_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
5753 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5754 CPUSPARCState
*env
= cpu_env(cs
);
5757 dc
->pc
= dc
->base
.pc_first
;
5758 dc
->npc
= (target_ulong
)dc
->base
.tb
->cs_base
;
5759 dc
->cc_op
= CC_OP_DYNAMIC
;
5760 dc
->mem_idx
= dc
->base
.tb
->flags
& TB_FLAG_MMU_MASK
;
5761 dc
->def
= &env
->def
;
5762 dc
->fpu_enabled
= tb_fpu_enabled(dc
->base
.tb
->flags
);
5763 dc
->address_mask_32bit
= tb_am_enabled(dc
->base
.tb
->flags
);
5764 #ifndef CONFIG_USER_ONLY
5765 dc
->supervisor
= (dc
->base
.tb
->flags
& TB_FLAG_SUPER
) != 0;
5767 #ifdef TARGET_SPARC64
5769 dc
->asi
= (dc
->base
.tb
->flags
>> TB_FLAG_ASI_SHIFT
) & 0xff;
5770 #ifndef CONFIG_USER_ONLY
5771 dc
->hypervisor
= (dc
->base
.tb
->flags
& TB_FLAG_HYPER
) != 0;
5775 * if we reach a page boundary, we stop generation so that the
5776 * PC of a TT_TFAULT exception is always in the right page
5778 bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
5779 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
5782 static void sparc_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
5786 static void sparc_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
5788 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5789 target_ulong npc
= dc
->npc
;
5794 assert(dc
->jump_pc
[1] == dc
->pc
+ 4);
5795 npc
= dc
->jump_pc
[0] | JUMP_PC
;
5798 case DYNAMIC_PC_LOOKUP
:
5802 g_assert_not_reached();
5805 tcg_gen_insn_start(dc
->pc
, npc
);
5808 static void sparc_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
5810 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5811 CPUSPARCState
*env
= cpu_env(cs
);
5814 insn
= translator_ldl(env
, &dc
->base
, dc
->pc
);
5815 dc
->base
.pc_next
+= 4;
5817 if (!decode(dc
, insn
)) {
5818 disas_sparc_legacy(dc
, insn
);
5821 if (dc
->base
.is_jmp
== DISAS_NORETURN
) {
5824 if (dc
->pc
!= dc
->base
.pc_next
) {
5825 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
5829 static void sparc_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
5831 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5832 DisasDelayException
*e
, *e_next
;
5835 switch (dc
->base
.is_jmp
) {
5837 case DISAS_TOO_MANY
:
5838 if (((dc
->pc
| dc
->npc
) & 3) == 0) {
5839 /* static PC and NPC: we can use direct chaining */
5840 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
5847 case DYNAMIC_PC_LOOKUP
:
5853 g_assert_not_reached();
5856 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
5862 gen_generic_branch(dc
);
5867 case DYNAMIC_PC_LOOKUP
:
5870 g_assert_not_reached();
5873 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
5876 tcg_gen_lookup_and_goto_ptr();
5878 tcg_gen_exit_tb(NULL
, 0);
5882 case DISAS_NORETURN
:
5888 tcg_gen_exit_tb(NULL
, 0);
5892 g_assert_not_reached();
5895 for (e
= dc
->delay_excp_list
; e
; e
= e_next
) {
5896 gen_set_label(e
->lab
);
5898 tcg_gen_movi_tl(cpu_pc
, e
->pc
);
5899 if (e
->npc
% 4 == 0) {
5900 tcg_gen_movi_tl(cpu_npc
, e
->npc
);
5902 gen_helper_raise_exception(tcg_env
, e
->excp
);
5909 static void sparc_tr_disas_log(const DisasContextBase
*dcbase
,
5910 CPUState
*cpu
, FILE *logfile
)
5912 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
5913 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
5916 static const TranslatorOps sparc_tr_ops
= {
5917 .init_disas_context
= sparc_tr_init_disas_context
,
5918 .tb_start
= sparc_tr_tb_start
,
5919 .insn_start
= sparc_tr_insn_start
,
5920 .translate_insn
= sparc_tr_translate_insn
,
5921 .tb_stop
= sparc_tr_tb_stop
,
5922 .disas_log
= sparc_tr_disas_log
,
5925 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
5926 target_ulong pc
, void *host_pc
)
5928 DisasContext dc
= {};
5930 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &sparc_tr_ops
, &dc
.base
);
5933 void sparc_tcg_init(void)
5935 static const char gregnames
[32][4] = {
5936 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5937 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5938 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5939 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5941 static const char fregnames
[32][4] = {
5942 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5943 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5944 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5945 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5948 static const struct { TCGv_i32
*ptr
; int off
; const char *name
; } r32
[] = {
5949 #ifdef TARGET_SPARC64
5950 { &cpu_xcc
, offsetof(CPUSPARCState
, xcc
), "xcc" },
5951 { &cpu_fprs
, offsetof(CPUSPARCState
, fprs
), "fprs" },
5953 { &cpu_cc_op
, offsetof(CPUSPARCState
, cc_op
), "cc_op" },
5954 { &cpu_psr
, offsetof(CPUSPARCState
, psr
), "psr" },
5957 static const struct { TCGv
*ptr
; int off
; const char *name
; } rtl
[] = {
5958 #ifdef TARGET_SPARC64
5959 { &cpu_gsr
, offsetof(CPUSPARCState
, gsr
), "gsr" },
5961 { &cpu_cond
, offsetof(CPUSPARCState
, cond
), "cond" },
5962 { &cpu_cc_src
, offsetof(CPUSPARCState
, cc_src
), "cc_src" },
5963 { &cpu_cc_src2
, offsetof(CPUSPARCState
, cc_src2
), "cc_src2" },
5964 { &cpu_cc_dst
, offsetof(CPUSPARCState
, cc_dst
), "cc_dst" },
5965 { &cpu_fsr
, offsetof(CPUSPARCState
, fsr
), "fsr" },
5966 { &cpu_pc
, offsetof(CPUSPARCState
, pc
), "pc" },
5967 { &cpu_npc
, offsetof(CPUSPARCState
, npc
), "npc" },
5968 { &cpu_y
, offsetof(CPUSPARCState
, y
), "y" },
5969 { &cpu_tbr
, offsetof(CPUSPARCState
, tbr
), "tbr" },
5974 cpu_regwptr
= tcg_global_mem_new_ptr(tcg_env
,
5975 offsetof(CPUSPARCState
, regwptr
),
5978 for (i
= 0; i
< ARRAY_SIZE(r32
); ++i
) {
5979 *r32
[i
].ptr
= tcg_global_mem_new_i32(tcg_env
, r32
[i
].off
, r32
[i
].name
);
5982 for (i
= 0; i
< ARRAY_SIZE(rtl
); ++i
) {
5983 *rtl
[i
].ptr
= tcg_global_mem_new(tcg_env
, rtl
[i
].off
, rtl
[i
].name
);
5987 for (i
= 1; i
< 8; ++i
) {
5988 cpu_regs
[i
] = tcg_global_mem_new(tcg_env
,
5989 offsetof(CPUSPARCState
, gregs
[i
]),
5993 for (i
= 8; i
< 32; ++i
) {
5994 cpu_regs
[i
] = tcg_global_mem_new(cpu_regwptr
,
5995 (i
- 8) * sizeof(target_ulong
),
5999 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
6000 cpu_fpr
[i
] = tcg_global_mem_new_i64(tcg_env
,
6001 offsetof(CPUSPARCState
, fpr
[i
]),
6006 void sparc_restore_state_to_opc(CPUState
*cs
,
6007 const TranslationBlock
*tb
,
6008 const uint64_t *data
)
6010 SPARCCPU
*cpu
= SPARC_CPU(cs
);
6011 CPUSPARCState
*env
= &cpu
->env
;
6012 target_ulong pc
= data
[0];
6013 target_ulong npc
= data
[1];
6016 if (npc
== DYNAMIC_PC
) {
6017 /* dynamic NPC: already stored */
6018 } else if (npc
& JUMP_PC
) {
6019 /* jump PC: use 'cond' and the jump targets of the translation */
6021 env
->npc
= npc
& ~3;