4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
29 #include "exec/helper-gen.h"
31 #include "exec/translator.h"
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
39 /* Dynamic PC, must exit to main loop. */
41 /* Dynamic PC, one of two values according to jump_pc[T2]. */
43 /* Dynamic PC, may lookup next TB. */
44 #define DYNAMIC_PC_LOOKUP 3
46 #define DISAS_EXIT DISAS_TARGET_0
48 /* global register indexes */
49 static TCGv_ptr cpu_regwptr
;
50 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
51 static TCGv_i32 cpu_cc_op
;
52 static TCGv_i32 cpu_psr
;
53 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
54 static TCGv cpu_regs
[32];
56 #ifndef CONFIG_USER_ONLY
61 static TCGv_i32 cpu_xcc
, cpu_fprs
;
63 static TCGv cpu_tick_cmpr
, cpu_stick_cmpr
, cpu_hstick_cmpr
;
64 static TCGv cpu_hintp
, cpu_htba
, cpu_hver
, cpu_ssr
, cpu_ver
;
68 /* Floating point registers */
69 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
71 typedef struct DisasContext
{
72 DisasContextBase base
;
73 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
74 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
75 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
78 bool address_mask_32bit
;
79 #ifndef CONFIG_USER_ONLY
86 uint32_t cc_op
; /* current CC operation */
100 // This function uses non-native bit order
101 #define GET_FIELD(X, FROM, TO) \
102 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
104 // This function uses the order in the manuals, i.e. bit 0 is 2^0
105 #define GET_FIELD_SP(X, FROM, TO) \
106 GET_FIELD(X, 31 - (TO), 31 - (FROM))
108 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
109 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
111 #ifdef TARGET_SPARC64
112 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
113 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
115 #define DFPREG(r) (r & 0x1e)
116 #define QFPREG(r) (r & 0x1c)
119 #define UA2005_HTRAP_MASK 0xff
120 #define V8_TRAP_MASK 0x7f
122 static int sign_extend(int x
, int len
)
125 return (x
<< len
) >> len
;
128 #define IS_IMM (insn & (1<<13))
130 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
132 #if defined(TARGET_SPARC64)
133 int bit
= (rd
< 32) ? 1 : 2;
134 /* If we know we've already set this bit within the TB,
135 we can avoid setting it again. */
136 if (!(dc
->fprs_dirty
& bit
)) {
137 dc
->fprs_dirty
|= bit
;
138 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
143 /* floating point registers moves */
144 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
146 TCGv_i32 ret
= tcg_temp_new_i32();
148 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
150 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
155 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
157 TCGv_i64 t
= tcg_temp_new_i64();
159 tcg_gen_extu_i32_i64(t
, v
);
160 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
161 (dst
& 1 ? 0 : 32), 32);
162 gen_update_fprs_dirty(dc
, dst
);
165 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
167 return tcg_temp_new_i32();
170 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
173 return cpu_fpr
[src
/ 2];
176 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
179 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
180 gen_update_fprs_dirty(dc
, dst
);
183 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
185 return cpu_fpr
[DFPREG(dst
) / 2];
188 static void gen_op_load_fpr_QT0(unsigned int src
)
190 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
191 offsetof(CPU_QuadU
, ll
.upper
));
192 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
193 offsetof(CPU_QuadU
, ll
.lower
));
196 static void gen_op_load_fpr_QT1(unsigned int src
)
198 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
199 offsetof(CPU_QuadU
, ll
.upper
));
200 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
201 offsetof(CPU_QuadU
, ll
.lower
));
204 static void gen_op_store_QT0_fpr(unsigned int dst
)
206 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
207 offsetof(CPU_QuadU
, ll
.upper
));
208 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
209 offsetof(CPU_QuadU
, ll
.lower
));
212 static void gen_store_fpr_Q(DisasContext
*dc
, unsigned int dst
,
213 TCGv_i64 v1
, TCGv_i64 v2
)
217 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v1
);
218 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2 + 1], v2
);
219 gen_update_fprs_dirty(dc
, dst
);
222 #ifdef TARGET_SPARC64
223 static TCGv_i64
gen_load_fpr_Q0(DisasContext
*dc
, unsigned int src
)
226 return cpu_fpr
[src
/ 2];
229 static TCGv_i64
gen_load_fpr_Q1(DisasContext
*dc
, unsigned int src
)
232 return cpu_fpr
[src
/ 2 + 1];
235 static void gen_move_Q(DisasContext
*dc
, unsigned int rd
, unsigned int rs
)
240 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
241 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
242 gen_update_fprs_dirty(dc
, rd
);
247 #ifdef CONFIG_USER_ONLY
248 #define supervisor(dc) 0
249 #ifdef TARGET_SPARC64
250 #define hypervisor(dc) 0
253 #ifdef TARGET_SPARC64
254 #define hypervisor(dc) (dc->hypervisor)
255 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
257 #define supervisor(dc) (dc->supervisor)
261 #ifdef TARGET_SPARC64
263 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
265 #define AM_CHECK(dc) (1)
269 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
271 #ifdef TARGET_SPARC64
273 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
277 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
281 return cpu_regs
[reg
];
283 TCGv t
= tcg_temp_new();
284 tcg_gen_movi_tl(t
, 0);
289 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
293 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
297 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
301 return cpu_regs
[reg
];
303 return tcg_temp_new();
307 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
309 return translator_use_goto_tb(&s
->base
, pc
) &&
310 translator_use_goto_tb(&s
->base
, npc
);
313 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
314 target_ulong pc
, target_ulong npc
)
316 if (use_goto_tb(s
, pc
, npc
)) {
317 /* jump to same page: we can use a direct jump */
318 tcg_gen_goto_tb(tb_num
);
319 tcg_gen_movi_tl(cpu_pc
, pc
);
320 tcg_gen_movi_tl(cpu_npc
, npc
);
321 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
323 /* jump to another page: we can use an indirect jump */
324 tcg_gen_movi_tl(cpu_pc
, pc
);
325 tcg_gen_movi_tl(cpu_npc
, npc
);
326 tcg_gen_lookup_and_goto_ptr();
331 static void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
333 tcg_gen_extu_i32_tl(reg
, src
);
334 tcg_gen_extract_tl(reg
, reg
, PSR_NEG_SHIFT
, 1);
337 static void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
339 tcg_gen_extu_i32_tl(reg
, src
);
340 tcg_gen_extract_tl(reg
, reg
, PSR_ZERO_SHIFT
, 1);
343 static void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
345 tcg_gen_extu_i32_tl(reg
, src
);
346 tcg_gen_extract_tl(reg
, reg
, PSR_OVF_SHIFT
, 1);
349 static void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
351 tcg_gen_extu_i32_tl(reg
, src
);
352 tcg_gen_extract_tl(reg
, reg
, PSR_CARRY_SHIFT
, 1);
355 static void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
357 tcg_gen_mov_tl(cpu_cc_src
, src1
);
358 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
359 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
360 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
363 static TCGv_i32
gen_add32_carry32(void)
365 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
367 /* Carry is computed from a previous add: (dst < src) */
368 #if TARGET_LONG_BITS == 64
369 cc_src1_32
= tcg_temp_new_i32();
370 cc_src2_32
= tcg_temp_new_i32();
371 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_dst
);
372 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src
);
374 cc_src1_32
= cpu_cc_dst
;
375 cc_src2_32
= cpu_cc_src
;
378 carry_32
= tcg_temp_new_i32();
379 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
384 static TCGv_i32
gen_sub32_carry32(void)
386 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
388 /* Carry is computed from a previous borrow: (src1 < src2) */
389 #if TARGET_LONG_BITS == 64
390 cc_src1_32
= tcg_temp_new_i32();
391 cc_src2_32
= tcg_temp_new_i32();
392 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_src
);
393 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src2
);
395 cc_src1_32
= cpu_cc_src
;
396 cc_src2_32
= cpu_cc_src2
;
399 carry_32
= tcg_temp_new_i32();
400 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
405 static void gen_op_addx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
406 TCGv src2
, int update_cc
)
414 /* Carry is known to be zero. Fall back to plain ADD. */
416 gen_op_add_cc(dst
, src1
, src2
);
418 tcg_gen_add_tl(dst
, src1
, src2
);
425 if (TARGET_LONG_BITS
== 32) {
426 /* We can re-use the host's hardware carry generation by using
427 an ADD2 opcode. We discard the low part of the output.
428 Ideally we'd combine this operation with the add that
429 generated the carry in the first place. */
430 carry
= tcg_temp_new();
431 tcg_gen_add2_tl(carry
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
434 carry_32
= gen_add32_carry32();
440 carry_32
= gen_sub32_carry32();
444 /* We need external help to produce the carry. */
445 carry_32
= tcg_temp_new_i32();
446 gen_helper_compute_C_icc(carry_32
, tcg_env
);
450 #if TARGET_LONG_BITS == 64
451 carry
= tcg_temp_new();
452 tcg_gen_extu_i32_i64(carry
, carry_32
);
457 tcg_gen_add_tl(dst
, src1
, src2
);
458 tcg_gen_add_tl(dst
, dst
, carry
);
462 tcg_gen_mov_tl(cpu_cc_src
, src1
);
463 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
464 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
465 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADDX
);
466 dc
->cc_op
= CC_OP_ADDX
;
470 static void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
472 tcg_gen_mov_tl(cpu_cc_src
, src1
);
473 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
474 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
475 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
478 static void gen_op_subx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
479 TCGv src2
, int update_cc
)
487 /* Carry is known to be zero. Fall back to plain SUB. */
489 gen_op_sub_cc(dst
, src1
, src2
);
491 tcg_gen_sub_tl(dst
, src1
, src2
);
498 carry_32
= gen_add32_carry32();
504 if (TARGET_LONG_BITS
== 32) {
505 /* We can re-use the host's hardware carry generation by using
506 a SUB2 opcode. We discard the low part of the output.
507 Ideally we'd combine this operation with the add that
508 generated the carry in the first place. */
509 carry
= tcg_temp_new();
510 tcg_gen_sub2_tl(carry
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
513 carry_32
= gen_sub32_carry32();
517 /* We need external help to produce the carry. */
518 carry_32
= tcg_temp_new_i32();
519 gen_helper_compute_C_icc(carry_32
, tcg_env
);
523 #if TARGET_LONG_BITS == 64
524 carry
= tcg_temp_new();
525 tcg_gen_extu_i32_i64(carry
, carry_32
);
530 tcg_gen_sub_tl(dst
, src1
, src2
);
531 tcg_gen_sub_tl(dst
, dst
, carry
);
535 tcg_gen_mov_tl(cpu_cc_src
, src1
);
536 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
537 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
538 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUBX
);
539 dc
->cc_op
= CC_OP_SUBX
;
543 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
545 TCGv r_temp
, zero
, t0
;
547 r_temp
= tcg_temp_new();
554 zero
= tcg_constant_tl(0);
555 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
556 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
557 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
558 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
562 // env->y = (b2 << 31) | (env->y >> 1);
563 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
564 tcg_gen_deposit_tl(cpu_y
, t0
, cpu_cc_src
, 31, 1);
567 gen_mov_reg_N(t0
, cpu_psr
);
568 gen_mov_reg_V(r_temp
, cpu_psr
);
569 tcg_gen_xor_tl(t0
, t0
, r_temp
);
571 // T0 = (b1 << 31) | (T0 >> 1);
573 tcg_gen_shli_tl(t0
, t0
, 31);
574 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
575 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
577 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
579 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
582 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
584 #if TARGET_LONG_BITS == 32
586 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
588 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
591 TCGv t0
= tcg_temp_new_i64();
592 TCGv t1
= tcg_temp_new_i64();
595 tcg_gen_ext32s_i64(t0
, src1
);
596 tcg_gen_ext32s_i64(t1
, src2
);
598 tcg_gen_ext32u_i64(t0
, src1
);
599 tcg_gen_ext32u_i64(t1
, src2
);
602 tcg_gen_mul_i64(dst
, t0
, t1
);
603 tcg_gen_shri_i64(cpu_y
, dst
, 32);
607 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
609 /* zero-extend truncated operands before multiplication */
610 gen_op_multiply(dst
, src1
, src2
, 0);
613 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
615 /* sign-extend truncated operands before multiplication */
616 gen_op_multiply(dst
, src1
, src2
, 1);
620 static void gen_op_eval_ba(TCGv dst
)
622 tcg_gen_movi_tl(dst
, 1);
626 static void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
628 gen_mov_reg_Z(dst
, src
);
632 static void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
634 TCGv t0
= tcg_temp_new();
635 gen_mov_reg_N(t0
, src
);
636 gen_mov_reg_V(dst
, src
);
637 tcg_gen_xor_tl(dst
, dst
, t0
);
638 gen_mov_reg_Z(t0
, src
);
639 tcg_gen_or_tl(dst
, dst
, t0
);
643 static void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
645 TCGv t0
= tcg_temp_new();
646 gen_mov_reg_V(t0
, src
);
647 gen_mov_reg_N(dst
, src
);
648 tcg_gen_xor_tl(dst
, dst
, t0
);
652 static void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
654 TCGv t0
= tcg_temp_new();
655 gen_mov_reg_Z(t0
, src
);
656 gen_mov_reg_C(dst
, src
);
657 tcg_gen_or_tl(dst
, dst
, t0
);
661 static void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
663 gen_mov_reg_C(dst
, src
);
667 static void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
669 gen_mov_reg_V(dst
, src
);
673 static void gen_op_eval_bn(TCGv dst
)
675 tcg_gen_movi_tl(dst
, 0);
679 static void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
681 gen_mov_reg_N(dst
, src
);
685 static void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
687 gen_mov_reg_Z(dst
, src
);
688 tcg_gen_xori_tl(dst
, dst
, 0x1);
692 static void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
694 gen_op_eval_ble(dst
, src
);
695 tcg_gen_xori_tl(dst
, dst
, 0x1);
699 static void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
701 gen_op_eval_bl(dst
, src
);
702 tcg_gen_xori_tl(dst
, dst
, 0x1);
706 static void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
708 gen_op_eval_bleu(dst
, src
);
709 tcg_gen_xori_tl(dst
, dst
, 0x1);
713 static void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
715 gen_mov_reg_C(dst
, src
);
716 tcg_gen_xori_tl(dst
, dst
, 0x1);
720 static void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
722 gen_mov_reg_N(dst
, src
);
723 tcg_gen_xori_tl(dst
, dst
, 0x1);
727 static void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
729 gen_mov_reg_V(dst
, src
);
730 tcg_gen_xori_tl(dst
, dst
, 0x1);
734 FPSR bit field FCC1 | FCC0:
740 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
741 unsigned int fcc_offset
)
743 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
744 tcg_gen_andi_tl(reg
, reg
, 0x1);
747 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
749 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
750 tcg_gen_andi_tl(reg
, reg
, 0x1);
754 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
756 TCGv t0
= tcg_temp_new();
757 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
758 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
759 tcg_gen_or_tl(dst
, dst
, t0
);
762 // 1 or 2: FCC0 ^ FCC1
763 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
765 TCGv t0
= tcg_temp_new();
766 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
767 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
768 tcg_gen_xor_tl(dst
, dst
, t0
);
772 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
774 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
778 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
780 TCGv t0
= tcg_temp_new();
781 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
782 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
783 tcg_gen_andc_tl(dst
, dst
, t0
);
787 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
789 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
793 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
795 TCGv t0
= tcg_temp_new();
796 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
797 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
798 tcg_gen_andc_tl(dst
, t0
, dst
);
802 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
804 TCGv t0
= tcg_temp_new();
805 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
806 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
807 tcg_gen_and_tl(dst
, dst
, t0
);
811 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
813 TCGv t0
= tcg_temp_new();
814 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
815 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
816 tcg_gen_or_tl(dst
, dst
, t0
);
817 tcg_gen_xori_tl(dst
, dst
, 0x1);
820 // 0 or 3: !(FCC0 ^ FCC1)
821 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
823 TCGv t0
= tcg_temp_new();
824 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
825 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
826 tcg_gen_xor_tl(dst
, dst
, t0
);
827 tcg_gen_xori_tl(dst
, dst
, 0x1);
831 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
833 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
834 tcg_gen_xori_tl(dst
, dst
, 0x1);
837 // !1: !(FCC0 & !FCC1)
838 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
840 TCGv t0
= tcg_temp_new();
841 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
842 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
843 tcg_gen_andc_tl(dst
, dst
, t0
);
844 tcg_gen_xori_tl(dst
, dst
, 0x1);
848 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
850 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
851 tcg_gen_xori_tl(dst
, dst
, 0x1);
854 // !2: !(!FCC0 & FCC1)
855 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
857 TCGv t0
= tcg_temp_new();
858 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
859 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
860 tcg_gen_andc_tl(dst
, t0
, dst
);
861 tcg_gen_xori_tl(dst
, dst
, 0x1);
864 // !3: !(FCC0 & FCC1)
865 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
867 TCGv t0
= tcg_temp_new();
868 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
869 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
870 tcg_gen_and_tl(dst
, dst
, t0
);
871 tcg_gen_xori_tl(dst
, dst
, 0x1);
874 static void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
875 target_ulong pc2
, TCGv r_cond
)
877 TCGLabel
*l1
= gen_new_label();
879 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
881 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
884 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
887 static void gen_branch_a(DisasContext
*dc
, target_ulong pc1
)
889 TCGLabel
*l1
= gen_new_label();
890 target_ulong npc
= dc
->npc
;
892 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_cond
, 0, l1
);
894 gen_goto_tb(dc
, 0, npc
, pc1
);
897 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
899 dc
->base
.is_jmp
= DISAS_NORETURN
;
902 static void gen_branch_n(DisasContext
*dc
, target_ulong pc1
)
904 target_ulong npc
= dc
->npc
;
909 case DYNAMIC_PC_LOOKUP
:
910 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
911 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
912 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
,
913 cpu_cond
, tcg_constant_tl(0),
914 tcg_constant_tl(pc1
), cpu_npc
);
918 g_assert_not_reached();
922 dc
->jump_pc
[0] = pc1
;
923 dc
->jump_pc
[1] = npc
+ 4;
928 static void gen_generic_branch(DisasContext
*dc
)
930 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
931 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
932 TCGv zero
= tcg_constant_tl(0);
934 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
937 /* call this function before using the condition register as it may
938 have been set for a jump */
939 static void flush_cond(DisasContext
*dc
)
941 if (dc
->npc
== JUMP_PC
) {
942 gen_generic_branch(dc
);
943 dc
->npc
= DYNAMIC_PC_LOOKUP
;
947 static void save_npc(DisasContext
*dc
)
952 gen_generic_branch(dc
);
953 dc
->npc
= DYNAMIC_PC_LOOKUP
;
956 case DYNAMIC_PC_LOOKUP
:
959 g_assert_not_reached();
962 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
966 static void update_psr(DisasContext
*dc
)
968 if (dc
->cc_op
!= CC_OP_FLAGS
) {
969 dc
->cc_op
= CC_OP_FLAGS
;
970 gen_helper_compute_psr(tcg_env
);
974 static void save_state(DisasContext
*dc
)
976 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
980 static void gen_exception(DisasContext
*dc
, int which
)
983 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(which
));
984 dc
->base
.is_jmp
= DISAS_NORETURN
;
987 static void gen_check_align(TCGv addr
, int mask
)
989 gen_helper_check_align(tcg_env
, addr
, tcg_constant_i32(mask
));
992 static void gen_mov_pc_npc(DisasContext
*dc
)
997 gen_generic_branch(dc
);
998 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
999 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1002 case DYNAMIC_PC_LOOKUP
:
1003 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1007 g_assert_not_reached();
1014 static void gen_op_next_insn(void)
1016 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1017 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1020 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1023 static int subcc_cond
[16] = {
1039 -1, /* no overflow */
1042 static int logic_cond
[16] = {
1044 TCG_COND_EQ
, /* eq: Z */
1045 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1046 TCG_COND_LT
, /* lt: N ^ V -> N */
1047 TCG_COND_EQ
, /* leu: C | Z -> Z */
1048 TCG_COND_NEVER
, /* ltu: C -> 0 */
1049 TCG_COND_LT
, /* neg: N */
1050 TCG_COND_NEVER
, /* vs: V -> 0 */
1052 TCG_COND_NE
, /* ne: !Z */
1053 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1054 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1055 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1056 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1057 TCG_COND_GE
, /* pos: !N */
1058 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1064 #ifdef TARGET_SPARC64
1074 switch (dc
->cc_op
) {
1076 cmp
->cond
= logic_cond
[cond
];
1078 cmp
->is_bool
= false;
1079 cmp
->c2
= tcg_constant_tl(0);
1080 #ifdef TARGET_SPARC64
1082 cmp
->c1
= tcg_temp_new();
1083 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1087 cmp
->c1
= cpu_cc_dst
;
1094 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1095 goto do_compare_dst_0
;
1097 case 7: /* overflow */
1098 case 15: /* !overflow */
1102 cmp
->cond
= subcc_cond
[cond
];
1103 cmp
->is_bool
= false;
1104 #ifdef TARGET_SPARC64
1106 /* Note that sign-extension works for unsigned compares as
1107 long as both operands are sign-extended. */
1108 cmp
->c1
= tcg_temp_new();
1109 cmp
->c2
= tcg_temp_new();
1110 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1111 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1115 cmp
->c1
= cpu_cc_src
;
1116 cmp
->c2
= cpu_cc_src2
;
1123 gen_helper_compute_psr(tcg_env
);
1124 dc
->cc_op
= CC_OP_FLAGS
;
1128 /* We're going to generate a boolean result. */
1129 cmp
->cond
= TCG_COND_NE
;
1130 cmp
->is_bool
= true;
1131 cmp
->c1
= r_dst
= tcg_temp_new();
1132 cmp
->c2
= tcg_constant_tl(0);
1136 gen_op_eval_bn(r_dst
);
1139 gen_op_eval_be(r_dst
, r_src
);
1142 gen_op_eval_ble(r_dst
, r_src
);
1145 gen_op_eval_bl(r_dst
, r_src
);
1148 gen_op_eval_bleu(r_dst
, r_src
);
1151 gen_op_eval_bcs(r_dst
, r_src
);
1154 gen_op_eval_bneg(r_dst
, r_src
);
1157 gen_op_eval_bvs(r_dst
, r_src
);
1160 gen_op_eval_ba(r_dst
);
1163 gen_op_eval_bne(r_dst
, r_src
);
1166 gen_op_eval_bg(r_dst
, r_src
);
1169 gen_op_eval_bge(r_dst
, r_src
);
1172 gen_op_eval_bgu(r_dst
, r_src
);
1175 gen_op_eval_bcc(r_dst
, r_src
);
1178 gen_op_eval_bpos(r_dst
, r_src
);
1181 gen_op_eval_bvc(r_dst
, r_src
);
1188 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1190 unsigned int offset
;
1193 /* For now we still generate a straight boolean result. */
1194 cmp
->cond
= TCG_COND_NE
;
1195 cmp
->is_bool
= true;
1196 cmp
->c1
= r_dst
= tcg_temp_new();
1197 cmp
->c2
= tcg_constant_tl(0);
1217 gen_op_eval_bn(r_dst
);
1220 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1223 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1226 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1229 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1232 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1235 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1238 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1241 gen_op_eval_ba(r_dst
);
1244 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1247 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1250 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1253 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1256 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1259 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1262 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1267 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1271 gen_compare(&cmp
, cc
, cond
, dc
);
1273 /* The interface is to return a boolean in r_dst. */
1275 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1277 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1281 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1284 gen_fcompare(&cmp
, cc
, cond
);
1286 /* The interface is to return a boolean in r_dst. */
1288 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1290 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1294 #ifdef TARGET_SPARC64
1296 static const int gen_tcg_cond_reg
[8] = {
1307 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1309 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1310 cmp
->is_bool
= false;
1312 cmp
->c2
= tcg_constant_tl(0);
1315 static void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1318 gen_compare_reg(&cmp
, cond
, r_src
);
1320 /* The interface is to return a boolean in r_dst. */
1321 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1325 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1327 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1328 target_ulong target
= dc
->pc
+ offset
;
1330 #ifdef TARGET_SPARC64
1331 if (unlikely(AM_CHECK(dc
))) {
1332 target
&= 0xffffffffULL
;
1336 /* unconditional not taken */
1338 dc
->pc
= dc
->npc
+ 4;
1339 dc
->npc
= dc
->pc
+ 4;
1342 dc
->npc
= dc
->pc
+ 4;
1344 } else if (cond
== 0x8) {
1345 /* unconditional taken */
1348 dc
->npc
= dc
->pc
+ 4;
1352 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1356 gen_cond(cpu_cond
, cc
, cond
, dc
);
1358 gen_branch_a(dc
, target
);
1360 gen_branch_n(dc
, target
);
1365 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1367 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1368 target_ulong target
= dc
->pc
+ offset
;
1370 #ifdef TARGET_SPARC64
1371 if (unlikely(AM_CHECK(dc
))) {
1372 target
&= 0xffffffffULL
;
1376 /* unconditional not taken */
1378 dc
->pc
= dc
->npc
+ 4;
1379 dc
->npc
= dc
->pc
+ 4;
1382 dc
->npc
= dc
->pc
+ 4;
1384 } else if (cond
== 0x8) {
1385 /* unconditional taken */
1388 dc
->npc
= dc
->pc
+ 4;
1392 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1396 gen_fcond(cpu_cond
, cc
, cond
);
1398 gen_branch_a(dc
, target
);
1400 gen_branch_n(dc
, target
);
1405 #ifdef TARGET_SPARC64
1406 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1409 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1410 target_ulong target
= dc
->pc
+ offset
;
1412 if (unlikely(AM_CHECK(dc
))) {
1413 target
&= 0xffffffffULL
;
1416 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1418 gen_branch_a(dc
, target
);
1420 gen_branch_n(dc
, target
);
1424 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1428 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1431 gen_helper_fcmps_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1434 gen_helper_fcmps_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1437 gen_helper_fcmps_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1442 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1446 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1449 gen_helper_fcmpd_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1452 gen_helper_fcmpd_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1455 gen_helper_fcmpd_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1460 static void gen_op_fcmpq(int fccno
)
1464 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1467 gen_helper_fcmpq_fcc1(cpu_fsr
, tcg_env
);
1470 gen_helper_fcmpq_fcc2(cpu_fsr
, tcg_env
);
1473 gen_helper_fcmpq_fcc3(cpu_fsr
, tcg_env
);
1478 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1482 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1485 gen_helper_fcmpes_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1488 gen_helper_fcmpes_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1491 gen_helper_fcmpes_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1496 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1500 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1503 gen_helper_fcmped_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1506 gen_helper_fcmped_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1509 gen_helper_fcmped_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1514 static void gen_op_fcmpeq(int fccno
)
1518 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1521 gen_helper_fcmpeq_fcc1(cpu_fsr
, tcg_env
);
1524 gen_helper_fcmpeq_fcc2(cpu_fsr
, tcg_env
);
1527 gen_helper_fcmpeq_fcc3(cpu_fsr
, tcg_env
);
1534 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1536 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1539 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1541 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1544 static void gen_op_fcmpq(int fccno
)
1546 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1549 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1551 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1554 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1556 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1559 static void gen_op_fcmpeq(int fccno
)
1561 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1565 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1567 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1568 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1569 gen_exception(dc
, TT_FP_EXCP
);
1572 static int gen_trap_ifnofpu(DisasContext
*dc
)
1574 #if !defined(CONFIG_USER_ONLY)
1575 if (!dc
->fpu_enabled
) {
1576 gen_exception(dc
, TT_NFPU_INSN
);
1583 static void gen_op_clear_ieee_excp_and_FTT(void)
1585 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1588 static void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1589 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1593 src
= gen_load_fpr_F(dc
, rs
);
1594 dst
= gen_dest_fpr_F(dc
);
1596 gen(dst
, tcg_env
, src
);
1597 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1599 gen_store_fpr_F(dc
, rd
, dst
);
1602 static void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1603 void (*gen
)(TCGv_i32
, TCGv_i32
))
1607 src
= gen_load_fpr_F(dc
, rs
);
1608 dst
= gen_dest_fpr_F(dc
);
1612 gen_store_fpr_F(dc
, rd
, dst
);
1615 static void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1616 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1618 TCGv_i32 dst
, src1
, src2
;
1620 src1
= gen_load_fpr_F(dc
, rs1
);
1621 src2
= gen_load_fpr_F(dc
, rs2
);
1622 dst
= gen_dest_fpr_F(dc
);
1624 gen(dst
, tcg_env
, src1
, src2
);
1625 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1627 gen_store_fpr_F(dc
, rd
, dst
);
#ifdef TARGET_SPARC64
/* Single-precision binary op with no IEEE exception check (V9 only). */
static void gen_ne_fop_FFF(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2;

    src1 = gen_load_fpr_F(dc, rs1);
    src2 = gen_load_fpr_F(dc, rs2);
    dst = gen_dest_fpr_F(dc);

    gen(dst, src1, src2);

    gen_store_fpr_F(dc, rd, dst);
}
#endif
1646 static void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1647 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1651 src
= gen_load_fpr_D(dc
, rs
);
1652 dst
= gen_dest_fpr_D(dc
, rd
);
1654 gen(dst
, tcg_env
, src
);
1655 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1657 gen_store_fpr_D(dc
, rd
, dst
);
#ifdef TARGET_SPARC64
/* Double-precision unary op with no IEEE exception check (V9 only).
   NOTE(review): the gen(dst, src) call line was dropped by extraction;
   restored per the upstream pattern. */
static void gen_ne_fop_DD(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    src = gen_load_fpr_D(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
1675 static void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1676 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1678 TCGv_i64 dst
, src1
, src2
;
1680 src1
= gen_load_fpr_D(dc
, rs1
);
1681 src2
= gen_load_fpr_D(dc
, rs2
);
1682 dst
= gen_dest_fpr_D(dc
, rd
);
1684 gen(dst
, tcg_env
, src1
, src2
);
1685 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1687 gen_store_fpr_D(dc
, rd
, dst
);
#ifdef TARGET_SPARC64
/* Double-precision binary op with no IEEE exception check. */
static void gen_ne_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                           void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

/* VIS binary op that additionally consumes %gsr (e.g. faligndata). */
static void gen_gsr_fop_DDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, cpu_gsr, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}

/* Three-operand op where frd is also read as a source (src0). */
static void gen_ne_fop_DDDD(DisasContext *dc, int rd, int rs1, int rs2,
                            void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src0, src1, src2;

    src1 = gen_load_fpr_D(dc, rs1);
    src2 = gen_load_fpr_D(dc, rs2);
    src0 = gen_load_fpr_D(dc, rd);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, src0, src1, src2);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
1735 static void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1736 void (*gen
)(TCGv_ptr
))
1738 gen_op_load_fpr_QT1(QFPREG(rs
));
1741 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1743 gen_op_store_QT0_fpr(QFPREG(rd
));
1744 gen_update_fprs_dirty(dc
, QFPREG(rd
));
#ifdef TARGET_SPARC64
/* Quad-precision unary op with no IEEE exception check (V9 only).
   NOTE(review): the gen(tcg_env) call line was dropped by extraction;
   restored per the upstream pattern. */
static void gen_ne_fop_QQ(DisasContext *dc, int rd, int rs,
                          void (*gen)(TCGv_ptr))
{
    gen_op_load_fpr_QT1(QFPREG(rs));

    gen(tcg_env);

    gen_op_store_QT0_fpr(QFPREG(rd));
    gen_update_fprs_dirty(dc, QFPREG(rd));
}
#endif
1760 static void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1761 void (*gen
)(TCGv_ptr
))
1763 gen_op_load_fpr_QT0(QFPREG(rs1
));
1764 gen_op_load_fpr_QT1(QFPREG(rs2
));
1767 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1769 gen_op_store_QT0_fpr(QFPREG(rd
));
1770 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1773 static void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1774 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1777 TCGv_i32 src1
, src2
;
1779 src1
= gen_load_fpr_F(dc
, rs1
);
1780 src2
= gen_load_fpr_F(dc
, rs2
);
1781 dst
= gen_dest_fpr_D(dc
, rd
);
1783 gen(dst
, tcg_env
, src1
, src2
);
1784 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1786 gen_store_fpr_D(dc
, rd
, dst
);
1789 static void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1790 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1792 TCGv_i64 src1
, src2
;
1794 src1
= gen_load_fpr_D(dc
, rs1
);
1795 src2
= gen_load_fpr_D(dc
, rs2
);
1797 gen(tcg_env
, src1
, src2
);
1798 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1800 gen_op_store_QT0_fpr(QFPREG(rd
));
1801 gen_update_fprs_dirty(dc
, QFPREG(rd
));
#ifdef TARGET_SPARC64
/* Conversion op: single-precision source to double-precision result,
   with IEEE exception check (V9 only). */
static void gen_fop_DF(DisasContext *dc, int rd, int rs,
                       void (*gen)(TCGv_i64, TCGv_ptr, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    src = gen_load_fpr_F(dc, rs);
    dst = gen_dest_fpr_D(dc, rd);

    gen(dst, tcg_env, src);
    gen_helper_check_ieee_exceptions(cpu_fsr, tcg_env);

    gen_store_fpr_D(dc, rd, dst);
}
#endif
1821 static void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1822 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1827 src
= gen_load_fpr_F(dc
, rs
);
1828 dst
= gen_dest_fpr_D(dc
, rd
);
1830 gen(dst
, tcg_env
, src
);
1832 gen_store_fpr_D(dc
, rd
, dst
);
1835 static void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1836 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1841 src
= gen_load_fpr_D(dc
, rs
);
1842 dst
= gen_dest_fpr_F(dc
);
1844 gen(dst
, tcg_env
, src
);
1845 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1847 gen_store_fpr_F(dc
, rd
, dst
);
1850 static void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1851 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1855 gen_op_load_fpr_QT1(QFPREG(rs
));
1856 dst
= gen_dest_fpr_F(dc
);
1859 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1861 gen_store_fpr_F(dc
, rd
, dst
);
1864 static void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1865 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1869 gen_op_load_fpr_QT1(QFPREG(rs
));
1870 dst
= gen_dest_fpr_D(dc
, rd
);
1873 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
1875 gen_store_fpr_D(dc
, rd
, dst
);
1878 static void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1879 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1883 src
= gen_load_fpr_F(dc
, rs
);
1887 gen_op_store_QT0_fpr(QFPREG(rd
));
1888 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1891 static void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1892 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1896 src
= gen_load_fpr_D(dc
, rs
);
1900 gen_op_store_QT0_fpr(QFPREG(rd
));
1901 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1904 static void gen_swap(DisasContext
*dc
, TCGv dst
, TCGv src
,
1905 TCGv addr
, int mmu_idx
, MemOp memop
)
1907 gen_address_mask(dc
, addr
);
1908 tcg_gen_atomic_xchg_tl(dst
, addr
, src
, mmu_idx
, memop
| MO_ALIGN
);
1911 static void gen_ldstub(DisasContext
*dc
, TCGv dst
, TCGv addr
, int mmu_idx
)
1913 TCGv m1
= tcg_constant_tl(0xff);
1914 gen_address_mask(dc
, addr
);
1915 tcg_gen_atomic_xchg_tl(dst
, addr
, m1
, mmu_idx
, MO_UB
);
1919 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
1938 static DisasASI
get_asi(DisasContext
*dc
, int insn
, MemOp memop
)
1940 int asi
= GET_FIELD(insn
, 19, 26);
1941 ASIType type
= GET_ASI_HELPER
;
1942 int mem_idx
= dc
->mem_idx
;
1944 #ifndef TARGET_SPARC64
1945 /* Before v9, all asis are immediate and privileged. */
1947 gen_exception(dc
, TT_ILL_INSN
);
1948 type
= GET_ASI_EXCP
;
1949 } else if (supervisor(dc
)
1950 /* Note that LEON accepts ASI_USERDATA in user mode, for
1951 use with CASA. Also note that previous versions of
1952 QEMU allowed (and old versions of gcc emitted) ASI_P
1953 for LEON, which is incorrect. */
1954 || (asi
== ASI_USERDATA
1955 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1957 case ASI_USERDATA
: /* User data access */
1958 mem_idx
= MMU_USER_IDX
;
1959 type
= GET_ASI_DIRECT
;
1961 case ASI_KERNELDATA
: /* Supervisor data access */
1962 mem_idx
= MMU_KERNEL_IDX
;
1963 type
= GET_ASI_DIRECT
;
1965 case ASI_M_BYPASS
: /* MMU passthrough */
1966 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1967 mem_idx
= MMU_PHYS_IDX
;
1968 type
= GET_ASI_DIRECT
;
1970 case ASI_M_BCOPY
: /* Block copy, sta access */
1971 mem_idx
= MMU_KERNEL_IDX
;
1972 type
= GET_ASI_BCOPY
;
1974 case ASI_M_BFILL
: /* Block fill, stda access */
1975 mem_idx
= MMU_KERNEL_IDX
;
1976 type
= GET_ASI_BFILL
;
1980 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1981 * permissions check in get_physical_address(..).
1983 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1985 gen_exception(dc
, TT_PRIV_INSN
);
1986 type
= GET_ASI_EXCP
;
1992 /* With v9, all asis below 0x80 are privileged. */
1993 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1994 down that bit into DisasContext. For the moment that's ok,
1995 since the direct implementations below doesn't have any ASIs
1996 in the restricted [0x30, 0x7f] range, and the check will be
1997 done properly in the helper. */
1998 if (!supervisor(dc
) && asi
< 0x80) {
1999 gen_exception(dc
, TT_PRIV_ACT
);
2000 type
= GET_ASI_EXCP
;
2003 case ASI_REAL
: /* Bypass */
2004 case ASI_REAL_IO
: /* Bypass, non-cacheable */
2005 case ASI_REAL_L
: /* Bypass LE */
2006 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
2007 case ASI_TWINX_REAL
: /* Real address, twinx */
2008 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
2009 case ASI_QUAD_LDD_PHYS
:
2010 case ASI_QUAD_LDD_PHYS_L
:
2011 mem_idx
= MMU_PHYS_IDX
;
2013 case ASI_N
: /* Nucleus */
2014 case ASI_NL
: /* Nucleus LE */
2017 case ASI_NUCLEUS_QUAD_LDD
:
2018 case ASI_NUCLEUS_QUAD_LDD_L
:
2019 if (hypervisor(dc
)) {
2020 mem_idx
= MMU_PHYS_IDX
;
2022 mem_idx
= MMU_NUCLEUS_IDX
;
2025 case ASI_AIUP
: /* As if user primary */
2026 case ASI_AIUPL
: /* As if user primary LE */
2027 case ASI_TWINX_AIUP
:
2028 case ASI_TWINX_AIUP_L
:
2029 case ASI_BLK_AIUP_4V
:
2030 case ASI_BLK_AIUP_L_4V
:
2033 mem_idx
= MMU_USER_IDX
;
2035 case ASI_AIUS
: /* As if user secondary */
2036 case ASI_AIUSL
: /* As if user secondary LE */
2037 case ASI_TWINX_AIUS
:
2038 case ASI_TWINX_AIUS_L
:
2039 case ASI_BLK_AIUS_4V
:
2040 case ASI_BLK_AIUS_L_4V
:
2043 mem_idx
= MMU_USER_SECONDARY_IDX
;
2045 case ASI_S
: /* Secondary */
2046 case ASI_SL
: /* Secondary LE */
2049 case ASI_BLK_COMMIT_S
:
2056 if (mem_idx
== MMU_USER_IDX
) {
2057 mem_idx
= MMU_USER_SECONDARY_IDX
;
2058 } else if (mem_idx
== MMU_KERNEL_IDX
) {
2059 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
2062 case ASI_P
: /* Primary */
2063 case ASI_PL
: /* Primary LE */
2066 case ASI_BLK_COMMIT_P
:
2090 type
= GET_ASI_DIRECT
;
2092 case ASI_TWINX_REAL
:
2093 case ASI_TWINX_REAL_L
:
2096 case ASI_TWINX_AIUP
:
2097 case ASI_TWINX_AIUP_L
:
2098 case ASI_TWINX_AIUS
:
2099 case ASI_TWINX_AIUS_L
:
2104 case ASI_QUAD_LDD_PHYS
:
2105 case ASI_QUAD_LDD_PHYS_L
:
2106 case ASI_NUCLEUS_QUAD_LDD
:
2107 case ASI_NUCLEUS_QUAD_LDD_L
:
2108 type
= GET_ASI_DTWINX
;
2110 case ASI_BLK_COMMIT_P
:
2111 case ASI_BLK_COMMIT_S
:
2112 case ASI_BLK_AIUP_4V
:
2113 case ASI_BLK_AIUP_L_4V
:
2116 case ASI_BLK_AIUS_4V
:
2117 case ASI_BLK_AIUS_L_4V
:
2124 type
= GET_ASI_BLOCK
;
2131 type
= GET_ASI_SHORT
;
2138 type
= GET_ASI_SHORT
;
2141 /* The little-endian asis all have bit 3 set. */
2148 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
2151 static void gen_ld_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
,
2152 int insn
, MemOp memop
)
2154 DisasASI da
= get_asi(dc
, insn
, memop
);
2159 case GET_ASI_DTWINX
: /* Reserved for ldda. */
2160 gen_exception(dc
, TT_ILL_INSN
);
2162 case GET_ASI_DIRECT
:
2163 gen_address_mask(dc
, addr
);
2164 tcg_gen_qemu_ld_tl(dst
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2168 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2169 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2172 #ifdef TARGET_SPARC64
2173 gen_helper_ld_asi(dst
, tcg_env
, addr
, r_asi
, r_mop
);
2176 TCGv_i64 t64
= tcg_temp_new_i64();
2177 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2178 tcg_gen_trunc_i64_tl(dst
, t64
);
2186 static void gen_st_asi(DisasContext
*dc
, TCGv src
, TCGv addr
,
2187 int insn
, MemOp memop
)
2189 DisasASI da
= get_asi(dc
, insn
, memop
);
2194 case GET_ASI_DTWINX
: /* Reserved for stda. */
2195 #ifndef TARGET_SPARC64
2196 gen_exception(dc
, TT_ILL_INSN
);
2199 if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
2200 /* Pre OpenSPARC CPUs don't have these */
2201 gen_exception(dc
, TT_ILL_INSN
);
2204 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2205 * are ST_BLKINIT_ ASIs */
2208 case GET_ASI_DIRECT
:
2209 gen_address_mask(dc
, addr
);
2210 tcg_gen_qemu_st_tl(src
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2212 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2214 /* Copy 32 bytes from the address in SRC to ADDR. */
2215 /* ??? The original qemu code suggests 4-byte alignment, dropping
2216 the low bits, but the only place I can see this used is in the
2217 Linux kernel with 32 byte alignment, which would make more sense
2218 as a cacheline-style operation. */
2220 TCGv saddr
= tcg_temp_new();
2221 TCGv daddr
= tcg_temp_new();
2222 TCGv four
= tcg_constant_tl(4);
2223 TCGv_i32 tmp
= tcg_temp_new_i32();
2226 tcg_gen_andi_tl(saddr
, src
, -4);
2227 tcg_gen_andi_tl(daddr
, addr
, -4);
2228 for (i
= 0; i
< 32; i
+= 4) {
2229 /* Since the loads and stores are paired, allow the
2230 copy to happen in the host endianness. */
2231 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
.mem_idx
, MO_UL
);
2232 tcg_gen_qemu_st_i32(tmp
, daddr
, da
.mem_idx
, MO_UL
);
2233 tcg_gen_add_tl(saddr
, saddr
, four
);
2234 tcg_gen_add_tl(daddr
, daddr
, four
);
2241 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2242 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2245 #ifdef TARGET_SPARC64
2246 gen_helper_st_asi(tcg_env
, addr
, src
, r_asi
, r_mop
);
2249 TCGv_i64 t64
= tcg_temp_new_i64();
2250 tcg_gen_extu_tl_i64(t64
, src
);
2251 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2255 /* A write to a TLB register may alter page maps. End the TB. */
2256 dc
->npc
= DYNAMIC_PC
;
2262 static void gen_swap_asi(DisasContext
*dc
, TCGv dst
, TCGv src
,
2263 TCGv addr
, int insn
)
2265 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2270 case GET_ASI_DIRECT
:
2271 gen_swap(dc
, dst
, src
, addr
, da
.mem_idx
, da
.memop
);
2274 /* ??? Should be DAE_invalid_asi. */
2275 gen_exception(dc
, TT_DATA_ACCESS
);
2280 static void gen_cas_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2283 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2289 case GET_ASI_DIRECT
:
2290 oldv
= tcg_temp_new();
2291 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2292 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2293 gen_store_gpr(dc
, rd
, oldv
);
2296 /* ??? Should be DAE_invalid_asi. */
2297 gen_exception(dc
, TT_DATA_ACCESS
);
2302 static void gen_ldstub_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
, int insn
)
2304 DisasASI da
= get_asi(dc
, insn
, MO_UB
);
2309 case GET_ASI_DIRECT
:
2310 gen_ldstub(dc
, dst
, addr
, da
.mem_idx
);
2313 /* ??? In theory, this should be raise DAE_invalid_asi.
2314 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2315 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2316 gen_helper_exit_atomic(tcg_env
);
2318 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2319 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
2323 t64
= tcg_temp_new_i64();
2324 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2326 s64
= tcg_constant_i64(0xff);
2327 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
2329 tcg_gen_trunc_i64_tl(dst
, t64
);
2332 dc
->npc
= DYNAMIC_PC
;
2339 #ifdef TARGET_SPARC64
2340 static void gen_ldf_asi(DisasContext
*dc
, TCGv addr
,
2341 int insn
, int size
, int rd
)
2343 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2351 case GET_ASI_DIRECT
:
2352 gen_address_mask(dc
, addr
);
2355 d32
= gen_dest_fpr_F(dc
);
2356 tcg_gen_qemu_ld_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2357 gen_store_fpr_F(dc
, rd
, d32
);
2360 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2361 da
.memop
| MO_ALIGN_4
);
2364 d64
= tcg_temp_new_i64();
2365 tcg_gen_qemu_ld_i64(d64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_4
);
2366 tcg_gen_addi_tl(addr
, addr
, 8);
2367 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
,
2368 da
.memop
| MO_ALIGN_4
);
2369 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2372 g_assert_not_reached();
2377 /* Valid for lddfa on aligned registers only. */
2378 if (size
== 8 && (rd
& 7) == 0) {
2383 gen_address_mask(dc
, addr
);
2385 /* The first operation checks required alignment. */
2386 memop
= da
.memop
| MO_ALIGN_64
;
2387 eight
= tcg_constant_tl(8);
2388 for (i
= 0; ; ++i
) {
2389 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2394 tcg_gen_add_tl(addr
, addr
, eight
);
2398 gen_exception(dc
, TT_ILL_INSN
);
2403 /* Valid for lddfa only. */
2405 gen_address_mask(dc
, addr
);
2406 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2407 da
.memop
| MO_ALIGN
);
2409 gen_exception(dc
, TT_ILL_INSN
);
2415 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2416 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
| MO_ALIGN
);
2419 /* According to the table in the UA2011 manual, the only
2420 other asis that are valid for ldfa/lddfa/ldqfa are
2421 the NO_FAULT asis. We still need a helper for these,
2422 but we can just use the integer asi helper for them. */
2425 d64
= tcg_temp_new_i64();
2426 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2427 d32
= gen_dest_fpr_F(dc
);
2428 tcg_gen_extrl_i64_i32(d32
, d64
);
2429 gen_store_fpr_F(dc
, rd
, d32
);
2432 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
, r_asi
, r_mop
);
2435 d64
= tcg_temp_new_i64();
2436 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
2437 tcg_gen_addi_tl(addr
, addr
, 8);
2438 gen_helper_ld_asi(cpu_fpr
[rd
/2+1], tcg_env
, addr
, r_asi
, r_mop
);
2439 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2442 g_assert_not_reached();
2449 static void gen_stf_asi(DisasContext
*dc
, TCGv addr
,
2450 int insn
, int size
, int rd
)
2452 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2459 case GET_ASI_DIRECT
:
2460 gen_address_mask(dc
, addr
);
2463 d32
= gen_load_fpr_F(dc
, rd
);
2464 tcg_gen_qemu_st_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2467 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2468 da
.memop
| MO_ALIGN_4
);
2471 /* Only 4-byte alignment required. However, it is legal for the
2472 cpu to signal the alignment fault, and the OS trap handler is
2473 required to fix it up. Requiring 16-byte alignment here avoids
2474 having to probe the second page before performing the first
2476 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2477 da
.memop
| MO_ALIGN_16
);
2478 tcg_gen_addi_tl(addr
, addr
, 8);
2479 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
, da
.memop
);
2482 g_assert_not_reached();
2487 /* Valid for stdfa on aligned registers only. */
2488 if (size
== 8 && (rd
& 7) == 0) {
2493 gen_address_mask(dc
, addr
);
2495 /* The first operation checks required alignment. */
2496 memop
= da
.memop
| MO_ALIGN_64
;
2497 eight
= tcg_constant_tl(8);
2498 for (i
= 0; ; ++i
) {
2499 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2504 tcg_gen_add_tl(addr
, addr
, eight
);
2508 gen_exception(dc
, TT_ILL_INSN
);
2513 /* Valid for stdfa only. */
2515 gen_address_mask(dc
, addr
);
2516 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2517 da
.memop
| MO_ALIGN
);
2519 gen_exception(dc
, TT_ILL_INSN
);
2524 /* According to the table in the UA2011 manual, the only
2525 other asis that are valid for ldfa/lddfa/ldqfa are
2526 the PST* asis, which aren't currently handled. */
2527 gen_exception(dc
, TT_ILL_INSN
);
2532 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2534 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2535 TCGv_i64 hi
= gen_dest_gpr(dc
, rd
);
2536 TCGv_i64 lo
= gen_dest_gpr(dc
, rd
+ 1);
2542 case GET_ASI_DTWINX
:
2543 gen_address_mask(dc
, addr
);
2544 tcg_gen_qemu_ld_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2545 tcg_gen_addi_tl(addr
, addr
, 8);
2546 tcg_gen_qemu_ld_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2549 case GET_ASI_DIRECT
:
2551 TCGv_i64 tmp
= tcg_temp_new_i64();
2553 gen_address_mask(dc
, addr
);
2554 tcg_gen_qemu_ld_i64(tmp
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2556 /* Note that LE ldda acts as if each 32-bit register
2557 result is byte swapped. Having just performed one
2558 64-bit bswap, we need now to swap the writebacks. */
2559 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2560 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2562 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2568 /* ??? In theory we've handled all of the ASIs that are valid
2569 for ldda, and this should raise DAE_invalid_asi. However,
2570 real hardware allows others. This can be seen with e.g.
2571 FreeBSD 10.3 wrt ASI_IC_TAG. */
2573 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2574 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2575 TCGv_i64 tmp
= tcg_temp_new_i64();
2578 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
2581 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2582 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2584 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2590 gen_store_gpr(dc
, rd
, hi
);
2591 gen_store_gpr(dc
, rd
+ 1, lo
);
2594 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2597 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2598 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2604 case GET_ASI_DTWINX
:
2605 gen_address_mask(dc
, addr
);
2606 tcg_gen_qemu_st_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2607 tcg_gen_addi_tl(addr
, addr
, 8);
2608 tcg_gen_qemu_st_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2611 case GET_ASI_DIRECT
:
2613 TCGv_i64 t64
= tcg_temp_new_i64();
2615 /* Note that LE stda acts as if each 32-bit register result is
2616 byte swapped. We will perform one 64-bit LE store, so now
2617 we must swap the order of the construction. */
2618 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2619 tcg_gen_concat32_i64(t64
, lo
, hi
);
2621 tcg_gen_concat32_i64(t64
, hi
, lo
);
2623 gen_address_mask(dc
, addr
);
2624 tcg_gen_qemu_st_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2629 /* ??? In theory we've handled all of the ASIs that are valid
2630 for stda, and this should raise DAE_invalid_asi. */
2632 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2633 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2634 TCGv_i64 t64
= tcg_temp_new_i64();
2637 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2638 tcg_gen_concat32_i64(t64
, lo
, hi
);
2640 tcg_gen_concat32_i64(t64
, hi
, lo
);
2644 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2650 static void gen_casx_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2653 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2659 case GET_ASI_DIRECT
:
2660 oldv
= tcg_temp_new();
2661 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2662 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2663 gen_store_gpr(dc
, rd
, oldv
);
2666 /* ??? Should be DAE_invalid_asi. */
2667 gen_exception(dc
, TT_DATA_ACCESS
);
2672 #elif !defined(CONFIG_USER_ONLY)
2673 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2675 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2676 whereby "rd + 1" elicits "error: array subscript is above array".
2677 Since we have already asserted that rd is even, the semantics
2679 TCGv lo
= gen_dest_gpr(dc
, rd
| 1);
2680 TCGv hi
= gen_dest_gpr(dc
, rd
);
2681 TCGv_i64 t64
= tcg_temp_new_i64();
2682 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2687 case GET_ASI_DIRECT
:
2688 gen_address_mask(dc
, addr
);
2689 tcg_gen_qemu_ld_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2693 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2694 TCGv_i32 r_mop
= tcg_constant_i32(MO_UQ
);
2697 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
2702 tcg_gen_extr_i64_i32(lo
, hi
, t64
);
2703 gen_store_gpr(dc
, rd
| 1, lo
);
2704 gen_store_gpr(dc
, rd
, hi
);
2707 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2710 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2711 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2712 TCGv_i64 t64
= tcg_temp_new_i64();
2714 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2719 case GET_ASI_DIRECT
:
2720 gen_address_mask(dc
, addr
);
2721 tcg_gen_qemu_st_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2724 /* Store 32 bytes of T64 to ADDR. */
2725 /* ??? The original qemu code suggests 8-byte alignment, dropping
2726 the low bits, but the only place I can see this used is in the
2727 Linux kernel with 32 byte alignment, which would make more sense
2728 as a cacheline-style operation. */
2730 TCGv d_addr
= tcg_temp_new();
2731 TCGv eight
= tcg_constant_tl(8);
2734 tcg_gen_andi_tl(d_addr
, addr
, -8);
2735 for (i
= 0; i
< 32; i
+= 8) {
2736 tcg_gen_qemu_st_i64(t64
, d_addr
, da
.mem_idx
, da
.memop
);
2737 tcg_gen_add_tl(d_addr
, d_addr
, eight
);
2743 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2744 TCGv_i32 r_mop
= tcg_constant_i32(MO_UQ
);
2747 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2754 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2756 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2757 return gen_load_gpr(dc
, rs1
);
2760 static TCGv
get_src2(DisasContext
*dc
, unsigned int insn
)
2762 if (IS_IMM
) { /* immediate */
2763 target_long simm
= GET_FIELDs(insn
, 19, 31);
2764 TCGv t
= tcg_temp_new();
2765 tcg_gen_movi_tl(t
, simm
);
2767 } else { /* register */
2768 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2769 return gen_load_gpr(dc
, rs2
);
2773 #ifdef TARGET_SPARC64
2774 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2776 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2778 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2779 or fold the comparison down to 32 bits and use movcond_i32. Choose
2781 c32
= tcg_temp_new_i32();
2783 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2785 TCGv_i64 c64
= tcg_temp_new_i64();
2786 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2787 tcg_gen_extrl_i64_i32(c32
, c64
);
2790 s1
= gen_load_fpr_F(dc
, rs
);
2791 s2
= gen_load_fpr_F(dc
, rd
);
2792 dst
= gen_dest_fpr_F(dc
);
2793 zero
= tcg_constant_i32(0);
2795 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2797 gen_store_fpr_F(dc
, rd
, dst
);
2800 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2802 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2803 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2804 gen_load_fpr_D(dc
, rs
),
2805 gen_load_fpr_D(dc
, rd
));
2806 gen_store_fpr_D(dc
, rd
, dst
);
2809 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2811 int qd
= QFPREG(rd
);
2812 int qs
= QFPREG(rs
);
2814 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2815 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2816 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2817 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2819 gen_update_fprs_dirty(dc
, qd
);
2822 #ifndef CONFIG_USER_ONLY
2823 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
, TCGv_env tcg_env
)
2825 TCGv_i32 r_tl
= tcg_temp_new_i32();
2827 /* load env->tl into r_tl */
2828 tcg_gen_ld_i32(r_tl
, tcg_env
, offsetof(CPUSPARCState
, tl
));
2830 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2831 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2833 /* calculate offset to current trap state from env->ts, reuse r_tl */
2834 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2835 tcg_gen_addi_ptr(r_tsptr
, tcg_env
, offsetof(CPUSPARCState
, ts
));
2837 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2839 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2840 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2841 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2846 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2847 int width
, bool cc
, bool left
)
2850 uint64_t amask
, tabl
, tabr
;
2851 int shift
, imask
, omask
;
2854 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2855 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2856 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2857 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2858 dc
->cc_op
= CC_OP_SUB
;
2861 /* Theory of operation: there are two tables, left and right (not to
2862 be confused with the left and right versions of the opcode). These
2863 are indexed by the low 3 bits of the inputs. To make things "easy",
2864 these tables are loaded into two constants, TABL and TABR below.
2865 The operation index = (input & imask) << shift calculates the index
2866 into the constant, while val = (table >> index) & omask calculates
2867 the value we're looking for. */
2874 tabl
= 0x80c0e0f0f8fcfeffULL
;
2875 tabr
= 0xff7f3f1f0f070301ULL
;
2877 tabl
= 0x0103070f1f3f7fffULL
;
2878 tabr
= 0xfffefcf8f0e0c080ULL
;
2898 tabl
= (2 << 2) | 3;
2899 tabr
= (3 << 2) | 1;
2901 tabl
= (1 << 2) | 3;
2902 tabr
= (3 << 2) | 2;
2909 lo1
= tcg_temp_new();
2910 lo2
= tcg_temp_new();
2911 tcg_gen_andi_tl(lo1
, s1
, imask
);
2912 tcg_gen_andi_tl(lo2
, s2
, imask
);
2913 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2914 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2916 tcg_gen_shr_tl(lo1
, tcg_constant_tl(tabl
), lo1
);
2917 tcg_gen_shr_tl(lo2
, tcg_constant_tl(tabr
), lo2
);
2918 tcg_gen_andi_tl(lo1
, lo1
, omask
);
2919 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2923 amask
&= 0xffffffffULL
;
2925 tcg_gen_andi_tl(s1
, s1
, amask
);
2926 tcg_gen_andi_tl(s2
, s2
, amask
);
2928 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
2929 tcg_gen_and_tl(lo2
, lo2
, lo1
);
2930 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, s1
, s2
, lo1
, lo2
);
2933 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2935 TCGv tmp
= tcg_temp_new();
2937 tcg_gen_add_tl(tmp
, s1
, s2
);
2938 tcg_gen_andi_tl(dst
, tmp
, -8);
2940 tcg_gen_neg_tl(tmp
, tmp
);
2942 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2945 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2949 t1
= tcg_temp_new();
2950 t2
= tcg_temp_new();
2951 shift
= tcg_temp_new();
2953 tcg_gen_andi_tl(shift
, gsr
, 7);
2954 tcg_gen_shli_tl(shift
, shift
, 3);
2955 tcg_gen_shl_tl(t1
, s1
, shift
);
2957 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2958 shift of (up to 63) followed by a constant shift of 1. */
2959 tcg_gen_xori_tl(shift
, shift
, 63);
2960 tcg_gen_shr_tl(t2
, s2
, shift
);
2961 tcg_gen_shri_tl(t2
, t2
, 1);
2963 tcg_gen_or_tl(dst
, t1
, t2
);
2967 #define CHECK_IU_FEATURE(dc, FEATURE) \
2968 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2970 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2971 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2974 /* before an instruction, dc->pc must be static */
2975 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2977 unsigned int opc
, rs1
, rs2
, rd
;
2978 TCGv cpu_src1
, cpu_src2
;
2979 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2980 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2983 opc
= GET_FIELD(insn
, 0, 1);
2984 rd
= GET_FIELD(insn
, 2, 6);
2987 case 0: /* branches/sethi */
2989 unsigned int xop
= GET_FIELD(insn
, 7, 9);
2992 #ifdef TARGET_SPARC64
2993 case 0x1: /* V9 BPcc */
2997 target
= GET_FIELD_SP(insn
, 0, 18);
2998 target
= sign_extend(target
, 19);
3000 cc
= GET_FIELD_SP(insn
, 20, 21);
3002 do_branch(dc
, target
, insn
, 0);
3004 do_branch(dc
, target
, insn
, 1);
3009 case 0x3: /* V9 BPr */
3011 target
= GET_FIELD_SP(insn
, 0, 13) |
3012 (GET_FIELD_SP(insn
, 20, 21) << 14);
3013 target
= sign_extend(target
, 16);
3015 cpu_src1
= get_src1(dc
, insn
);
3016 do_branch_reg(dc
, target
, insn
, cpu_src1
);
3019 case 0x5: /* V9 FBPcc */
3021 int cc
= GET_FIELD_SP(insn
, 20, 21);
3022 if (gen_trap_ifnofpu(dc
)) {
3025 target
= GET_FIELD_SP(insn
, 0, 18);
3026 target
= sign_extend(target
, 19);
3028 do_fbranch(dc
, target
, insn
, cc
);
3032 case 0x7: /* CBN+x */
3037 case 0x2: /* BN+x */
3039 target
= GET_FIELD(insn
, 10, 31);
3040 target
= sign_extend(target
, 22);
3042 do_branch(dc
, target
, insn
, 0);
3045 case 0x6: /* FBN+x */
3047 if (gen_trap_ifnofpu(dc
)) {
3050 target
= GET_FIELD(insn
, 10, 31);
3051 target
= sign_extend(target
, 22);
3053 do_fbranch(dc
, target
, insn
, 0);
3056 case 0x4: /* SETHI */
3057 /* Special-case %g0 because that's the canonical nop. */
3059 uint32_t value
= GET_FIELD(insn
, 10, 31);
3060 TCGv t
= gen_dest_gpr(dc
, rd
);
3061 tcg_gen_movi_tl(t
, value
<< 10);
3062 gen_store_gpr(dc
, rd
, t
);
3065 case 0x0: /* UNIMPL */
3074 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
3075 TCGv o7
= gen_dest_gpr(dc
, 15);
3077 tcg_gen_movi_tl(o7
, dc
->pc
);
3078 gen_store_gpr(dc
, 15, o7
);
3081 #ifdef TARGET_SPARC64
3082 if (unlikely(AM_CHECK(dc
))) {
3083 target
&= 0xffffffffULL
;
3089 case 2: /* FPU & Logical Operations */
3091 unsigned int xop
= GET_FIELD(insn
, 7, 12);
3092 TCGv cpu_dst
= tcg_temp_new();
3095 if (xop
== 0x3a) { /* generate trap */
3096 int cond
= GET_FIELD(insn
, 3, 6);
3098 TCGLabel
*l1
= NULL
;
3109 /* Conditional trap. */
3111 #ifdef TARGET_SPARC64
3113 int cc
= GET_FIELD_SP(insn
, 11, 12);
3115 gen_compare(&cmp
, 0, cond
, dc
);
3116 } else if (cc
== 2) {
3117 gen_compare(&cmp
, 1, cond
, dc
);
3122 gen_compare(&cmp
, 0, cond
, dc
);
3124 l1
= gen_new_label();
3125 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
3126 cmp
.c1
, cmp
.c2
, l1
);
3129 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
3130 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
3132 /* Don't use the normal temporaries, as they may well have
3133 gone out of scope with the branch above. While we're
3134 doing that we might as well pre-truncate to 32-bit. */
3135 trap
= tcg_temp_new_i32();
3137 rs1
= GET_FIELD_SP(insn
, 14, 18);
3139 rs2
= GET_FIELD_SP(insn
, 0, 7);
3141 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
3142 /* Signal that the trap value is fully constant. */
3145 TCGv t1
= gen_load_gpr(dc
, rs1
);
3146 tcg_gen_trunc_tl_i32(trap
, t1
);
3147 tcg_gen_addi_i32(trap
, trap
, rs2
);
3151 rs2
= GET_FIELD_SP(insn
, 0, 4);
3152 t1
= gen_load_gpr(dc
, rs1
);
3153 t2
= gen_load_gpr(dc
, rs2
);
3154 tcg_gen_add_tl(t1
, t1
, t2
);
3155 tcg_gen_trunc_tl_i32(trap
, t1
);
3158 tcg_gen_andi_i32(trap
, trap
, mask
);
3159 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
3162 gen_helper_raise_exception(tcg_env
, trap
);
3165 /* An unconditional trap ends the TB. */
3166 dc
->base
.is_jmp
= DISAS_NORETURN
;
3169 /* A conditional trap falls through to the next insn. */
3173 } else if (xop
== 0x28) {
3174 rs1
= GET_FIELD(insn
, 13, 17);
3177 #ifndef TARGET_SPARC64
3178 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3179 manual, rdy on the microSPARC
3181 case 0x0f: /* stbar in the SPARCv8 manual,
3182 rdy on the microSPARC II */
3183 case 0x10 ... 0x1f: /* implementation-dependent in the
3184 SPARCv8 manual, rdy on the
3187 if (rs1
== 0x11 && dc
->def
->features
& CPU_FEATURE_ASR17
) {
3188 TCGv t
= gen_dest_gpr(dc
, rd
);
3189 /* Read Asr17 for a Leon3 monoprocessor */
3190 tcg_gen_movi_tl(t
, (1 << 8) | (dc
->def
->nwindows
- 1));
3191 gen_store_gpr(dc
, rd
, t
);
3195 gen_store_gpr(dc
, rd
, cpu_y
);
3197 #ifdef TARGET_SPARC64
3198 case 0x2: /* V9 rdccr */
3200 gen_helper_rdccr(cpu_dst
, tcg_env
);
3201 gen_store_gpr(dc
, rd
, cpu_dst
);
3203 case 0x3: /* V9 rdasi */
3204 tcg_gen_movi_tl(cpu_dst
, dc
->asi
);
3205 gen_store_gpr(dc
, rd
, cpu_dst
);
3207 case 0x4: /* V9 rdtick */
3212 r_tickptr
= tcg_temp_new_ptr();
3213 r_const
= tcg_constant_i32(dc
->mem_idx
);
3214 tcg_gen_ld_ptr(r_tickptr
, tcg_env
,
3215 offsetof(CPUSPARCState
, tick
));
3216 if (translator_io_start(&dc
->base
)) {
3217 dc
->base
.is_jmp
= DISAS_EXIT
;
3219 gen_helper_tick_get_count(cpu_dst
, tcg_env
, r_tickptr
,
3221 gen_store_gpr(dc
, rd
, cpu_dst
);
3224 case 0x5: /* V9 rdpc */
3226 TCGv t
= gen_dest_gpr(dc
, rd
);
3227 if (unlikely(AM_CHECK(dc
))) {
3228 tcg_gen_movi_tl(t
, dc
->pc
& 0xffffffffULL
);
3230 tcg_gen_movi_tl(t
, dc
->pc
);
3232 gen_store_gpr(dc
, rd
, t
);
3235 case 0x6: /* V9 rdfprs */
3236 tcg_gen_ext_i32_tl(cpu_dst
, cpu_fprs
);
3237 gen_store_gpr(dc
, rd
, cpu_dst
);
3239 case 0xf: /* V9 membar */
3240 break; /* no effect */
3241 case 0x13: /* Graphics Status */
3242 if (gen_trap_ifnofpu(dc
)) {
3245 gen_store_gpr(dc
, rd
, cpu_gsr
);
3247 case 0x16: /* Softint */
3248 tcg_gen_ld32s_tl(cpu_dst
, tcg_env
,
3249 offsetof(CPUSPARCState
, softint
));
3250 gen_store_gpr(dc
, rd
, cpu_dst
);
3252 case 0x17: /* Tick compare */
3253 gen_store_gpr(dc
, rd
, cpu_tick_cmpr
);
3255 case 0x18: /* System tick */
3260 r_tickptr
= tcg_temp_new_ptr();
3261 r_const
= tcg_constant_i32(dc
->mem_idx
);
3262 tcg_gen_ld_ptr(r_tickptr
, tcg_env
,
3263 offsetof(CPUSPARCState
, stick
));
3264 if (translator_io_start(&dc
->base
)) {
3265 dc
->base
.is_jmp
= DISAS_EXIT
;
3267 gen_helper_tick_get_count(cpu_dst
, tcg_env
, r_tickptr
,
3269 gen_store_gpr(dc
, rd
, cpu_dst
);
3272 case 0x19: /* System tick compare */
3273 gen_store_gpr(dc
, rd
, cpu_stick_cmpr
);
3275 case 0x1a: /* UltraSPARC-T1 Strand status */
3276 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3277 * this ASR as impl. dep
3279 CHECK_IU_FEATURE(dc
, HYPV
);
3281 TCGv t
= gen_dest_gpr(dc
, rd
);
3282 tcg_gen_movi_tl(t
, 1UL);
3283 gen_store_gpr(dc
, rd
, t
);
3286 case 0x10: /* Performance Control */
3287 case 0x11: /* Performance Instrumentation Counter */
3288 case 0x12: /* Dispatch Control */
3289 case 0x14: /* Softint set, WO */
3290 case 0x15: /* Softint clear, WO */
3295 #if !defined(CONFIG_USER_ONLY)
3296 } else if (xop
== 0x29) { /* rdpsr / UA2005 rdhpr */
3297 #ifndef TARGET_SPARC64
3298 if (!supervisor(dc
)) {
3302 gen_helper_rdpsr(cpu_dst
, tcg_env
);
3304 CHECK_IU_FEATURE(dc
, HYPV
);
3305 if (!hypervisor(dc
))
3307 rs1
= GET_FIELD(insn
, 13, 17);
3310 tcg_gen_ld_i64(cpu_dst
, tcg_env
,
3311 offsetof(CPUSPARCState
, hpstate
));
3314 // gen_op_rdhtstate();
3317 tcg_gen_mov_tl(cpu_dst
, cpu_hintp
);
3320 tcg_gen_mov_tl(cpu_dst
, cpu_htba
);
3323 tcg_gen_mov_tl(cpu_dst
, cpu_hver
);
3325 case 31: // hstick_cmpr
3326 tcg_gen_mov_tl(cpu_dst
, cpu_hstick_cmpr
);
3332 gen_store_gpr(dc
, rd
, cpu_dst
);
3334 } else if (xop
== 0x2a) { /* rdwim / V9 rdpr */
3335 if (!supervisor(dc
)) {
3338 cpu_tmp0
= tcg_temp_new();
3339 #ifdef TARGET_SPARC64
3340 rs1
= GET_FIELD(insn
, 13, 17);
3346 r_tsptr
= tcg_temp_new_ptr();
3347 gen_load_trap_state_at_tl(r_tsptr
, tcg_env
);
3348 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3349 offsetof(trap_state
, tpc
));
3356 r_tsptr
= tcg_temp_new_ptr();
3357 gen_load_trap_state_at_tl(r_tsptr
, tcg_env
);
3358 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3359 offsetof(trap_state
, tnpc
));
3366 r_tsptr
= tcg_temp_new_ptr();
3367 gen_load_trap_state_at_tl(r_tsptr
, tcg_env
);
3368 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3369 offsetof(trap_state
, tstate
));
3374 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3376 gen_load_trap_state_at_tl(r_tsptr
, tcg_env
);
3377 tcg_gen_ld32s_tl(cpu_tmp0
, r_tsptr
,
3378 offsetof(trap_state
, tt
));
3386 r_tickptr
= tcg_temp_new_ptr();
3387 r_const
= tcg_constant_i32(dc
->mem_idx
);
3388 tcg_gen_ld_ptr(r_tickptr
, tcg_env
,
3389 offsetof(CPUSPARCState
, tick
));
3390 if (translator_io_start(&dc
->base
)) {
3391 dc
->base
.is_jmp
= DISAS_EXIT
;
3393 gen_helper_tick_get_count(cpu_tmp0
, tcg_env
,
3394 r_tickptr
, r_const
);
3398 tcg_gen_mov_tl(cpu_tmp0
, cpu_tbr
);
3401 tcg_gen_ld32s_tl(cpu_tmp0
, tcg_env
,
3402 offsetof(CPUSPARCState
, pstate
));
3405 tcg_gen_ld32s_tl(cpu_tmp0
, tcg_env
,
3406 offsetof(CPUSPARCState
, tl
));
3409 tcg_gen_ld32s_tl(cpu_tmp0
, tcg_env
,
3410 offsetof(CPUSPARCState
, psrpil
));
3413 gen_helper_rdcwp(cpu_tmp0
, tcg_env
);
3416 tcg_gen_ld32s_tl(cpu_tmp0
, tcg_env
,
3417 offsetof(CPUSPARCState
, cansave
));
3419 case 11: // canrestore
3420 tcg_gen_ld32s_tl(cpu_tmp0
, tcg_env
,
3421 offsetof(CPUSPARCState
, canrestore
));
3423 case 12: // cleanwin
3424 tcg_gen_ld32s_tl(cpu_tmp0
, tcg_env
,
3425 offsetof(CPUSPARCState
, cleanwin
));
3427 case 13: // otherwin
3428 tcg_gen_ld32s_tl(cpu_tmp0
, tcg_env
,
3429 offsetof(CPUSPARCState
, otherwin
));
3432 tcg_gen_ld32s_tl(cpu_tmp0
, tcg_env
,
3433 offsetof(CPUSPARCState
, wstate
));
3435 case 16: // UA2005 gl
3436 CHECK_IU_FEATURE(dc
, GL
);
3437 tcg_gen_ld32s_tl(cpu_tmp0
, tcg_env
,
3438 offsetof(CPUSPARCState
, gl
));
3440 case 26: // UA2005 strand status
3441 CHECK_IU_FEATURE(dc
, HYPV
);
3442 if (!hypervisor(dc
))
3444 tcg_gen_mov_tl(cpu_tmp0
, cpu_ssr
);
3447 tcg_gen_mov_tl(cpu_tmp0
, cpu_ver
);
3454 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_wim
);
3456 gen_store_gpr(dc
, rd
, cpu_tmp0
);
3459 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
3460 } else if (xop
== 0x2b) { /* rdtbr / V9 flushw */
3461 #ifdef TARGET_SPARC64
3462 gen_helper_flushw(tcg_env
);
3464 if (!supervisor(dc
))
3466 gen_store_gpr(dc
, rd
, cpu_tbr
);
3470 } else if (xop
== 0x34) { /* FPU Operations */
3471 if (gen_trap_ifnofpu(dc
)) {
3474 gen_op_clear_ieee_excp_and_FTT();
3475 rs1
= GET_FIELD(insn
, 13, 17);
3476 rs2
= GET_FIELD(insn
, 27, 31);
3477 xop
= GET_FIELD(insn
, 18, 26);
3480 case 0x1: /* fmovs */
3481 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
3482 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
3484 case 0x5: /* fnegs */
3485 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
3487 case 0x9: /* fabss */
3488 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
3490 case 0x29: /* fsqrts */
3491 CHECK_FPU_FEATURE(dc
, FSQRT
);
3492 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
3494 case 0x2a: /* fsqrtd */
3495 CHECK_FPU_FEATURE(dc
, FSQRT
);
3496 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
3498 case 0x2b: /* fsqrtq */
3499 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3500 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
3502 case 0x41: /* fadds */
3503 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
3505 case 0x42: /* faddd */
3506 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
3508 case 0x43: /* faddq */
3509 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3510 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
3512 case 0x45: /* fsubs */
3513 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
3515 case 0x46: /* fsubd */
3516 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
3518 case 0x47: /* fsubq */
3519 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3520 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
3522 case 0x49: /* fmuls */
3523 CHECK_FPU_FEATURE(dc
, FMUL
);
3524 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
3526 case 0x4a: /* fmuld */
3527 CHECK_FPU_FEATURE(dc
, FMUL
);
3528 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
3530 case 0x4b: /* fmulq */
3531 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3532 CHECK_FPU_FEATURE(dc
, FMUL
);
3533 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
3535 case 0x4d: /* fdivs */
3536 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
3538 case 0x4e: /* fdivd */
3539 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
3541 case 0x4f: /* fdivq */
3542 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3543 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
3545 case 0x69: /* fsmuld */
3546 CHECK_FPU_FEATURE(dc
, FSMULD
);
3547 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
3549 case 0x6e: /* fdmulq */
3550 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3551 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
3553 case 0xc4: /* fitos */
3554 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
3556 case 0xc6: /* fdtos */
3557 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
3559 case 0xc7: /* fqtos */
3560 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3561 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
3563 case 0xc8: /* fitod */
3564 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
3566 case 0xc9: /* fstod */
3567 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
3569 case 0xcb: /* fqtod */
3570 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3571 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
3573 case 0xcc: /* fitoq */
3574 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3575 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
3577 case 0xcd: /* fstoq */
3578 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3579 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
3581 case 0xce: /* fdtoq */
3582 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3583 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
3585 case 0xd1: /* fstoi */
3586 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
3588 case 0xd2: /* fdtoi */
3589 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
3591 case 0xd3: /* fqtoi */
3592 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3593 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
3595 #ifdef TARGET_SPARC64
3596 case 0x2: /* V9 fmovd */
3597 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
3598 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
3600 case 0x3: /* V9 fmovq */
3601 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3602 gen_move_Q(dc
, rd
, rs2
);
3604 case 0x6: /* V9 fnegd */
3605 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
3607 case 0x7: /* V9 fnegq */
3608 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3609 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
3611 case 0xa: /* V9 fabsd */
3612 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
3614 case 0xb: /* V9 fabsq */
3615 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3616 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
3618 case 0x81: /* V9 fstox */
3619 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
3621 case 0x82: /* V9 fdtox */
3622 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
3624 case 0x83: /* V9 fqtox */
3625 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3626 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
3628 case 0x84: /* V9 fxtos */
3629 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
3631 case 0x88: /* V9 fxtod */
3632 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
3634 case 0x8c: /* V9 fxtoq */
3635 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3636 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
3642 } else if (xop
== 0x35) { /* FPU Operations */
3643 #ifdef TARGET_SPARC64
3646 if (gen_trap_ifnofpu(dc
)) {
3649 gen_op_clear_ieee_excp_and_FTT();
3650 rs1
= GET_FIELD(insn
, 13, 17);
3651 rs2
= GET_FIELD(insn
, 27, 31);
3652 xop
= GET_FIELD(insn
, 18, 26);
3654 #ifdef TARGET_SPARC64
3658 cond = GET_FIELD_SP(insn, 10, 12); \
3659 cpu_src1 = get_src1(dc, insn); \
3660 gen_compare_reg(&cmp, cond, cpu_src1); \
3661 gen_fmov##sz(dc, &cmp, rd, rs2); \
3664 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
3667 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
3670 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
3671 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3678 #ifdef TARGET_SPARC64
3679 #define FMOVCC(fcc, sz) \
3682 cond = GET_FIELD_SP(insn, 14, 17); \
3683 gen_fcompare(&cmp, fcc, cond); \
3684 gen_fmov##sz(dc, &cmp, rd, rs2); \
3687 case 0x001: /* V9 fmovscc %fcc0 */
3690 case 0x002: /* V9 fmovdcc %fcc0 */
3693 case 0x003: /* V9 fmovqcc %fcc0 */
3694 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3697 case 0x041: /* V9 fmovscc %fcc1 */
3700 case 0x042: /* V9 fmovdcc %fcc1 */
3703 case 0x043: /* V9 fmovqcc %fcc1 */
3704 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3707 case 0x081: /* V9 fmovscc %fcc2 */
3710 case 0x082: /* V9 fmovdcc %fcc2 */
3713 case 0x083: /* V9 fmovqcc %fcc2 */
3714 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3717 case 0x0c1: /* V9 fmovscc %fcc3 */
3720 case 0x0c2: /* V9 fmovdcc %fcc3 */
3723 case 0x0c3: /* V9 fmovqcc %fcc3 */
3724 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3728 #define FMOVCC(xcc, sz) \
3731 cond = GET_FIELD_SP(insn, 14, 17); \
3732 gen_compare(&cmp, xcc, cond, dc); \
3733 gen_fmov##sz(dc, &cmp, rd, rs2); \
3736 case 0x101: /* V9 fmovscc %icc */
3739 case 0x102: /* V9 fmovdcc %icc */
3742 case 0x103: /* V9 fmovqcc %icc */
3743 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3746 case 0x181: /* V9 fmovscc %xcc */
3749 case 0x182: /* V9 fmovdcc %xcc */
3752 case 0x183: /* V9 fmovqcc %xcc */
3753 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3758 case 0x51: /* fcmps, V9 %fcc */
3759 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3760 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3761 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3763 case 0x52: /* fcmpd, V9 %fcc */
3764 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3765 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3766 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3768 case 0x53: /* fcmpq, V9 %fcc */
3769 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3770 gen_op_load_fpr_QT0(QFPREG(rs1
));
3771 gen_op_load_fpr_QT1(QFPREG(rs2
));
3772 gen_op_fcmpq(rd
& 3);
3774 case 0x55: /* fcmpes, V9 %fcc */
3775 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3776 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3777 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3779 case 0x56: /* fcmped, V9 %fcc */
3780 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3781 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3782 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3784 case 0x57: /* fcmpeq, V9 %fcc */
3785 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3786 gen_op_load_fpr_QT0(QFPREG(rs1
));
3787 gen_op_load_fpr_QT1(QFPREG(rs2
));
3788 gen_op_fcmpeq(rd
& 3);
3793 } else if (xop
== 0x2) {
3794 TCGv dst
= gen_dest_gpr(dc
, rd
);
3795 rs1
= GET_FIELD(insn
, 13, 17);
3797 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3798 if (IS_IMM
) { /* immediate */
3799 simm
= GET_FIELDs(insn
, 19, 31);
3800 tcg_gen_movi_tl(dst
, simm
);
3801 gen_store_gpr(dc
, rd
, dst
);
3802 } else { /* register */
3803 rs2
= GET_FIELD(insn
, 27, 31);
3805 tcg_gen_movi_tl(dst
, 0);
3806 gen_store_gpr(dc
, rd
, dst
);
3808 cpu_src2
= gen_load_gpr(dc
, rs2
);
3809 gen_store_gpr(dc
, rd
, cpu_src2
);
3813 cpu_src1
= get_src1(dc
, insn
);
3814 if (IS_IMM
) { /* immediate */
3815 simm
= GET_FIELDs(insn
, 19, 31);
3816 tcg_gen_ori_tl(dst
, cpu_src1
, simm
);
3817 gen_store_gpr(dc
, rd
, dst
);
3818 } else { /* register */
3819 rs2
= GET_FIELD(insn
, 27, 31);
3821 /* mov shortcut: or x, %g0, y -> mov x, y */
3822 gen_store_gpr(dc
, rd
, cpu_src1
);
3824 cpu_src2
= gen_load_gpr(dc
, rs2
);
3825 tcg_gen_or_tl(dst
, cpu_src1
, cpu_src2
);
3826 gen_store_gpr(dc
, rd
, dst
);
3830 #ifdef TARGET_SPARC64
3831 } else if (xop
== 0x25) { /* sll, V9 sllx */
3832 cpu_src1
= get_src1(dc
, insn
);
3833 if (IS_IMM
) { /* immediate */
3834 simm
= GET_FIELDs(insn
, 20, 31);
3835 if (insn
& (1 << 12)) {
3836 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3838 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
3840 } else { /* register */
3841 rs2
= GET_FIELD(insn
, 27, 31);
3842 cpu_src2
= gen_load_gpr(dc
, rs2
);
3843 cpu_tmp0
= tcg_temp_new();
3844 if (insn
& (1 << 12)) {
3845 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3847 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3849 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3851 gen_store_gpr(dc
, rd
, cpu_dst
);
3852 } else if (xop
== 0x26) { /* srl, V9 srlx */
3853 cpu_src1
= get_src1(dc
, insn
);
3854 if (IS_IMM
) { /* immediate */
3855 simm
= GET_FIELDs(insn
, 20, 31);
3856 if (insn
& (1 << 12)) {
3857 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3859 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3860 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3862 } else { /* register */
3863 rs2
= GET_FIELD(insn
, 27, 31);
3864 cpu_src2
= gen_load_gpr(dc
, rs2
);
3865 cpu_tmp0
= tcg_temp_new();
3866 if (insn
& (1 << 12)) {
3867 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3868 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3870 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3871 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3872 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3875 gen_store_gpr(dc
, rd
, cpu_dst
);
3876 } else if (xop
== 0x27) { /* sra, V9 srax */
3877 cpu_src1
= get_src1(dc
, insn
);
3878 if (IS_IMM
) { /* immediate */
3879 simm
= GET_FIELDs(insn
, 20, 31);
3880 if (insn
& (1 << 12)) {
3881 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3883 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3884 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3886 } else { /* register */
3887 rs2
= GET_FIELD(insn
, 27, 31);
3888 cpu_src2
= gen_load_gpr(dc
, rs2
);
3889 cpu_tmp0
= tcg_temp_new();
3890 if (insn
& (1 << 12)) {
3891 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3892 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3894 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3895 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3896 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3899 gen_store_gpr(dc
, rd
, cpu_dst
);
3901 } else if (xop
< 0x36) {
3903 cpu_src1
= get_src1(dc
, insn
);
3904 cpu_src2
= get_src2(dc
, insn
);
3905 switch (xop
& ~0x10) {
3908 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3909 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3910 dc
->cc_op
= CC_OP_ADD
;
3912 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3916 tcg_gen_and_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3918 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3919 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3920 dc
->cc_op
= CC_OP_LOGIC
;
3924 tcg_gen_or_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3926 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3927 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3928 dc
->cc_op
= CC_OP_LOGIC
;
3932 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3934 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3935 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3936 dc
->cc_op
= CC_OP_LOGIC
;
3941 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3942 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
3943 dc
->cc_op
= CC_OP_SUB
;
3945 tcg_gen_sub_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3948 case 0x5: /* andn */
3949 tcg_gen_andc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3951 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3952 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3953 dc
->cc_op
= CC_OP_LOGIC
;
3957 tcg_gen_orc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3959 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3960 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3961 dc
->cc_op
= CC_OP_LOGIC
;
3964 case 0x7: /* xorn */
3965 tcg_gen_eqv_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3967 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3968 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3969 dc
->cc_op
= CC_OP_LOGIC
;
3972 case 0x8: /* addx, V9 addc */
3973 gen_op_addx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3976 #ifdef TARGET_SPARC64
3977 case 0x9: /* V9 mulx */
3978 tcg_gen_mul_i64(cpu_dst
, cpu_src1
, cpu_src2
);
3981 case 0xa: /* umul */
3982 CHECK_IU_FEATURE(dc
, MUL
);
3983 gen_op_umul(cpu_dst
, cpu_src1
, cpu_src2
);
3985 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3986 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3987 dc
->cc_op
= CC_OP_LOGIC
;
3990 case 0xb: /* smul */
3991 CHECK_IU_FEATURE(dc
, MUL
);
3992 gen_op_smul(cpu_dst
, cpu_src1
, cpu_src2
);
3994 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3995 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3996 dc
->cc_op
= CC_OP_LOGIC
;
3999 case 0xc: /* subx, V9 subc */
4000 gen_op_subx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
4003 #ifdef TARGET_SPARC64
4004 case 0xd: /* V9 udivx */
4005 gen_helper_udivx(cpu_dst
, tcg_env
, cpu_src1
, cpu_src2
);
4008 case 0xe: /* udiv */
4009 CHECK_IU_FEATURE(dc
, DIV
);
4011 gen_helper_udiv_cc(cpu_dst
, tcg_env
, cpu_src1
,
4013 dc
->cc_op
= CC_OP_DIV
;
4015 gen_helper_udiv(cpu_dst
, tcg_env
, cpu_src1
,
4019 case 0xf: /* sdiv */
4020 CHECK_IU_FEATURE(dc
, DIV
);
4022 gen_helper_sdiv_cc(cpu_dst
, tcg_env
, cpu_src1
,
4024 dc
->cc_op
= CC_OP_DIV
;
4026 gen_helper_sdiv(cpu_dst
, tcg_env
, cpu_src1
,
4033 gen_store_gpr(dc
, rd
, cpu_dst
);
4035 cpu_src1
= get_src1(dc
, insn
);
4036 cpu_src2
= get_src2(dc
, insn
);
4038 case 0x20: /* taddcc */
4039 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4040 gen_store_gpr(dc
, rd
, cpu_dst
);
4041 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TADD
);
4042 dc
->cc_op
= CC_OP_TADD
;
4044 case 0x21: /* tsubcc */
4045 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4046 gen_store_gpr(dc
, rd
, cpu_dst
);
4047 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TSUB
);
4048 dc
->cc_op
= CC_OP_TSUB
;
4050 case 0x22: /* taddcctv */
4051 gen_helper_taddcctv(cpu_dst
, tcg_env
,
4052 cpu_src1
, cpu_src2
);
4053 gen_store_gpr(dc
, rd
, cpu_dst
);
4054 dc
->cc_op
= CC_OP_TADDTV
;
4056 case 0x23: /* tsubcctv */
4057 gen_helper_tsubcctv(cpu_dst
, tcg_env
,
4058 cpu_src1
, cpu_src2
);
4059 gen_store_gpr(dc
, rd
, cpu_dst
);
4060 dc
->cc_op
= CC_OP_TSUBTV
;
4062 case 0x24: /* mulscc */
4064 gen_op_mulscc(cpu_dst
, cpu_src1
, cpu_src2
);
4065 gen_store_gpr(dc
, rd
, cpu_dst
);
4066 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
4067 dc
->cc_op
= CC_OP_ADD
;
4069 #ifndef TARGET_SPARC64
4070 case 0x25: /* sll */
4071 if (IS_IMM
) { /* immediate */
4072 simm
= GET_FIELDs(insn
, 20, 31);
4073 tcg_gen_shli_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4074 } else { /* register */
4075 cpu_tmp0
= tcg_temp_new();
4076 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4077 tcg_gen_shl_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4079 gen_store_gpr(dc
, rd
, cpu_dst
);
4081 case 0x26: /* srl */
4082 if (IS_IMM
) { /* immediate */
4083 simm
= GET_FIELDs(insn
, 20, 31);
4084 tcg_gen_shri_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4085 } else { /* register */
4086 cpu_tmp0
= tcg_temp_new();
4087 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4088 tcg_gen_shr_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4090 gen_store_gpr(dc
, rd
, cpu_dst
);
4092 case 0x27: /* sra */
4093 if (IS_IMM
) { /* immediate */
4094 simm
= GET_FIELDs(insn
, 20, 31);
4095 tcg_gen_sari_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4096 } else { /* register */
4097 cpu_tmp0
= tcg_temp_new();
4098 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4099 tcg_gen_sar_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4101 gen_store_gpr(dc
, rd
, cpu_dst
);
4106 cpu_tmp0
= tcg_temp_new();
4109 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4110 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
4112 #ifndef TARGET_SPARC64
4113 case 0x01 ... 0x0f: /* undefined in the
4117 case 0x10 ... 0x1f: /* implementation-dependent
4121 if ((rd
== 0x13) && (dc
->def
->features
&
4122 CPU_FEATURE_POWERDOWN
)) {
4123 /* LEON3 power-down */
4125 gen_helper_power_down(tcg_env
);
4129 case 0x2: /* V9 wrccr */
4130 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4131 gen_helper_wrccr(tcg_env
, cpu_tmp0
);
4132 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
4133 dc
->cc_op
= CC_OP_FLAGS
;
4135 case 0x3: /* V9 wrasi */
4136 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4137 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0xff);
4138 tcg_gen_st32_tl(cpu_tmp0
, tcg_env
,
4139 offsetof(CPUSPARCState
, asi
));
4141 * End TB to notice changed ASI.
4142 * TODO: Could notice src1 = %g0 and IS_IMM,
4143 * update DisasContext and not exit the TB.
4147 tcg_gen_lookup_and_goto_ptr();
4148 dc
->base
.is_jmp
= DISAS_NORETURN
;
4150 case 0x6: /* V9 wrfprs */
4151 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4152 tcg_gen_trunc_tl_i32(cpu_fprs
, cpu_tmp0
);
4156 tcg_gen_exit_tb(NULL
, 0);
4157 dc
->base
.is_jmp
= DISAS_NORETURN
;
4159 case 0xf: /* V9 sir, nop if user */
4160 #if !defined(CONFIG_USER_ONLY)
4161 if (supervisor(dc
)) {
4166 case 0x13: /* Graphics Status */
4167 if (gen_trap_ifnofpu(dc
)) {
4170 tcg_gen_xor_tl(cpu_gsr
, cpu_src1
, cpu_src2
);
4172 case 0x14: /* Softint set */
4173 if (!supervisor(dc
))
4175 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4176 gen_helper_set_softint(tcg_env
, cpu_tmp0
);
4178 case 0x15: /* Softint clear */
4179 if (!supervisor(dc
))
4181 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4182 gen_helper_clear_softint(tcg_env
, cpu_tmp0
);
4184 case 0x16: /* Softint write */
4185 if (!supervisor(dc
))
4187 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4188 gen_helper_write_softint(tcg_env
, cpu_tmp0
);
4190 case 0x17: /* Tick compare */
4191 #if !defined(CONFIG_USER_ONLY)
4192 if (!supervisor(dc
))
4198 tcg_gen_xor_tl(cpu_tick_cmpr
, cpu_src1
,
4200 r_tickptr
= tcg_temp_new_ptr();
4201 tcg_gen_ld_ptr(r_tickptr
, tcg_env
,
4202 offsetof(CPUSPARCState
, tick
));
4203 translator_io_start(&dc
->base
);
4204 gen_helper_tick_set_limit(r_tickptr
,
4206 /* End TB to handle timer interrupt */
4207 dc
->base
.is_jmp
= DISAS_EXIT
;
4210 case 0x18: /* System tick */
4211 #if !defined(CONFIG_USER_ONLY)
4212 if (!supervisor(dc
))
4218 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
,
4220 r_tickptr
= tcg_temp_new_ptr();
4221 tcg_gen_ld_ptr(r_tickptr
, tcg_env
,
4222 offsetof(CPUSPARCState
, stick
));
4223 translator_io_start(&dc
->base
);
4224 gen_helper_tick_set_count(r_tickptr
,
4226 /* End TB to handle timer interrupt */
4227 dc
->base
.is_jmp
= DISAS_EXIT
;
4230 case 0x19: /* System tick compare */
4231 #if !defined(CONFIG_USER_ONLY)
4232 if (!supervisor(dc
))
4238 tcg_gen_xor_tl(cpu_stick_cmpr
, cpu_src1
,
4240 r_tickptr
= tcg_temp_new_ptr();
4241 tcg_gen_ld_ptr(r_tickptr
, tcg_env
,
4242 offsetof(CPUSPARCState
, stick
));
4243 translator_io_start(&dc
->base
);
4244 gen_helper_tick_set_limit(r_tickptr
,
4246 /* End TB to handle timer interrupt */
4247 dc
->base
.is_jmp
= DISAS_EXIT
;
4251 case 0x10: /* Performance Control */
4252 case 0x11: /* Performance Instrumentation
4254 case 0x12: /* Dispatch Control */
4261 #if !defined(CONFIG_USER_ONLY)
4262 case 0x31: /* wrpsr, V9 saved, restored */
4264 if (!supervisor(dc
))
4266 #ifdef TARGET_SPARC64
4269 gen_helper_saved(tcg_env
);
4272 gen_helper_restored(tcg_env
);
4274 case 2: /* UA2005 allclean */
4275 case 3: /* UA2005 otherw */
4276 case 4: /* UA2005 normalw */
4277 case 5: /* UA2005 invalw */
4283 cpu_tmp0
= tcg_temp_new();
4284 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4285 gen_helper_wrpsr(tcg_env
, cpu_tmp0
);
4286 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
4287 dc
->cc_op
= CC_OP_FLAGS
;
4290 tcg_gen_exit_tb(NULL
, 0);
4291 dc
->base
.is_jmp
= DISAS_NORETURN
;
4295 case 0x32: /* wrwim, V9 wrpr */
4297 if (!supervisor(dc
))
4299 cpu_tmp0
= tcg_temp_new();
4300 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4301 #ifdef TARGET_SPARC64
4307 r_tsptr
= tcg_temp_new_ptr();
4308 gen_load_trap_state_at_tl(r_tsptr
, tcg_env
);
4309 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4310 offsetof(trap_state
, tpc
));
4317 r_tsptr
= tcg_temp_new_ptr();
4318 gen_load_trap_state_at_tl(r_tsptr
, tcg_env
);
4319 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4320 offsetof(trap_state
, tnpc
));
4327 r_tsptr
= tcg_temp_new_ptr();
4328 gen_load_trap_state_at_tl(r_tsptr
, tcg_env
);
4329 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4330 offsetof(trap_state
,
4338 r_tsptr
= tcg_temp_new_ptr();
4339 gen_load_trap_state_at_tl(r_tsptr
, tcg_env
);
4340 tcg_gen_st32_tl(cpu_tmp0
, r_tsptr
,
4341 offsetof(trap_state
, tt
));
4348 r_tickptr
= tcg_temp_new_ptr();
4349 tcg_gen_ld_ptr(r_tickptr
, tcg_env
,
4350 offsetof(CPUSPARCState
, tick
));
4351 translator_io_start(&dc
->base
);
4352 gen_helper_tick_set_count(r_tickptr
,
4354 /* End TB to handle timer interrupt */
4355 dc
->base
.is_jmp
= DISAS_EXIT
;
4359 tcg_gen_mov_tl(cpu_tbr
, cpu_tmp0
);
4363 if (translator_io_start(&dc
->base
)) {
4364 dc
->base
.is_jmp
= DISAS_EXIT
;
4366 gen_helper_wrpstate(tcg_env
, cpu_tmp0
);
4367 dc
->npc
= DYNAMIC_PC
;
4371 tcg_gen_st32_tl(cpu_tmp0
, tcg_env
,
4372 offsetof(CPUSPARCState
, tl
));
4373 dc
->npc
= DYNAMIC_PC
;
4376 if (translator_io_start(&dc
->base
)) {
4377 dc
->base
.is_jmp
= DISAS_EXIT
;
4379 gen_helper_wrpil(tcg_env
, cpu_tmp0
);
4382 gen_helper_wrcwp(tcg_env
, cpu_tmp0
);
4385 tcg_gen_st32_tl(cpu_tmp0
, tcg_env
,
4386 offsetof(CPUSPARCState
,
4389 case 11: // canrestore
4390 tcg_gen_st32_tl(cpu_tmp0
, tcg_env
,
4391 offsetof(CPUSPARCState
,
4394 case 12: // cleanwin
4395 tcg_gen_st32_tl(cpu_tmp0
, tcg_env
,
4396 offsetof(CPUSPARCState
,
4399 case 13: // otherwin
4400 tcg_gen_st32_tl(cpu_tmp0
, tcg_env
,
4401 offsetof(CPUSPARCState
,
4405 tcg_gen_st32_tl(cpu_tmp0
, tcg_env
,
4406 offsetof(CPUSPARCState
,
4409 case 16: // UA2005 gl
4410 CHECK_IU_FEATURE(dc
, GL
);
4411 gen_helper_wrgl(tcg_env
, cpu_tmp0
);
4413 case 26: // UA2005 strand status
4414 CHECK_IU_FEATURE(dc
, HYPV
);
4415 if (!hypervisor(dc
))
4417 tcg_gen_mov_tl(cpu_ssr
, cpu_tmp0
);
4423 tcg_gen_trunc_tl_i32(cpu_wim
, cpu_tmp0
);
4424 if (dc
->def
->nwindows
!= 32) {
4425 tcg_gen_andi_tl(cpu_wim
, cpu_wim
,
4426 (1 << dc
->def
->nwindows
) - 1);
4431 case 0x33: /* wrtbr, UA2005 wrhpr */
4433 #ifndef TARGET_SPARC64
4434 if (!supervisor(dc
))
4436 tcg_gen_xor_tl(cpu_tbr
, cpu_src1
, cpu_src2
);
4438 CHECK_IU_FEATURE(dc
, HYPV
);
4439 if (!hypervisor(dc
))
4441 cpu_tmp0
= tcg_temp_new();
4442 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4445 tcg_gen_st_i64(cpu_tmp0
, tcg_env
,
4446 offsetof(CPUSPARCState
,
4450 tcg_gen_exit_tb(NULL
, 0);
4451 dc
->base
.is_jmp
= DISAS_NORETURN
;
4454 // XXX gen_op_wrhtstate();
4457 tcg_gen_mov_tl(cpu_hintp
, cpu_tmp0
);
4460 tcg_gen_mov_tl(cpu_htba
, cpu_tmp0
);
4462 case 31: // hstick_cmpr
4466 tcg_gen_mov_tl(cpu_hstick_cmpr
, cpu_tmp0
);
4467 r_tickptr
= tcg_temp_new_ptr();
4468 tcg_gen_ld_ptr(r_tickptr
, tcg_env
,
4469 offsetof(CPUSPARCState
, hstick
));
4470 translator_io_start(&dc
->base
);
4471 gen_helper_tick_set_limit(r_tickptr
,
4473 /* End TB to handle timer interrupt */
4474 dc
->base
.is_jmp
= DISAS_EXIT
;
4477 case 6: // hver readonly
4485 #ifdef TARGET_SPARC64
4486 case 0x2c: /* V9 movcc */
4488 int cc
= GET_FIELD_SP(insn
, 11, 12);
4489 int cond
= GET_FIELD_SP(insn
, 14, 17);
4493 if (insn
& (1 << 18)) {
4495 gen_compare(&cmp
, 0, cond
, dc
);
4496 } else if (cc
== 2) {
4497 gen_compare(&cmp
, 1, cond
, dc
);
4502 gen_fcompare(&cmp
, cc
, cond
);
4505 /* The get_src2 above loaded the normal 13-bit
4506 immediate field, not the 11-bit field we have
4507 in movcc. But it did handle the reg case. */
4509 simm
= GET_FIELD_SPs(insn
, 0, 10);
4510 tcg_gen_movi_tl(cpu_src2
, simm
);
4513 dst
= gen_load_gpr(dc
, rd
);
4514 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4517 gen_store_gpr(dc
, rd
, dst
);
4520 case 0x2d: /* V9 sdivx */
4521 gen_helper_sdivx(cpu_dst
, tcg_env
, cpu_src1
, cpu_src2
);
4522 gen_store_gpr(dc
, rd
, cpu_dst
);
4524 case 0x2e: /* V9 popc */
4525 tcg_gen_ctpop_tl(cpu_dst
, cpu_src2
);
4526 gen_store_gpr(dc
, rd
, cpu_dst
);
4528 case 0x2f: /* V9 movr */
4530 int cond
= GET_FIELD_SP(insn
, 10, 12);
4534 gen_compare_reg(&cmp
, cond
, cpu_src1
);
4536 /* The get_src2 above loaded the normal 13-bit
4537 immediate field, not the 10-bit field we have
4538 in movr. But it did handle the reg case. */
4540 simm
= GET_FIELD_SPs(insn
, 0, 9);
4541 tcg_gen_movi_tl(cpu_src2
, simm
);
4544 dst
= gen_load_gpr(dc
, rd
);
4545 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4548 gen_store_gpr(dc
, rd
, dst
);
4556 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4557 #ifdef TARGET_SPARC64
4558 int opf
= GET_FIELD_SP(insn
, 5, 13);
4559 rs1
= GET_FIELD(insn
, 13, 17);
4560 rs2
= GET_FIELD(insn
, 27, 31);
4561 if (gen_trap_ifnofpu(dc
)) {
4566 case 0x000: /* VIS I edge8cc */
4567 CHECK_FPU_FEATURE(dc
, VIS1
);
4568 cpu_src1
= gen_load_gpr(dc
, rs1
);
4569 cpu_src2
= gen_load_gpr(dc
, rs2
);
4570 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4571 gen_store_gpr(dc
, rd
, cpu_dst
);
4573 case 0x001: /* VIS II edge8n */
4574 CHECK_FPU_FEATURE(dc
, VIS2
);
4575 cpu_src1
= gen_load_gpr(dc
, rs1
);
4576 cpu_src2
= gen_load_gpr(dc
, rs2
);
4577 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4578 gen_store_gpr(dc
, rd
, cpu_dst
);
4580 case 0x002: /* VIS I edge8lcc */
4581 CHECK_FPU_FEATURE(dc
, VIS1
);
4582 cpu_src1
= gen_load_gpr(dc
, rs1
);
4583 cpu_src2
= gen_load_gpr(dc
, rs2
);
4584 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4585 gen_store_gpr(dc
, rd
, cpu_dst
);
4587 case 0x003: /* VIS II edge8ln */
4588 CHECK_FPU_FEATURE(dc
, VIS2
);
4589 cpu_src1
= gen_load_gpr(dc
, rs1
);
4590 cpu_src2
= gen_load_gpr(dc
, rs2
);
4591 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4592 gen_store_gpr(dc
, rd
, cpu_dst
);
4594 case 0x004: /* VIS I edge16cc */
4595 CHECK_FPU_FEATURE(dc
, VIS1
);
4596 cpu_src1
= gen_load_gpr(dc
, rs1
);
4597 cpu_src2
= gen_load_gpr(dc
, rs2
);
4598 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4599 gen_store_gpr(dc
, rd
, cpu_dst
);
4601 case 0x005: /* VIS II edge16n */
4602 CHECK_FPU_FEATURE(dc
, VIS2
);
4603 cpu_src1
= gen_load_gpr(dc
, rs1
);
4604 cpu_src2
= gen_load_gpr(dc
, rs2
);
4605 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4606 gen_store_gpr(dc
, rd
, cpu_dst
);
4608 case 0x006: /* VIS I edge16lcc */
4609 CHECK_FPU_FEATURE(dc
, VIS1
);
4610 cpu_src1
= gen_load_gpr(dc
, rs1
);
4611 cpu_src2
= gen_load_gpr(dc
, rs2
);
4612 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4613 gen_store_gpr(dc
, rd
, cpu_dst
);
4615 case 0x007: /* VIS II edge16ln */
4616 CHECK_FPU_FEATURE(dc
, VIS2
);
4617 cpu_src1
= gen_load_gpr(dc
, rs1
);
4618 cpu_src2
= gen_load_gpr(dc
, rs2
);
4619 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4620 gen_store_gpr(dc
, rd
, cpu_dst
);
4622 case 0x008: /* VIS I edge32cc */
4623 CHECK_FPU_FEATURE(dc
, VIS1
);
4624 cpu_src1
= gen_load_gpr(dc
, rs1
);
4625 cpu_src2
= gen_load_gpr(dc
, rs2
);
4626 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4627 gen_store_gpr(dc
, rd
, cpu_dst
);
4629 case 0x009: /* VIS II edge32n */
4630 CHECK_FPU_FEATURE(dc
, VIS2
);
4631 cpu_src1
= gen_load_gpr(dc
, rs1
);
4632 cpu_src2
= gen_load_gpr(dc
, rs2
);
4633 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4634 gen_store_gpr(dc
, rd
, cpu_dst
);
4636 case 0x00a: /* VIS I edge32lcc */
4637 CHECK_FPU_FEATURE(dc
, VIS1
);
4638 cpu_src1
= gen_load_gpr(dc
, rs1
);
4639 cpu_src2
= gen_load_gpr(dc
, rs2
);
4640 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4641 gen_store_gpr(dc
, rd
, cpu_dst
);
4643 case 0x00b: /* VIS II edge32ln */
4644 CHECK_FPU_FEATURE(dc
, VIS2
);
4645 cpu_src1
= gen_load_gpr(dc
, rs1
);
4646 cpu_src2
= gen_load_gpr(dc
, rs2
);
4647 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4648 gen_store_gpr(dc
, rd
, cpu_dst
);
4650 case 0x010: /* VIS I array8 */
4651 CHECK_FPU_FEATURE(dc
, VIS1
);
4652 cpu_src1
= gen_load_gpr(dc
, rs1
);
4653 cpu_src2
= gen_load_gpr(dc
, rs2
);
4654 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4655 gen_store_gpr(dc
, rd
, cpu_dst
);
4657 case 0x012: /* VIS I array16 */
4658 CHECK_FPU_FEATURE(dc
, VIS1
);
4659 cpu_src1
= gen_load_gpr(dc
, rs1
);
4660 cpu_src2
= gen_load_gpr(dc
, rs2
);
4661 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4662 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4663 gen_store_gpr(dc
, rd
, cpu_dst
);
4665 case 0x014: /* VIS I array32 */
4666 CHECK_FPU_FEATURE(dc
, VIS1
);
4667 cpu_src1
= gen_load_gpr(dc
, rs1
);
4668 cpu_src2
= gen_load_gpr(dc
, rs2
);
4669 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4670 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4671 gen_store_gpr(dc
, rd
, cpu_dst
);
4673 case 0x018: /* VIS I alignaddr */
4674 CHECK_FPU_FEATURE(dc
, VIS1
);
4675 cpu_src1
= gen_load_gpr(dc
, rs1
);
4676 cpu_src2
= gen_load_gpr(dc
, rs2
);
4677 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4678 gen_store_gpr(dc
, rd
, cpu_dst
);
4680 case 0x01a: /* VIS I alignaddrl */
4681 CHECK_FPU_FEATURE(dc
, VIS1
);
4682 cpu_src1
= gen_load_gpr(dc
, rs1
);
4683 cpu_src2
= gen_load_gpr(dc
, rs2
);
4684 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4685 gen_store_gpr(dc
, rd
, cpu_dst
);
4687 case 0x019: /* VIS II bmask */
4688 CHECK_FPU_FEATURE(dc
, VIS2
);
4689 cpu_src1
= gen_load_gpr(dc
, rs1
);
4690 cpu_src2
= gen_load_gpr(dc
, rs2
);
4691 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4692 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4693 gen_store_gpr(dc
, rd
, cpu_dst
);
4695 case 0x020: /* VIS I fcmple16 */
4696 CHECK_FPU_FEATURE(dc
, VIS1
);
4697 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4698 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4699 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4700 gen_store_gpr(dc
, rd
, cpu_dst
);
4702 case 0x022: /* VIS I fcmpne16 */
4703 CHECK_FPU_FEATURE(dc
, VIS1
);
4704 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4705 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4706 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4707 gen_store_gpr(dc
, rd
, cpu_dst
);
4709 case 0x024: /* VIS I fcmple32 */
4710 CHECK_FPU_FEATURE(dc
, VIS1
);
4711 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4712 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4713 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4714 gen_store_gpr(dc
, rd
, cpu_dst
);
4716 case 0x026: /* VIS I fcmpne32 */
4717 CHECK_FPU_FEATURE(dc
, VIS1
);
4718 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4719 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4720 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4721 gen_store_gpr(dc
, rd
, cpu_dst
);
4723 case 0x028: /* VIS I fcmpgt16 */
4724 CHECK_FPU_FEATURE(dc
, VIS1
);
4725 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4726 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4727 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4728 gen_store_gpr(dc
, rd
, cpu_dst
);
4730 case 0x02a: /* VIS I fcmpeq16 */
4731 CHECK_FPU_FEATURE(dc
, VIS1
);
4732 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4733 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4734 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4735 gen_store_gpr(dc
, rd
, cpu_dst
);
4737 case 0x02c: /* VIS I fcmpgt32 */
4738 CHECK_FPU_FEATURE(dc
, VIS1
);
4739 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4740 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4741 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4742 gen_store_gpr(dc
, rd
, cpu_dst
);
4744 case 0x02e: /* VIS I fcmpeq32 */
4745 CHECK_FPU_FEATURE(dc
, VIS1
);
4746 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4747 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4748 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4749 gen_store_gpr(dc
, rd
, cpu_dst
);
4751 case 0x031: /* VIS I fmul8x16 */
4752 CHECK_FPU_FEATURE(dc
, VIS1
);
4753 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4755 case 0x033: /* VIS I fmul8x16au */
4756 CHECK_FPU_FEATURE(dc
, VIS1
);
4757 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4759 case 0x035: /* VIS I fmul8x16al */
4760 CHECK_FPU_FEATURE(dc
, VIS1
);
4761 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4763 case 0x036: /* VIS I fmul8sux16 */
4764 CHECK_FPU_FEATURE(dc
, VIS1
);
4765 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4767 case 0x037: /* VIS I fmul8ulx16 */
4768 CHECK_FPU_FEATURE(dc
, VIS1
);
4769 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4771 case 0x038: /* VIS I fmuld8sux16 */
4772 CHECK_FPU_FEATURE(dc
, VIS1
);
4773 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4775 case 0x039: /* VIS I fmuld8ulx16 */
4776 CHECK_FPU_FEATURE(dc
, VIS1
);
4777 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4779 case 0x03a: /* VIS I fpack32 */
4780 CHECK_FPU_FEATURE(dc
, VIS1
);
4781 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4783 case 0x03b: /* VIS I fpack16 */
4784 CHECK_FPU_FEATURE(dc
, VIS1
);
4785 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4786 cpu_dst_32
= gen_dest_fpr_F(dc
);
4787 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4788 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4790 case 0x03d: /* VIS I fpackfix */
4791 CHECK_FPU_FEATURE(dc
, VIS1
);
4792 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4793 cpu_dst_32
= gen_dest_fpr_F(dc
);
4794 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4795 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4797 case 0x03e: /* VIS I pdist */
4798 CHECK_FPU_FEATURE(dc
, VIS1
);
4799 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
4801 case 0x048: /* VIS I faligndata */
4802 CHECK_FPU_FEATURE(dc
, VIS1
);
4803 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
4805 case 0x04b: /* VIS I fpmerge */
4806 CHECK_FPU_FEATURE(dc
, VIS1
);
4807 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
4809 case 0x04c: /* VIS II bshuffle */
4810 CHECK_FPU_FEATURE(dc
, VIS2
);
4811 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
4813 case 0x04d: /* VIS I fexpand */
4814 CHECK_FPU_FEATURE(dc
, VIS1
);
4815 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
4817 case 0x050: /* VIS I fpadd16 */
4818 CHECK_FPU_FEATURE(dc
, VIS1
);
4819 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
4821 case 0x051: /* VIS I fpadd16s */
4822 CHECK_FPU_FEATURE(dc
, VIS1
);
4823 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
4825 case 0x052: /* VIS I fpadd32 */
4826 CHECK_FPU_FEATURE(dc
, VIS1
);
4827 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
4829 case 0x053: /* VIS I fpadd32s */
4830 CHECK_FPU_FEATURE(dc
, VIS1
);
4831 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
4833 case 0x054: /* VIS I fpsub16 */
4834 CHECK_FPU_FEATURE(dc
, VIS1
);
4835 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
4837 case 0x055: /* VIS I fpsub16s */
4838 CHECK_FPU_FEATURE(dc
, VIS1
);
4839 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
4841 case 0x056: /* VIS I fpsub32 */
4842 CHECK_FPU_FEATURE(dc
, VIS1
);
4843 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
4845 case 0x057: /* VIS I fpsub32s */
4846 CHECK_FPU_FEATURE(dc
, VIS1
);
4847 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
4849 case 0x060: /* VIS I fzero */
4850 CHECK_FPU_FEATURE(dc
, VIS1
);
4851 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4852 tcg_gen_movi_i64(cpu_dst_64
, 0);
4853 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4855 case 0x061: /* VIS I fzeros */
4856 CHECK_FPU_FEATURE(dc
, VIS1
);
4857 cpu_dst_32
= gen_dest_fpr_F(dc
);
4858 tcg_gen_movi_i32(cpu_dst_32
, 0);
4859 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4861 case 0x062: /* VIS I fnor */
4862 CHECK_FPU_FEATURE(dc
, VIS1
);
4863 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
4865 case 0x063: /* VIS I fnors */
4866 CHECK_FPU_FEATURE(dc
, VIS1
);
4867 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
4869 case 0x064: /* VIS I fandnot2 */
4870 CHECK_FPU_FEATURE(dc
, VIS1
);
4871 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
4873 case 0x065: /* VIS I fandnot2s */
4874 CHECK_FPU_FEATURE(dc
, VIS1
);
4875 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
4877 case 0x066: /* VIS I fnot2 */
4878 CHECK_FPU_FEATURE(dc
, VIS1
);
4879 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
4881 case 0x067: /* VIS I fnot2s */
4882 CHECK_FPU_FEATURE(dc
, VIS1
);
4883 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
4885 case 0x068: /* VIS I fandnot1 */
4886 CHECK_FPU_FEATURE(dc
, VIS1
);
4887 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
4889 case 0x069: /* VIS I fandnot1s */
4890 CHECK_FPU_FEATURE(dc
, VIS1
);
4891 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
4893 case 0x06a: /* VIS I fnot1 */
4894 CHECK_FPU_FEATURE(dc
, VIS1
);
4895 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
4897 case 0x06b: /* VIS I fnot1s */
4898 CHECK_FPU_FEATURE(dc
, VIS1
);
4899 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
4901 case 0x06c: /* VIS I fxor */
4902 CHECK_FPU_FEATURE(dc
, VIS1
);
4903 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
4905 case 0x06d: /* VIS I fxors */
4906 CHECK_FPU_FEATURE(dc
, VIS1
);
4907 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
4909 case 0x06e: /* VIS I fnand */
4910 CHECK_FPU_FEATURE(dc
, VIS1
);
4911 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
4913 case 0x06f: /* VIS I fnands */
4914 CHECK_FPU_FEATURE(dc
, VIS1
);
4915 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
4917 case 0x070: /* VIS I fand */
4918 CHECK_FPU_FEATURE(dc
, VIS1
);
4919 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
4921 case 0x071: /* VIS I fands */
4922 CHECK_FPU_FEATURE(dc
, VIS1
);
4923 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
4925 case 0x072: /* VIS I fxnor */
4926 CHECK_FPU_FEATURE(dc
, VIS1
);
4927 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
4929 case 0x073: /* VIS I fxnors */
4930 CHECK_FPU_FEATURE(dc
, VIS1
);
4931 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
4933 case 0x074: /* VIS I fsrc1 */
4934 CHECK_FPU_FEATURE(dc
, VIS1
);
4935 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4936 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4938 case 0x075: /* VIS I fsrc1s */
4939 CHECK_FPU_FEATURE(dc
, VIS1
);
4940 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4941 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4943 case 0x076: /* VIS I fornot2 */
4944 CHECK_FPU_FEATURE(dc
, VIS1
);
4945 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
4947 case 0x077: /* VIS I fornot2s */
4948 CHECK_FPU_FEATURE(dc
, VIS1
);
4949 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
4951 case 0x078: /* VIS I fsrc2 */
4952 CHECK_FPU_FEATURE(dc
, VIS1
);
4953 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4954 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4956 case 0x079: /* VIS I fsrc2s */
4957 CHECK_FPU_FEATURE(dc
, VIS1
);
4958 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4959 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4961 case 0x07a: /* VIS I fornot1 */
4962 CHECK_FPU_FEATURE(dc
, VIS1
);
4963 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
4965 case 0x07b: /* VIS I fornot1s */
4966 CHECK_FPU_FEATURE(dc
, VIS1
);
4967 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
4969 case 0x07c: /* VIS I for */
4970 CHECK_FPU_FEATURE(dc
, VIS1
);
4971 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
4973 case 0x07d: /* VIS I fors */
4974 CHECK_FPU_FEATURE(dc
, VIS1
);
4975 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
4977 case 0x07e: /* VIS I fone */
4978 CHECK_FPU_FEATURE(dc
, VIS1
);
4979 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4980 tcg_gen_movi_i64(cpu_dst_64
, -1);
4981 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4983 case 0x07f: /* VIS I fones */
4984 CHECK_FPU_FEATURE(dc
, VIS1
);
4985 cpu_dst_32
= gen_dest_fpr_F(dc
);
4986 tcg_gen_movi_i32(cpu_dst_32
, -1);
4987 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4989 case 0x080: /* VIS I shutdown */
4990 case 0x081: /* VIS II siam */
4999 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
5000 #ifdef TARGET_SPARC64
5005 #ifdef TARGET_SPARC64
5006 } else if (xop
== 0x39) { /* V9 return */
5008 cpu_src1
= get_src1(dc
, insn
);
5009 cpu_tmp0
= tcg_temp_new();
5010 if (IS_IMM
) { /* immediate */
5011 simm
= GET_FIELDs(insn
, 19, 31);
5012 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
5013 } else { /* register */
5014 rs2
= GET_FIELD(insn
, 27, 31);
5016 cpu_src2
= gen_load_gpr(dc
, rs2
);
5017 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
5019 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
5022 gen_helper_restore(tcg_env
);
5024 gen_check_align(cpu_tmp0
, 3);
5025 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5026 dc
->npc
= DYNAMIC_PC_LOOKUP
;
5030 cpu_src1
= get_src1(dc
, insn
);
5031 cpu_tmp0
= tcg_temp_new();
5032 if (IS_IMM
) { /* immediate */
5033 simm
= GET_FIELDs(insn
, 19, 31);
5034 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
5035 } else { /* register */
5036 rs2
= GET_FIELD(insn
, 27, 31);
5038 cpu_src2
= gen_load_gpr(dc
, rs2
);
5039 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
5041 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
5045 case 0x38: /* jmpl */
5047 TCGv t
= gen_dest_gpr(dc
, rd
);
5048 tcg_gen_movi_tl(t
, dc
->pc
);
5049 gen_store_gpr(dc
, rd
, t
);
5052 gen_check_align(cpu_tmp0
, 3);
5053 gen_address_mask(dc
, cpu_tmp0
);
5054 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5055 dc
->npc
= DYNAMIC_PC_LOOKUP
;
5058 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5059 case 0x39: /* rett, V9 return */
5061 if (!supervisor(dc
))
5064 gen_check_align(cpu_tmp0
, 3);
5065 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5066 dc
->npc
= DYNAMIC_PC
;
5067 gen_helper_rett(tcg_env
);
5071 case 0x3b: /* flush */
5072 if (!((dc
)->def
->features
& CPU_FEATURE_FLUSH
))
5076 case 0x3c: /* save */
5077 gen_helper_save(tcg_env
);
5078 gen_store_gpr(dc
, rd
, cpu_tmp0
);
5080 case 0x3d: /* restore */
5081 gen_helper_restore(tcg_env
);
5082 gen_store_gpr(dc
, rd
, cpu_tmp0
);
5084 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5085 case 0x3e: /* V9 done/retry */
5089 if (!supervisor(dc
))
5091 dc
->npc
= DYNAMIC_PC
;
5092 dc
->pc
= DYNAMIC_PC
;
5093 translator_io_start(&dc
->base
);
5094 gen_helper_done(tcg_env
);
5097 if (!supervisor(dc
))
5099 dc
->npc
= DYNAMIC_PC
;
5100 dc
->pc
= DYNAMIC_PC
;
5101 translator_io_start(&dc
->base
);
5102 gen_helper_retry(tcg_env
);
5117 case 3: /* load/store instructions */
5119 unsigned int xop
= GET_FIELD(insn
, 7, 12);
5120 /* ??? gen_address_mask prevents us from using a source
5121 register directly. Always generate a temporary. */
5122 TCGv cpu_addr
= tcg_temp_new();
5124 tcg_gen_mov_tl(cpu_addr
, get_src1(dc
, insn
));
5125 if (xop
== 0x3c || xop
== 0x3e) {
5126 /* V9 casa/casxa : no offset */
5127 } else if (IS_IMM
) { /* immediate */
5128 simm
= GET_FIELDs(insn
, 19, 31);
5130 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, simm
);
5132 } else { /* register */
5133 rs2
= GET_FIELD(insn
, 27, 31);
5135 tcg_gen_add_tl(cpu_addr
, cpu_addr
, gen_load_gpr(dc
, rs2
));
5138 if (xop
< 4 || (xop
> 7 && xop
< 0x14 && xop
!= 0x0e) ||
5139 (xop
> 0x17 && xop
<= 0x1d ) ||
5140 (xop
> 0x2c && xop
<= 0x33) || xop
== 0x1f || xop
== 0x3d) {
5141 TCGv cpu_val
= gen_dest_gpr(dc
, rd
);
5144 case 0x0: /* ld, V9 lduw, load unsigned word */
5145 gen_address_mask(dc
, cpu_addr
);
5146 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5147 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5149 case 0x1: /* ldub, load unsigned byte */
5150 gen_address_mask(dc
, cpu_addr
);
5151 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5152 dc
->mem_idx
, MO_UB
);
5154 case 0x2: /* lduh, load unsigned halfword */
5155 gen_address_mask(dc
, cpu_addr
);
5156 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5157 dc
->mem_idx
, MO_TEUW
| MO_ALIGN
);
5159 case 0x3: /* ldd, load double word */
5165 gen_address_mask(dc
, cpu_addr
);
5166 t64
= tcg_temp_new_i64();
5167 tcg_gen_qemu_ld_i64(t64
, cpu_addr
,
5168 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5169 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
5170 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
5171 gen_store_gpr(dc
, rd
+ 1, cpu_val
);
5172 tcg_gen_shri_i64(t64
, t64
, 32);
5173 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
5174 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
5177 case 0x9: /* ldsb, load signed byte */
5178 gen_address_mask(dc
, cpu_addr
);
5179 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
, dc
->mem_idx
, MO_SB
);
5181 case 0xa: /* ldsh, load signed halfword */
5182 gen_address_mask(dc
, cpu_addr
);
5183 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5184 dc
->mem_idx
, MO_TESW
| MO_ALIGN
);
5186 case 0xd: /* ldstub */
5187 gen_ldstub(dc
, cpu_val
, cpu_addr
, dc
->mem_idx
);
5190 /* swap, swap register with memory. Also atomically */
5191 CHECK_IU_FEATURE(dc
, SWAP
);
5192 cpu_src1
= gen_load_gpr(dc
, rd
);
5193 gen_swap(dc
, cpu_val
, cpu_src1
, cpu_addr
,
5194 dc
->mem_idx
, MO_TEUL
);
5196 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5197 case 0x10: /* lda, V9 lduwa, load word alternate */
5198 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUL
);
5200 case 0x11: /* lduba, load unsigned byte alternate */
5201 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_UB
);
5203 case 0x12: /* lduha, load unsigned halfword alternate */
5204 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUW
);
5206 case 0x13: /* ldda, load double word alternate */
5210 gen_ldda_asi(dc
, cpu_addr
, insn
, rd
);
5212 case 0x19: /* ldsba, load signed byte alternate */
5213 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_SB
);
5215 case 0x1a: /* ldsha, load signed halfword alternate */
5216 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TESW
);
5218 case 0x1d: /* ldstuba -- XXX: should be atomically */
5219 gen_ldstub_asi(dc
, cpu_val
, cpu_addr
, insn
);
5221 case 0x1f: /* swapa, swap reg with alt. memory. Also
5223 CHECK_IU_FEATURE(dc
, SWAP
);
5224 cpu_src1
= gen_load_gpr(dc
, rd
);
5225 gen_swap_asi(dc
, cpu_val
, cpu_src1
, cpu_addr
, insn
);
5228 #ifndef TARGET_SPARC64
5229 case 0x30: /* ldc */
5230 case 0x31: /* ldcsr */
5231 case 0x33: /* lddc */
5235 #ifdef TARGET_SPARC64
5236 case 0x08: /* V9 ldsw */
5237 gen_address_mask(dc
, cpu_addr
);
5238 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5239 dc
->mem_idx
, MO_TESL
| MO_ALIGN
);
5241 case 0x0b: /* V9 ldx */
5242 gen_address_mask(dc
, cpu_addr
);
5243 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5244 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5246 case 0x18: /* V9 ldswa */
5247 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TESL
);
5249 case 0x1b: /* V9 ldxa */
5250 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUQ
);
5252 case 0x2d: /* V9 prefetch, no effect */
5254 case 0x30: /* V9 ldfa */
5255 if (gen_trap_ifnofpu(dc
)) {
5258 gen_ldf_asi(dc
, cpu_addr
, insn
, 4, rd
);
5259 gen_update_fprs_dirty(dc
, rd
);
5261 case 0x33: /* V9 lddfa */
5262 if (gen_trap_ifnofpu(dc
)) {
5265 gen_ldf_asi(dc
, cpu_addr
, insn
, 8, DFPREG(rd
));
5266 gen_update_fprs_dirty(dc
, DFPREG(rd
));
5268 case 0x3d: /* V9 prefetcha, no effect */
5270 case 0x32: /* V9 ldqfa */
5271 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5272 if (gen_trap_ifnofpu(dc
)) {
5275 gen_ldf_asi(dc
, cpu_addr
, insn
, 16, QFPREG(rd
));
5276 gen_update_fprs_dirty(dc
, QFPREG(rd
));
5282 gen_store_gpr(dc
, rd
, cpu_val
);
5283 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5286 } else if (xop
>= 0x20 && xop
< 0x24) {
5287 if (gen_trap_ifnofpu(dc
)) {
5291 case 0x20: /* ldf, load fpreg */
5292 gen_address_mask(dc
, cpu_addr
);
5293 cpu_dst_32
= gen_dest_fpr_F(dc
);
5294 tcg_gen_qemu_ld_i32(cpu_dst_32
, cpu_addr
,
5295 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5296 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5298 case 0x21: /* ldfsr, V9 ldxfsr */
5299 #ifdef TARGET_SPARC64
5300 gen_address_mask(dc
, cpu_addr
);
5302 TCGv_i64 t64
= tcg_temp_new_i64();
5303 tcg_gen_qemu_ld_i64(t64
, cpu_addr
,
5304 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5305 gen_helper_ldxfsr(cpu_fsr
, tcg_env
, cpu_fsr
, t64
);
5309 cpu_dst_32
= tcg_temp_new_i32();
5310 tcg_gen_qemu_ld_i32(cpu_dst_32
, cpu_addr
,
5311 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5312 gen_helper_ldfsr(cpu_fsr
, tcg_env
, cpu_fsr
, cpu_dst_32
);
5314 case 0x22: /* ldqf, load quad fpreg */
5315 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5316 gen_address_mask(dc
, cpu_addr
);
5317 cpu_src1_64
= tcg_temp_new_i64();
5318 tcg_gen_qemu_ld_i64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
,
5319 MO_TEUQ
| MO_ALIGN_4
);
5320 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, 8);
5321 cpu_src2_64
= tcg_temp_new_i64();
5322 tcg_gen_qemu_ld_i64(cpu_src2_64
, cpu_addr
, dc
->mem_idx
,
5323 MO_TEUQ
| MO_ALIGN_4
);
5324 gen_store_fpr_Q(dc
, rd
, cpu_src1_64
, cpu_src2_64
);
5326 case 0x23: /* lddf, load double fpreg */
5327 gen_address_mask(dc
, cpu_addr
);
5328 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5329 tcg_gen_qemu_ld_i64(cpu_dst_64
, cpu_addr
, dc
->mem_idx
,
5330 MO_TEUQ
| MO_ALIGN_4
);
5331 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5336 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
5337 xop
== 0xe || xop
== 0x1e) {
5338 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
5341 case 0x4: /* st, store word */
5342 gen_address_mask(dc
, cpu_addr
);
5343 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5344 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5346 case 0x5: /* stb, store byte */
5347 gen_address_mask(dc
, cpu_addr
);
5348 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
, dc
->mem_idx
, MO_UB
);
5350 case 0x6: /* sth, store halfword */
5351 gen_address_mask(dc
, cpu_addr
);
5352 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5353 dc
->mem_idx
, MO_TEUW
| MO_ALIGN
);
5355 case 0x7: /* std, store double word */
5362 gen_address_mask(dc
, cpu_addr
);
5363 lo
= gen_load_gpr(dc
, rd
+ 1);
5364 t64
= tcg_temp_new_i64();
5365 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
5366 tcg_gen_qemu_st_i64(t64
, cpu_addr
,
5367 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5370 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5371 case 0x14: /* sta, V9 stwa, store word alternate */
5372 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUL
);
5374 case 0x15: /* stba, store byte alternate */
5375 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_UB
);
5377 case 0x16: /* stha, store halfword alternate */
5378 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUW
);
5380 case 0x17: /* stda, store double word alternate */
5384 gen_stda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
5387 #ifdef TARGET_SPARC64
5388 case 0x0e: /* V9 stx */
5389 gen_address_mask(dc
, cpu_addr
);
5390 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5391 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5393 case 0x1e: /* V9 stxa */
5394 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUQ
);
5400 } else if (xop
> 0x23 && xop
< 0x28) {
5401 if (gen_trap_ifnofpu(dc
)) {
5405 case 0x24: /* stf, store fpreg */
5406 gen_address_mask(dc
, cpu_addr
);
5407 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
5408 tcg_gen_qemu_st_i32(cpu_src1_32
, cpu_addr
,
5409 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5411 case 0x25: /* stfsr, V9 stxfsr */
5413 #ifdef TARGET_SPARC64
5414 gen_address_mask(dc
, cpu_addr
);
5416 tcg_gen_qemu_st_tl(cpu_fsr
, cpu_addr
,
5417 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5421 tcg_gen_qemu_st_tl(cpu_fsr
, cpu_addr
,
5422 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5426 #ifdef TARGET_SPARC64
5427 /* V9 stqf, store quad fpreg */
5428 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5429 gen_address_mask(dc
, cpu_addr
);
5430 /* ??? While stqf only requires 4-byte alignment, it is
5431 legal for the cpu to signal the unaligned exception.
5432 The OS trap handler is then required to fix it up.
5433 For qemu, this avoids having to probe the second page
5434 before performing the first write. */
5435 cpu_src1_64
= gen_load_fpr_Q0(dc
, rd
);
5436 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
,
5437 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN_16
);
5438 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, 8);
5439 cpu_src2_64
= gen_load_fpr_Q1(dc
, rd
);
5440 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
,
5441 dc
->mem_idx
, MO_TEUQ
);
5443 #else /* !TARGET_SPARC64 */
5444 /* stdfq, store floating point queue */
5445 #if defined(CONFIG_USER_ONLY)
5448 if (!supervisor(dc
))
5450 if (gen_trap_ifnofpu(dc
)) {
5456 case 0x27: /* stdf, store double fpreg */
5457 gen_address_mask(dc
, cpu_addr
);
5458 cpu_src1_64
= gen_load_fpr_D(dc
, rd
);
5459 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
,
5460 MO_TEUQ
| MO_ALIGN_4
);
5465 } else if (xop
> 0x33 && xop
< 0x3f) {
5467 #ifdef TARGET_SPARC64
5468 case 0x34: /* V9 stfa */
5469 if (gen_trap_ifnofpu(dc
)) {
5472 gen_stf_asi(dc
, cpu_addr
, insn
, 4, rd
);
5474 case 0x36: /* V9 stqfa */
5476 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5477 if (gen_trap_ifnofpu(dc
)) {
5480 gen_stf_asi(dc
, cpu_addr
, insn
, 16, QFPREG(rd
));
5483 case 0x37: /* V9 stdfa */
5484 if (gen_trap_ifnofpu(dc
)) {
5487 gen_stf_asi(dc
, cpu_addr
, insn
, 8, DFPREG(rd
));
5489 case 0x3e: /* V9 casxa */
5490 rs2
= GET_FIELD(insn
, 27, 31);
5491 cpu_src2
= gen_load_gpr(dc
, rs2
);
5492 gen_casx_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5495 case 0x34: /* stc */
5496 case 0x35: /* stcsr */
5497 case 0x36: /* stdcq */
5498 case 0x37: /* stdc */
5501 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5502 case 0x3c: /* V9 or LEON3 casa */
5503 #ifndef TARGET_SPARC64
5504 CHECK_IU_FEATURE(dc
, CASA
);
5506 rs2
= GET_FIELD(insn
, 27, 31);
5507 cpu_src2
= gen_load_gpr(dc
, rs2
);
5508 gen_cas_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5520 /* default case for non jump instructions */
5524 case DYNAMIC_PC_LOOKUP
:
5529 /* we can do a static jump */
5530 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
5531 dc
->base
.is_jmp
= DISAS_NORETURN
;
5534 g_assert_not_reached();
5538 dc
->npc
= dc
->npc
+ 4;
5543 gen_exception(dc
, TT_ILL_INSN
);
5546 gen_exception(dc
, TT_UNIMP_FLUSH
);
5548 #if !defined(CONFIG_USER_ONLY)
5550 gen_exception(dc
, TT_PRIV_INSN
);
5554 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
5556 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5558 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
5561 #ifndef TARGET_SPARC64
5563 gen_exception(dc
, TT_NCP_INSN
);
/*
 * Translator hook: set up the per-TB DisasContext before translation
 * of a new translation block begins.  Pulls the start PC/NPC and the
 * CPU mode bits out of the TranslationBlock flags into *dc.
 * NOTE(review): this chunk is garbled/sampled — code tokens are kept
 * byte-identical below; only comments have been added.
 */
5568 static void sparc_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
5570 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5571 CPUSPARCState
*env
= cs
->env_ptr
;
/* pc is the TB's first instruction; npc was stashed in tb->cs_base. */
5574 dc
->pc
= dc
->base
.pc_first
;
5575 dc
->npc
= (target_ulong
)dc
->base
.tb
->cs_base
;
/* Condition-code state is unknown at TB entry, so start dynamic. */
5576 dc
->cc_op
= CC_OP_DYNAMIC
;
/* MMU index and FPU/address-mask enables are encoded in tb->flags. */
5577 dc
->mem_idx
= dc
->base
.tb
->flags
& TB_FLAG_MMU_MASK
;
5578 dc
->def
= &env
->def
;
5579 dc
->fpu_enabled
= tb_fpu_enabled(dc
->base
.tb
->flags
);
5580 dc
->address_mask_32bit
= tb_am_enabled(dc
->base
.tb
->flags
);
5581 #ifndef CONFIG_USER_ONLY
5582 dc
->supervisor
= (dc
->base
.tb
->flags
& TB_FLAG_SUPER
) != 0;
5584 #ifdef TARGET_SPARC64
/* Default ASI and (system-mode) hypervisor state, SPARC64 only. */
5586 dc
->asi
= (dc
->base
.tb
->flags
>> TB_FLAG_ASI_SHIFT
) & 0xff;
5587 #ifndef CONFIG_USER_ONLY
5588 dc
->hypervisor
= (dc
->base
.tb
->flags
& TB_FLAG_HYPER
) != 0;
5592 * if we reach a page boundary, we stop generation so that the
5593 * PC of a TT_TFAULT exception is always in the right page
/* Cap max_insns so the TB never crosses a target page boundary. */
5595 bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
5596 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    /* Nothing to emit before the first instruction of a TB. */
}
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    /*
     * Emit the insn-start data (pc, npc) that restore_state_to_opc
     * later reads back as data[0]/data[1].  A non-static npc is
     * tagged in its low two bits (DYNAMIC_PC / DYNAMIC_PC_LOOKUP /
     * JUMP_PC, see the defines at the top of the file).
     */
    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /*
             * Conditional branch: record the taken target tagged with
             * JUMP_PC.  The not-taken target is always the sequential
             * pc + 4, so it need not be recorded (asserted here).
             */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Both dynamic variants are recorded as plain DYNAMIC_PC. */
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    unsigned int insn;

    /* Fetch one 32-bit instruction word and translate it. */
    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;
    disas_sparc_insn(dc, insn);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /*
     * disas_sparc_insn advances dc->pc itself; if it no longer matches
     * the sequential pc_next, control flow changed — end the TB.
     */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /*
         * At least one of pc/npc is dynamic (low bits set).  We may
         * still chain via goto_ptr unless a value is fully dynamic
         * (DYNAMIC_PC), in which case we must return to the main loop.
         */
        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* Static pc: materialize it in the PC global. */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        /* Flush npc (handles the static/dynamic/JUMP_PC cases). */
        save_npc(dc);
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        /* Exception or unconditional exit already emitted. */
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }
}
5695 static void sparc_tr_disas_log(const DisasContextBase
*dcbase
,
5696 CPUState
*cpu
, FILE *logfile
)
5698 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
5699 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
/* Hooks wiring the SPARC front end into the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
5711 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
5712 target_ulong pc
, void *host_pc
)
5714 DisasContext dc
= {};
5716 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &sparc_tr_ops
, &dc
.base
);
/*
 * Register the CPUSPARCState fields that the translator accesses as
 * TCG globals.  Called once at translation-engine startup.
 */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* Double-precision register pairs, hence even numbers only. */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* 32-bit globals: pointer to the TCGv_i32, env offset, debug name. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    /* target_ulong-sized globals, same layout as r32 above. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };

    unsigned int i;

    /* Pointer to the current register window, used for %o/%l/%i below. */
    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 is hardwired to zero and never backed by storage. */
    cpu_regs[0] = NULL;
    /* Globals %g1-%g7 live directly in env. */
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers %o/%l/%i are addressed through regwptr. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(tcg_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
5805 void sparc_restore_state_to_opc(CPUState
*cs
,
5806 const TranslationBlock
*tb
,
5807 const uint64_t *data
)
5809 SPARCCPU
*cpu
= SPARC_CPU(cs
);
5810 CPUSPARCState
*env
= &cpu
->env
;
5811 target_ulong pc
= data
[0];
5812 target_ulong npc
= data
[1];
5815 if (npc
== DYNAMIC_PC
) {
5816 /* dynamic NPC: already stored */
5817 } else if (npc
& JUMP_PC
) {
5818 /* jump PC: use 'cond' and the jump targets of the translation */
5820 env
->npc
= npc
& ~3;