4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2.1 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "exec/cpu_ldst.h"
30 #include "exec/helper-gen.h"
32 #include "exec/translator.h"
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
40 /* Dynamic PC, must exit to main loop. */
42 /* Dynamic PC, one of two values according to jump_pc[T2]. */
44 /* Dynamic PC, may lookup next TB. */
45 #define DYNAMIC_PC_LOOKUP 3
47 #define DISAS_EXIT DISAS_TARGET_0
49 /* global register indexes */
50 static TCGv_ptr cpu_regwptr
;
51 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
52 static TCGv_i32 cpu_cc_op
;
53 static TCGv_i32 cpu_psr
;
54 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
55 static TCGv cpu_regs
[32];
57 #ifndef CONFIG_USER_ONLY
62 static TCGv_i32 cpu_xcc
, cpu_fprs
;
64 static TCGv cpu_tick_cmpr
, cpu_stick_cmpr
, cpu_hstick_cmpr
;
65 static TCGv cpu_hintp
, cpu_htba
, cpu_hver
, cpu_ssr
, cpu_ver
;
69 /* Floating point registers */
70 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
72 typedef struct DisasContext
{
73 DisasContextBase base
;
74 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
75 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
76 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
79 bool address_mask_32bit
;
80 #ifndef CONFIG_USER_ONLY
87 uint32_t cc_op
; /* current CC operation */
101 // This function uses non-native bit order
102 #define GET_FIELD(X, FROM, TO) \
103 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
105 // This function uses the order in the manuals, i.e. bit 0 is 2^0
106 #define GET_FIELD_SP(X, FROM, TO) \
107 GET_FIELD(X, 31 - (TO), 31 - (FROM))
109 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
110 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
112 #ifdef TARGET_SPARC64
113 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
114 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
116 #define DFPREG(r) (r & 0x1e)
117 #define QFPREG(r) (r & 0x1c)
120 #define UA2005_HTRAP_MASK 0xff
121 #define V8_TRAP_MASK 0x7f
/*
 * Sign-extend the low LEN bits of X to a full int.
 *
 * The left shift is performed on an unsigned copy: left-shifting a
 * signed int so that bits reach or cross the sign bit is undefined
 * behavior in C.  The final right shift of a (possibly negative) int
 * is implementation-defined but is an arithmetic shift on every
 * compiler QEMU supports.
 */
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (int)((unsigned int)x << len) >> len;
}
129 #define IS_IMM (insn & (1<<13))
131 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
133 #if defined(TARGET_SPARC64)
134 int bit
= (rd
< 32) ? 1 : 2;
135 /* If we know we've already set this bit within the TB,
136 we can avoid setting it again. */
137 if (!(dc
->fprs_dirty
& bit
)) {
138 dc
->fprs_dirty
|= bit
;
139 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
144 /* floating point registers moves */
145 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
147 TCGv_i32 ret
= tcg_temp_new_i32();
149 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
151 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
156 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
158 TCGv_i64 t
= tcg_temp_new_i64();
160 tcg_gen_extu_i32_i64(t
, v
);
161 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
162 (dst
& 1 ? 0 : 32), 32);
163 gen_update_fprs_dirty(dc
, dst
);
166 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
168 return tcg_temp_new_i32();
171 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
174 return cpu_fpr
[src
/ 2];
177 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
180 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
181 gen_update_fprs_dirty(dc
, dst
);
184 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
186 return cpu_fpr
[DFPREG(dst
) / 2];
189 static void gen_op_load_fpr_QT0(unsigned int src
)
191 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
192 offsetof(CPU_QuadU
, ll
.upper
));
193 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
194 offsetof(CPU_QuadU
, ll
.lower
));
197 static void gen_op_load_fpr_QT1(unsigned int src
)
199 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
200 offsetof(CPU_QuadU
, ll
.upper
));
201 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
202 offsetof(CPU_QuadU
, ll
.lower
));
205 static void gen_op_store_QT0_fpr(unsigned int dst
)
207 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
208 offsetof(CPU_QuadU
, ll
.upper
));
209 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
210 offsetof(CPU_QuadU
, ll
.lower
));
213 static void gen_store_fpr_Q(DisasContext
*dc
, unsigned int dst
,
214 TCGv_i64 v1
, TCGv_i64 v2
)
218 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v1
);
219 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2 + 1], v2
);
220 gen_update_fprs_dirty(dc
, dst
);
223 #ifdef TARGET_SPARC64
224 static TCGv_i64
gen_load_fpr_Q0(DisasContext
*dc
, unsigned int src
)
227 return cpu_fpr
[src
/ 2];
230 static TCGv_i64
gen_load_fpr_Q1(DisasContext
*dc
, unsigned int src
)
233 return cpu_fpr
[src
/ 2 + 1];
236 static void gen_move_Q(DisasContext
*dc
, unsigned int rd
, unsigned int rs
)
241 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
242 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
243 gen_update_fprs_dirty(dc
, rd
);
248 #ifdef CONFIG_USER_ONLY
249 #define supervisor(dc) 0
250 #ifdef TARGET_SPARC64
251 #define hypervisor(dc) 0
254 #ifdef TARGET_SPARC64
255 #define hypervisor(dc) (dc->hypervisor)
256 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
258 #define supervisor(dc) (dc->supervisor)
262 #ifdef TARGET_SPARC64
264 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
266 #define AM_CHECK(dc) (1)
270 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
272 #ifdef TARGET_SPARC64
274 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
278 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
282 return cpu_regs
[reg
];
284 TCGv t
= tcg_temp_new();
285 tcg_gen_movi_tl(t
, 0);
290 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
294 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
298 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
302 return cpu_regs
[reg
];
304 return tcg_temp_new();
308 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
310 return translator_use_goto_tb(&s
->base
, pc
) &&
311 translator_use_goto_tb(&s
->base
, npc
);
314 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
315 target_ulong pc
, target_ulong npc
)
317 if (use_goto_tb(s
, pc
, npc
)) {
318 /* jump to same page: we can use a direct jump */
319 tcg_gen_goto_tb(tb_num
);
320 tcg_gen_movi_tl(cpu_pc
, pc
);
321 tcg_gen_movi_tl(cpu_npc
, npc
);
322 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
324 /* jump to another page: we can use an indirect jump */
325 tcg_gen_movi_tl(cpu_pc
, pc
);
326 tcg_gen_movi_tl(cpu_npc
, npc
);
327 tcg_gen_lookup_and_goto_ptr();
332 static void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
334 tcg_gen_extu_i32_tl(reg
, src
);
335 tcg_gen_extract_tl(reg
, reg
, PSR_NEG_SHIFT
, 1);
338 static void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
340 tcg_gen_extu_i32_tl(reg
, src
);
341 tcg_gen_extract_tl(reg
, reg
, PSR_ZERO_SHIFT
, 1);
344 static void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
346 tcg_gen_extu_i32_tl(reg
, src
);
347 tcg_gen_extract_tl(reg
, reg
, PSR_OVF_SHIFT
, 1);
350 static void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
352 tcg_gen_extu_i32_tl(reg
, src
);
353 tcg_gen_extract_tl(reg
, reg
, PSR_CARRY_SHIFT
, 1);
356 static void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
358 tcg_gen_mov_tl(cpu_cc_src
, src1
);
359 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
360 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
361 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
364 static TCGv_i32
gen_add32_carry32(void)
366 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
368 /* Carry is computed from a previous add: (dst < src) */
369 #if TARGET_LONG_BITS == 64
370 cc_src1_32
= tcg_temp_new_i32();
371 cc_src2_32
= tcg_temp_new_i32();
372 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_dst
);
373 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src
);
375 cc_src1_32
= cpu_cc_dst
;
376 cc_src2_32
= cpu_cc_src
;
379 carry_32
= tcg_temp_new_i32();
380 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
385 static TCGv_i32
gen_sub32_carry32(void)
387 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
389 /* Carry is computed from a previous borrow: (src1 < src2) */
390 #if TARGET_LONG_BITS == 64
391 cc_src1_32
= tcg_temp_new_i32();
392 cc_src2_32
= tcg_temp_new_i32();
393 tcg_gen_extrl_i64_i32(cc_src1_32
, cpu_cc_src
);
394 tcg_gen_extrl_i64_i32(cc_src2_32
, cpu_cc_src2
);
396 cc_src1_32
= cpu_cc_src
;
397 cc_src2_32
= cpu_cc_src2
;
400 carry_32
= tcg_temp_new_i32();
401 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
406 static void gen_op_addx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
407 TCGv src2
, int update_cc
)
415 /* Carry is known to be zero. Fall back to plain ADD. */
417 gen_op_add_cc(dst
, src1
, src2
);
419 tcg_gen_add_tl(dst
, src1
, src2
);
426 if (TARGET_LONG_BITS
== 32) {
427 /* We can re-use the host's hardware carry generation by using
428 an ADD2 opcode. We discard the low part of the output.
429 Ideally we'd combine this operation with the add that
430 generated the carry in the first place. */
431 carry
= tcg_temp_new();
432 tcg_gen_add2_tl(carry
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
435 carry_32
= gen_add32_carry32();
441 carry_32
= gen_sub32_carry32();
445 /* We need external help to produce the carry. */
446 carry_32
= tcg_temp_new_i32();
447 gen_helper_compute_C_icc(carry_32
, cpu_env
);
451 #if TARGET_LONG_BITS == 64
452 carry
= tcg_temp_new();
453 tcg_gen_extu_i32_i64(carry
, carry_32
);
458 tcg_gen_add_tl(dst
, src1
, src2
);
459 tcg_gen_add_tl(dst
, dst
, carry
);
463 tcg_gen_mov_tl(cpu_cc_src
, src1
);
464 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
465 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
466 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADDX
);
467 dc
->cc_op
= CC_OP_ADDX
;
471 static void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
473 tcg_gen_mov_tl(cpu_cc_src
, src1
);
474 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
475 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
476 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
479 static void gen_op_subx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
480 TCGv src2
, int update_cc
)
488 /* Carry is known to be zero. Fall back to plain SUB. */
490 gen_op_sub_cc(dst
, src1
, src2
);
492 tcg_gen_sub_tl(dst
, src1
, src2
);
499 carry_32
= gen_add32_carry32();
505 if (TARGET_LONG_BITS
== 32) {
506 /* We can re-use the host's hardware carry generation by using
507 a SUB2 opcode. We discard the low part of the output.
508 Ideally we'd combine this operation with the add that
509 generated the carry in the first place. */
510 carry
= tcg_temp_new();
511 tcg_gen_sub2_tl(carry
, dst
, cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
514 carry_32
= gen_sub32_carry32();
518 /* We need external help to produce the carry. */
519 carry_32
= tcg_temp_new_i32();
520 gen_helper_compute_C_icc(carry_32
, cpu_env
);
524 #if TARGET_LONG_BITS == 64
525 carry
= tcg_temp_new();
526 tcg_gen_extu_i32_i64(carry
, carry_32
);
531 tcg_gen_sub_tl(dst
, src1
, src2
);
532 tcg_gen_sub_tl(dst
, dst
, carry
);
536 tcg_gen_mov_tl(cpu_cc_src
, src1
);
537 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
538 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
539 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUBX
);
540 dc
->cc_op
= CC_OP_SUBX
;
544 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
546 TCGv r_temp
, zero
, t0
;
548 r_temp
= tcg_temp_new();
555 zero
= tcg_constant_tl(0);
556 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
557 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
558 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
559 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
563 // env->y = (b2 << 31) | (env->y >> 1);
564 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
565 tcg_gen_deposit_tl(cpu_y
, t0
, cpu_cc_src
, 31, 1);
568 gen_mov_reg_N(t0
, cpu_psr
);
569 gen_mov_reg_V(r_temp
, cpu_psr
);
570 tcg_gen_xor_tl(t0
, t0
, r_temp
);
572 // T0 = (b1 << 31) | (T0 >> 1);
574 tcg_gen_shli_tl(t0
, t0
, 31);
575 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
576 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t0
);
578 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
580 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
583 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
585 #if TARGET_LONG_BITS == 32
587 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
589 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
592 TCGv t0
= tcg_temp_new_i64();
593 TCGv t1
= tcg_temp_new_i64();
596 tcg_gen_ext32s_i64(t0
, src1
);
597 tcg_gen_ext32s_i64(t1
, src2
);
599 tcg_gen_ext32u_i64(t0
, src1
);
600 tcg_gen_ext32u_i64(t1
, src2
);
603 tcg_gen_mul_i64(dst
, t0
, t1
);
604 tcg_gen_shri_i64(cpu_y
, dst
, 32);
608 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
610 /* zero-extend truncated operands before multiplication */
611 gen_op_multiply(dst
, src1
, src2
, 0);
614 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
616 /* sign-extend truncated operands before multiplication */
617 gen_op_multiply(dst
, src1
, src2
, 1);
621 static void gen_op_eval_ba(TCGv dst
)
623 tcg_gen_movi_tl(dst
, 1);
627 static void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
629 gen_mov_reg_Z(dst
, src
);
633 static void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
635 TCGv t0
= tcg_temp_new();
636 gen_mov_reg_N(t0
, src
);
637 gen_mov_reg_V(dst
, src
);
638 tcg_gen_xor_tl(dst
, dst
, t0
);
639 gen_mov_reg_Z(t0
, src
);
640 tcg_gen_or_tl(dst
, dst
, t0
);
644 static void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
646 TCGv t0
= tcg_temp_new();
647 gen_mov_reg_V(t0
, src
);
648 gen_mov_reg_N(dst
, src
);
649 tcg_gen_xor_tl(dst
, dst
, t0
);
653 static void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
655 TCGv t0
= tcg_temp_new();
656 gen_mov_reg_Z(t0
, src
);
657 gen_mov_reg_C(dst
, src
);
658 tcg_gen_or_tl(dst
, dst
, t0
);
662 static void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
664 gen_mov_reg_C(dst
, src
);
668 static void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
670 gen_mov_reg_V(dst
, src
);
674 static void gen_op_eval_bn(TCGv dst
)
676 tcg_gen_movi_tl(dst
, 0);
680 static void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
682 gen_mov_reg_N(dst
, src
);
686 static void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
688 gen_mov_reg_Z(dst
, src
);
689 tcg_gen_xori_tl(dst
, dst
, 0x1);
693 static void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
695 gen_op_eval_ble(dst
, src
);
696 tcg_gen_xori_tl(dst
, dst
, 0x1);
700 static void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
702 gen_op_eval_bl(dst
, src
);
703 tcg_gen_xori_tl(dst
, dst
, 0x1);
707 static void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
709 gen_op_eval_bleu(dst
, src
);
710 tcg_gen_xori_tl(dst
, dst
, 0x1);
714 static void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
716 gen_mov_reg_C(dst
, src
);
717 tcg_gen_xori_tl(dst
, dst
, 0x1);
721 static void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
723 gen_mov_reg_N(dst
, src
);
724 tcg_gen_xori_tl(dst
, dst
, 0x1);
728 static void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
730 gen_mov_reg_V(dst
, src
);
731 tcg_gen_xori_tl(dst
, dst
, 0x1);
/*
   FPSR bit field FCC1 | FCC0:
   0 = equal, 1 = less, 2 = greater, 3 = unordered
*/
741 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
742 unsigned int fcc_offset
)
744 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
745 tcg_gen_andi_tl(reg
, reg
, 0x1);
748 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
750 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
751 tcg_gen_andi_tl(reg
, reg
, 0x1);
755 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
757 TCGv t0
= tcg_temp_new();
758 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
759 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
760 tcg_gen_or_tl(dst
, dst
, t0
);
763 // 1 or 2: FCC0 ^ FCC1
764 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
766 TCGv t0
= tcg_temp_new();
767 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
768 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
769 tcg_gen_xor_tl(dst
, dst
, t0
);
773 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
775 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
779 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
781 TCGv t0
= tcg_temp_new();
782 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
783 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
784 tcg_gen_andc_tl(dst
, dst
, t0
);
788 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
790 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
794 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
796 TCGv t0
= tcg_temp_new();
797 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
798 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
799 tcg_gen_andc_tl(dst
, t0
, dst
);
803 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
805 TCGv t0
= tcg_temp_new();
806 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
807 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
808 tcg_gen_and_tl(dst
, dst
, t0
);
812 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
814 TCGv t0
= tcg_temp_new();
815 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
816 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
817 tcg_gen_or_tl(dst
, dst
, t0
);
818 tcg_gen_xori_tl(dst
, dst
, 0x1);
821 // 0 or 3: !(FCC0 ^ FCC1)
822 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
824 TCGv t0
= tcg_temp_new();
825 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
826 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
827 tcg_gen_xor_tl(dst
, dst
, t0
);
828 tcg_gen_xori_tl(dst
, dst
, 0x1);
832 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
834 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
835 tcg_gen_xori_tl(dst
, dst
, 0x1);
838 // !1: !(FCC0 & !FCC1)
839 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
841 TCGv t0
= tcg_temp_new();
842 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
843 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
844 tcg_gen_andc_tl(dst
, dst
, t0
);
845 tcg_gen_xori_tl(dst
, dst
, 0x1);
849 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
851 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
852 tcg_gen_xori_tl(dst
, dst
, 0x1);
855 // !2: !(!FCC0 & FCC1)
856 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
858 TCGv t0
= tcg_temp_new();
859 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
860 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
861 tcg_gen_andc_tl(dst
, t0
, dst
);
862 tcg_gen_xori_tl(dst
, dst
, 0x1);
865 // !3: !(FCC0 & FCC1)
866 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
868 TCGv t0
= tcg_temp_new();
869 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
870 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
871 tcg_gen_and_tl(dst
, dst
, t0
);
872 tcg_gen_xori_tl(dst
, dst
, 0x1);
875 static void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
876 target_ulong pc2
, TCGv r_cond
)
878 TCGLabel
*l1
= gen_new_label();
880 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
882 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
885 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
888 static void gen_branch_a(DisasContext
*dc
, target_ulong pc1
)
890 TCGLabel
*l1
= gen_new_label();
891 target_ulong npc
= dc
->npc
;
893 tcg_gen_brcondi_tl(TCG_COND_EQ
, cpu_cond
, 0, l1
);
895 gen_goto_tb(dc
, 0, npc
, pc1
);
898 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
900 dc
->base
.is_jmp
= DISAS_NORETURN
;
903 static void gen_branch_n(DisasContext
*dc
, target_ulong pc1
)
905 target_ulong npc
= dc
->npc
;
910 case DYNAMIC_PC_LOOKUP
:
911 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
912 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
913 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
,
914 cpu_cond
, tcg_constant_tl(0),
915 tcg_constant_tl(pc1
), cpu_npc
);
919 g_assert_not_reached();
923 dc
->jump_pc
[0] = pc1
;
924 dc
->jump_pc
[1] = npc
+ 4;
929 static void gen_generic_branch(DisasContext
*dc
)
931 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
932 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
933 TCGv zero
= tcg_constant_tl(0);
935 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
938 /* call this function before using the condition register as it may
939 have been set for a jump */
940 static void flush_cond(DisasContext
*dc
)
942 if (dc
->npc
== JUMP_PC
) {
943 gen_generic_branch(dc
);
944 dc
->npc
= DYNAMIC_PC_LOOKUP
;
948 static void save_npc(DisasContext
*dc
)
953 gen_generic_branch(dc
);
954 dc
->npc
= DYNAMIC_PC_LOOKUP
;
957 case DYNAMIC_PC_LOOKUP
:
960 g_assert_not_reached();
963 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
967 static void update_psr(DisasContext
*dc
)
969 if (dc
->cc_op
!= CC_OP_FLAGS
) {
970 dc
->cc_op
= CC_OP_FLAGS
;
971 gen_helper_compute_psr(cpu_env
);
975 static void save_state(DisasContext
*dc
)
977 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
981 static void gen_exception(DisasContext
*dc
, int which
)
984 gen_helper_raise_exception(cpu_env
, tcg_constant_i32(which
));
985 dc
->base
.is_jmp
= DISAS_NORETURN
;
988 static void gen_check_align(TCGv addr
, int mask
)
990 gen_helper_check_align(cpu_env
, addr
, tcg_constant_i32(mask
));
993 static void gen_mov_pc_npc(DisasContext
*dc
)
998 gen_generic_branch(dc
);
999 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1000 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1003 case DYNAMIC_PC_LOOKUP
:
1004 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1008 g_assert_not_reached();
1015 static void gen_op_next_insn(void)
1017 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1018 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1021 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1024 static int subcc_cond
[16] = {
1040 -1, /* no overflow */
1043 static int logic_cond
[16] = {
1045 TCG_COND_EQ
, /* eq: Z */
1046 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1047 TCG_COND_LT
, /* lt: N ^ V -> N */
1048 TCG_COND_EQ
, /* leu: C | Z -> Z */
1049 TCG_COND_NEVER
, /* ltu: C -> 0 */
1050 TCG_COND_LT
, /* neg: N */
1051 TCG_COND_NEVER
, /* vs: V -> 0 */
1053 TCG_COND_NE
, /* ne: !Z */
1054 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1055 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1056 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1057 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1058 TCG_COND_GE
, /* pos: !N */
1059 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1065 #ifdef TARGET_SPARC64
1075 switch (dc
->cc_op
) {
1077 cmp
->cond
= logic_cond
[cond
];
1079 cmp
->is_bool
= false;
1080 cmp
->c2
= tcg_constant_tl(0);
1081 #ifdef TARGET_SPARC64
1083 cmp
->c1
= tcg_temp_new();
1084 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1088 cmp
->c1
= cpu_cc_dst
;
1095 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1096 goto do_compare_dst_0
;
1098 case 7: /* overflow */
1099 case 15: /* !overflow */
1103 cmp
->cond
= subcc_cond
[cond
];
1104 cmp
->is_bool
= false;
1105 #ifdef TARGET_SPARC64
1107 /* Note that sign-extension works for unsigned compares as
1108 long as both operands are sign-extended. */
1109 cmp
->c1
= tcg_temp_new();
1110 cmp
->c2
= tcg_temp_new();
1111 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1112 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1116 cmp
->c1
= cpu_cc_src
;
1117 cmp
->c2
= cpu_cc_src2
;
1124 gen_helper_compute_psr(cpu_env
);
1125 dc
->cc_op
= CC_OP_FLAGS
;
1129 /* We're going to generate a boolean result. */
1130 cmp
->cond
= TCG_COND_NE
;
1131 cmp
->is_bool
= true;
1132 cmp
->c1
= r_dst
= tcg_temp_new();
1133 cmp
->c2
= tcg_constant_tl(0);
1137 gen_op_eval_bn(r_dst
);
1140 gen_op_eval_be(r_dst
, r_src
);
1143 gen_op_eval_ble(r_dst
, r_src
);
1146 gen_op_eval_bl(r_dst
, r_src
);
1149 gen_op_eval_bleu(r_dst
, r_src
);
1152 gen_op_eval_bcs(r_dst
, r_src
);
1155 gen_op_eval_bneg(r_dst
, r_src
);
1158 gen_op_eval_bvs(r_dst
, r_src
);
1161 gen_op_eval_ba(r_dst
);
1164 gen_op_eval_bne(r_dst
, r_src
);
1167 gen_op_eval_bg(r_dst
, r_src
);
1170 gen_op_eval_bge(r_dst
, r_src
);
1173 gen_op_eval_bgu(r_dst
, r_src
);
1176 gen_op_eval_bcc(r_dst
, r_src
);
1179 gen_op_eval_bpos(r_dst
, r_src
);
1182 gen_op_eval_bvc(r_dst
, r_src
);
1189 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1191 unsigned int offset
;
1194 /* For now we still generate a straight boolean result. */
1195 cmp
->cond
= TCG_COND_NE
;
1196 cmp
->is_bool
= true;
1197 cmp
->c1
= r_dst
= tcg_temp_new();
1198 cmp
->c2
= tcg_constant_tl(0);
1218 gen_op_eval_bn(r_dst
);
1221 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1224 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1227 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1230 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1233 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1236 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1239 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1242 gen_op_eval_ba(r_dst
);
1245 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1248 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1251 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1254 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1257 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1260 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1263 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1268 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1272 gen_compare(&cmp
, cc
, cond
, dc
);
1274 /* The interface is to return a boolean in r_dst. */
1276 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1278 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1282 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1285 gen_fcompare(&cmp
, cc
, cond
);
1287 /* The interface is to return a boolean in r_dst. */
1289 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1291 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1295 #ifdef TARGET_SPARC64
1297 static const int gen_tcg_cond_reg
[8] = {
1308 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1310 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1311 cmp
->is_bool
= false;
1313 cmp
->c2
= tcg_constant_tl(0);
1316 static void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1319 gen_compare_reg(&cmp
, cond
, r_src
);
1321 /* The interface is to return a boolean in r_dst. */
1322 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1326 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1328 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1329 target_ulong target
= dc
->pc
+ offset
;
1331 #ifdef TARGET_SPARC64
1332 if (unlikely(AM_CHECK(dc
))) {
1333 target
&= 0xffffffffULL
;
1337 /* unconditional not taken */
1339 dc
->pc
= dc
->npc
+ 4;
1340 dc
->npc
= dc
->pc
+ 4;
1343 dc
->npc
= dc
->pc
+ 4;
1345 } else if (cond
== 0x8) {
1346 /* unconditional taken */
1349 dc
->npc
= dc
->pc
+ 4;
1353 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1357 gen_cond(cpu_cond
, cc
, cond
, dc
);
1359 gen_branch_a(dc
, target
);
1361 gen_branch_n(dc
, target
);
1366 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1368 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1369 target_ulong target
= dc
->pc
+ offset
;
1371 #ifdef TARGET_SPARC64
1372 if (unlikely(AM_CHECK(dc
))) {
1373 target
&= 0xffffffffULL
;
1377 /* unconditional not taken */
1379 dc
->pc
= dc
->npc
+ 4;
1380 dc
->npc
= dc
->pc
+ 4;
1383 dc
->npc
= dc
->pc
+ 4;
1385 } else if (cond
== 0x8) {
1386 /* unconditional taken */
1389 dc
->npc
= dc
->pc
+ 4;
1393 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1397 gen_fcond(cpu_cond
, cc
, cond
);
1399 gen_branch_a(dc
, target
);
1401 gen_branch_n(dc
, target
);
1406 #ifdef TARGET_SPARC64
1407 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1410 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1411 target_ulong target
= dc
->pc
+ offset
;
1413 if (unlikely(AM_CHECK(dc
))) {
1414 target
&= 0xffffffffULL
;
1417 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1419 gen_branch_a(dc
, target
);
1421 gen_branch_n(dc
, target
);
1425 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1429 gen_helper_fcmps(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1432 gen_helper_fcmps_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1435 gen_helper_fcmps_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1438 gen_helper_fcmps_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1443 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1447 gen_helper_fcmpd(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1450 gen_helper_fcmpd_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1453 gen_helper_fcmpd_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1456 gen_helper_fcmpd_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1461 static void gen_op_fcmpq(int fccno
)
1465 gen_helper_fcmpq(cpu_fsr
, cpu_env
);
1468 gen_helper_fcmpq_fcc1(cpu_fsr
, cpu_env
);
1471 gen_helper_fcmpq_fcc2(cpu_fsr
, cpu_env
);
1474 gen_helper_fcmpq_fcc3(cpu_fsr
, cpu_env
);
1479 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1483 gen_helper_fcmpes(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1486 gen_helper_fcmpes_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1489 gen_helper_fcmpes_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1492 gen_helper_fcmpes_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1497 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1501 gen_helper_fcmped(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1504 gen_helper_fcmped_fcc1(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1507 gen_helper_fcmped_fcc2(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1510 gen_helper_fcmped_fcc3(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1515 static void gen_op_fcmpeq(int fccno
)
1519 gen_helper_fcmpeq(cpu_fsr
, cpu_env
);
1522 gen_helper_fcmpeq_fcc1(cpu_fsr
, cpu_env
);
1525 gen_helper_fcmpeq_fcc2(cpu_fsr
, cpu_env
);
1528 gen_helper_fcmpeq_fcc3(cpu_fsr
, cpu_env
);
1535 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1537 gen_helper_fcmps(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1540 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1542 gen_helper_fcmpd(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1545 static void gen_op_fcmpq(int fccno
)
1547 gen_helper_fcmpq(cpu_fsr
, cpu_env
);
1550 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1552 gen_helper_fcmpes(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1555 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1557 gen_helper_fcmped(cpu_fsr
, cpu_env
, r_rs1
, r_rs2
);
1560 static void gen_op_fcmpeq(int fccno
)
1562 gen_helper_fcmpeq(cpu_fsr
, cpu_env
);
1566 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1568 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1569 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1570 gen_exception(dc
, TT_FP_EXCP
);
1573 static int gen_trap_ifnofpu(DisasContext
*dc
)
1575 #if !defined(CONFIG_USER_ONLY)
1576 if (!dc
->fpu_enabled
) {
1577 gen_exception(dc
, TT_NFPU_INSN
);
1584 static void gen_op_clear_ieee_excp_and_FTT(void)
1586 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1589 static void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1590 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1594 src
= gen_load_fpr_F(dc
, rs
);
1595 dst
= gen_dest_fpr_F(dc
);
1597 gen(dst
, cpu_env
, src
);
1598 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1600 gen_store_fpr_F(dc
, rd
, dst
);
1603 static void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1604 void (*gen
)(TCGv_i32
, TCGv_i32
))
1608 src
= gen_load_fpr_F(dc
, rs
);
1609 dst
= gen_dest_fpr_F(dc
);
1613 gen_store_fpr_F(dc
, rd
, dst
);
1616 static void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1617 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1619 TCGv_i32 dst
, src1
, src2
;
1621 src1
= gen_load_fpr_F(dc
, rs1
);
1622 src2
= gen_load_fpr_F(dc
, rs2
);
1623 dst
= gen_dest_fpr_F(dc
);
1625 gen(dst
, cpu_env
, src1
, src2
);
1626 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1628 gen_store_fpr_F(dc
, rd
, dst
);
1631 #ifdef TARGET_SPARC64
1632 static void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1633 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1635 TCGv_i32 dst
, src1
, src2
;
1637 src1
= gen_load_fpr_F(dc
, rs1
);
1638 src2
= gen_load_fpr_F(dc
, rs2
);
1639 dst
= gen_dest_fpr_F(dc
);
1641 gen(dst
, src1
, src2
);
1643 gen_store_fpr_F(dc
, rd
, dst
);
1647 static void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1648 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1652 src
= gen_load_fpr_D(dc
, rs
);
1653 dst
= gen_dest_fpr_D(dc
, rd
);
1655 gen(dst
, cpu_env
, src
);
1656 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1658 gen_store_fpr_D(dc
, rd
, dst
);
1661 #ifdef TARGET_SPARC64
1662 static void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1663 void (*gen
)(TCGv_i64
, TCGv_i64
))
1667 src
= gen_load_fpr_D(dc
, rs
);
1668 dst
= gen_dest_fpr_D(dc
, rd
);
1672 gen_store_fpr_D(dc
, rd
, dst
);
1676 static void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1677 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1679 TCGv_i64 dst
, src1
, src2
;
1681 src1
= gen_load_fpr_D(dc
, rs1
);
1682 src2
= gen_load_fpr_D(dc
, rs2
);
1683 dst
= gen_dest_fpr_D(dc
, rd
);
1685 gen(dst
, cpu_env
, src1
, src2
);
1686 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1688 gen_store_fpr_D(dc
, rd
, dst
);
1691 #ifdef TARGET_SPARC64
1692 static void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1693 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1695 TCGv_i64 dst
, src1
, src2
;
1697 src1
= gen_load_fpr_D(dc
, rs1
);
1698 src2
= gen_load_fpr_D(dc
, rs2
);
1699 dst
= gen_dest_fpr_D(dc
, rd
);
1701 gen(dst
, src1
, src2
);
1703 gen_store_fpr_D(dc
, rd
, dst
);
1706 static void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1707 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1709 TCGv_i64 dst
, src1
, src2
;
1711 src1
= gen_load_fpr_D(dc
, rs1
);
1712 src2
= gen_load_fpr_D(dc
, rs2
);
1713 dst
= gen_dest_fpr_D(dc
, rd
);
1715 gen(dst
, cpu_gsr
, src1
, src2
);
1717 gen_store_fpr_D(dc
, rd
, dst
);
1720 static void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1721 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1723 TCGv_i64 dst
, src0
, src1
, src2
;
1725 src1
= gen_load_fpr_D(dc
, rs1
);
1726 src2
= gen_load_fpr_D(dc
, rs2
);
1727 src0
= gen_load_fpr_D(dc
, rd
);
1728 dst
= gen_dest_fpr_D(dc
, rd
);
1730 gen(dst
, src0
, src1
, src2
);
1732 gen_store_fpr_D(dc
, rd
, dst
);
1736 static void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1737 void (*gen
)(TCGv_ptr
))
1739 gen_op_load_fpr_QT1(QFPREG(rs
));
1742 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1744 gen_op_store_QT0_fpr(QFPREG(rd
));
1745 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1748 #ifdef TARGET_SPARC64
1749 static void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1750 void (*gen
)(TCGv_ptr
))
1752 gen_op_load_fpr_QT1(QFPREG(rs
));
1756 gen_op_store_QT0_fpr(QFPREG(rd
));
1757 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1761 static void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1762 void (*gen
)(TCGv_ptr
))
1764 gen_op_load_fpr_QT0(QFPREG(rs1
));
1765 gen_op_load_fpr_QT1(QFPREG(rs2
));
1768 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1770 gen_op_store_QT0_fpr(QFPREG(rd
));
1771 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1774 static void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1775 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1778 TCGv_i32 src1
, src2
;
1780 src1
= gen_load_fpr_F(dc
, rs1
);
1781 src2
= gen_load_fpr_F(dc
, rs2
);
1782 dst
= gen_dest_fpr_D(dc
, rd
);
1784 gen(dst
, cpu_env
, src1
, src2
);
1785 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1787 gen_store_fpr_D(dc
, rd
, dst
);
1790 static void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1791 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1793 TCGv_i64 src1
, src2
;
1795 src1
= gen_load_fpr_D(dc
, rs1
);
1796 src2
= gen_load_fpr_D(dc
, rs2
);
1798 gen(cpu_env
, src1
, src2
);
1799 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1801 gen_op_store_QT0_fpr(QFPREG(rd
));
1802 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1805 #ifdef TARGET_SPARC64
1806 static void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1807 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1812 src
= gen_load_fpr_F(dc
, rs
);
1813 dst
= gen_dest_fpr_D(dc
, rd
);
1815 gen(dst
, cpu_env
, src
);
1816 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1818 gen_store_fpr_D(dc
, rd
, dst
);
1822 static void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1823 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1828 src
= gen_load_fpr_F(dc
, rs
);
1829 dst
= gen_dest_fpr_D(dc
, rd
);
1831 gen(dst
, cpu_env
, src
);
1833 gen_store_fpr_D(dc
, rd
, dst
);
1836 static void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1837 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1842 src
= gen_load_fpr_D(dc
, rs
);
1843 dst
= gen_dest_fpr_F(dc
);
1845 gen(dst
, cpu_env
, src
);
1846 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1848 gen_store_fpr_F(dc
, rd
, dst
);
1851 static void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1852 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1856 gen_op_load_fpr_QT1(QFPREG(rs
));
1857 dst
= gen_dest_fpr_F(dc
);
1860 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1862 gen_store_fpr_F(dc
, rd
, dst
);
1865 static void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1866 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1870 gen_op_load_fpr_QT1(QFPREG(rs
));
1871 dst
= gen_dest_fpr_D(dc
, rd
);
1874 gen_helper_check_ieee_exceptions(cpu_fsr
, cpu_env
);
1876 gen_store_fpr_D(dc
, rd
, dst
);
1879 static void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1880 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1884 src
= gen_load_fpr_F(dc
, rs
);
1888 gen_op_store_QT0_fpr(QFPREG(rd
));
1889 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1892 static void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1893 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1897 src
= gen_load_fpr_D(dc
, rs
);
1901 gen_op_store_QT0_fpr(QFPREG(rd
));
1902 gen_update_fprs_dirty(dc
, QFPREG(rd
));
1905 static void gen_swap(DisasContext
*dc
, TCGv dst
, TCGv src
,
1906 TCGv addr
, int mmu_idx
, MemOp memop
)
1908 gen_address_mask(dc
, addr
);
1909 tcg_gen_atomic_xchg_tl(dst
, addr
, src
, mmu_idx
, memop
| MO_ALIGN
);
1912 static void gen_ldstub(DisasContext
*dc
, TCGv dst
, TCGv addr
, int mmu_idx
)
1914 TCGv m1
= tcg_constant_tl(0xff);
1915 gen_address_mask(dc
, addr
);
1916 tcg_gen_atomic_xchg_tl(dst
, addr
, m1
, mmu_idx
, MO_UB
);
1920 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
1939 static DisasASI
get_asi(DisasContext
*dc
, int insn
, MemOp memop
)
1941 int asi
= GET_FIELD(insn
, 19, 26);
1942 ASIType type
= GET_ASI_HELPER
;
1943 int mem_idx
= dc
->mem_idx
;
1945 #ifndef TARGET_SPARC64
1946 /* Before v9, all asis are immediate and privileged. */
1948 gen_exception(dc
, TT_ILL_INSN
);
1949 type
= GET_ASI_EXCP
;
1950 } else if (supervisor(dc
)
1951 /* Note that LEON accepts ASI_USERDATA in user mode, for
1952 use with CASA. Also note that previous versions of
1953 QEMU allowed (and old versions of gcc emitted) ASI_P
1954 for LEON, which is incorrect. */
1955 || (asi
== ASI_USERDATA
1956 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1958 case ASI_USERDATA
: /* User data access */
1959 mem_idx
= MMU_USER_IDX
;
1960 type
= GET_ASI_DIRECT
;
1962 case ASI_KERNELDATA
: /* Supervisor data access */
1963 mem_idx
= MMU_KERNEL_IDX
;
1964 type
= GET_ASI_DIRECT
;
1966 case ASI_M_BYPASS
: /* MMU passthrough */
1967 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1968 mem_idx
= MMU_PHYS_IDX
;
1969 type
= GET_ASI_DIRECT
;
1971 case ASI_M_BCOPY
: /* Block copy, sta access */
1972 mem_idx
= MMU_KERNEL_IDX
;
1973 type
= GET_ASI_BCOPY
;
1975 case ASI_M_BFILL
: /* Block fill, stda access */
1976 mem_idx
= MMU_KERNEL_IDX
;
1977 type
= GET_ASI_BFILL
;
1981 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1982 * permissions check in get_physical_address(..).
1984 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1986 gen_exception(dc
, TT_PRIV_INSN
);
1987 type
= GET_ASI_EXCP
;
1993 /* With v9, all asis below 0x80 are privileged. */
1994 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1995 down that bit into DisasContext. For the moment that's ok,
1996 since the direct implementations below doesn't have any ASIs
1997 in the restricted [0x30, 0x7f] range, and the check will be
1998 done properly in the helper. */
1999 if (!supervisor(dc
) && asi
< 0x80) {
2000 gen_exception(dc
, TT_PRIV_ACT
);
2001 type
= GET_ASI_EXCP
;
2004 case ASI_REAL
: /* Bypass */
2005 case ASI_REAL_IO
: /* Bypass, non-cacheable */
2006 case ASI_REAL_L
: /* Bypass LE */
2007 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
2008 case ASI_TWINX_REAL
: /* Real address, twinx */
2009 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
2010 case ASI_QUAD_LDD_PHYS
:
2011 case ASI_QUAD_LDD_PHYS_L
:
2012 mem_idx
= MMU_PHYS_IDX
;
2014 case ASI_N
: /* Nucleus */
2015 case ASI_NL
: /* Nucleus LE */
2018 case ASI_NUCLEUS_QUAD_LDD
:
2019 case ASI_NUCLEUS_QUAD_LDD_L
:
2020 if (hypervisor(dc
)) {
2021 mem_idx
= MMU_PHYS_IDX
;
2023 mem_idx
= MMU_NUCLEUS_IDX
;
2026 case ASI_AIUP
: /* As if user primary */
2027 case ASI_AIUPL
: /* As if user primary LE */
2028 case ASI_TWINX_AIUP
:
2029 case ASI_TWINX_AIUP_L
:
2030 case ASI_BLK_AIUP_4V
:
2031 case ASI_BLK_AIUP_L_4V
:
2034 mem_idx
= MMU_USER_IDX
;
2036 case ASI_AIUS
: /* As if user secondary */
2037 case ASI_AIUSL
: /* As if user secondary LE */
2038 case ASI_TWINX_AIUS
:
2039 case ASI_TWINX_AIUS_L
:
2040 case ASI_BLK_AIUS_4V
:
2041 case ASI_BLK_AIUS_L_4V
:
2044 mem_idx
= MMU_USER_SECONDARY_IDX
;
2046 case ASI_S
: /* Secondary */
2047 case ASI_SL
: /* Secondary LE */
2050 case ASI_BLK_COMMIT_S
:
2057 if (mem_idx
== MMU_USER_IDX
) {
2058 mem_idx
= MMU_USER_SECONDARY_IDX
;
2059 } else if (mem_idx
== MMU_KERNEL_IDX
) {
2060 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
2063 case ASI_P
: /* Primary */
2064 case ASI_PL
: /* Primary LE */
2067 case ASI_BLK_COMMIT_P
:
2091 type
= GET_ASI_DIRECT
;
2093 case ASI_TWINX_REAL
:
2094 case ASI_TWINX_REAL_L
:
2097 case ASI_TWINX_AIUP
:
2098 case ASI_TWINX_AIUP_L
:
2099 case ASI_TWINX_AIUS
:
2100 case ASI_TWINX_AIUS_L
:
2105 case ASI_QUAD_LDD_PHYS
:
2106 case ASI_QUAD_LDD_PHYS_L
:
2107 case ASI_NUCLEUS_QUAD_LDD
:
2108 case ASI_NUCLEUS_QUAD_LDD_L
:
2109 type
= GET_ASI_DTWINX
;
2111 case ASI_BLK_COMMIT_P
:
2112 case ASI_BLK_COMMIT_S
:
2113 case ASI_BLK_AIUP_4V
:
2114 case ASI_BLK_AIUP_L_4V
:
2117 case ASI_BLK_AIUS_4V
:
2118 case ASI_BLK_AIUS_L_4V
:
2125 type
= GET_ASI_BLOCK
;
2132 type
= GET_ASI_SHORT
;
2139 type
= GET_ASI_SHORT
;
2142 /* The little-endian asis all have bit 3 set. */
2149 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
2152 static void gen_ld_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
,
2153 int insn
, MemOp memop
)
2155 DisasASI da
= get_asi(dc
, insn
, memop
);
2160 case GET_ASI_DTWINX
: /* Reserved for ldda. */
2161 gen_exception(dc
, TT_ILL_INSN
);
2163 case GET_ASI_DIRECT
:
2164 gen_address_mask(dc
, addr
);
2165 tcg_gen_qemu_ld_tl(dst
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2169 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2170 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2173 #ifdef TARGET_SPARC64
2174 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_mop
);
2177 TCGv_i64 t64
= tcg_temp_new_i64();
2178 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2179 tcg_gen_trunc_i64_tl(dst
, t64
);
2187 static void gen_st_asi(DisasContext
*dc
, TCGv src
, TCGv addr
,
2188 int insn
, MemOp memop
)
2190 DisasASI da
= get_asi(dc
, insn
, memop
);
2195 case GET_ASI_DTWINX
: /* Reserved for stda. */
2196 #ifndef TARGET_SPARC64
2197 gen_exception(dc
, TT_ILL_INSN
);
2200 if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
2201 /* Pre OpenSPARC CPUs don't have these */
2202 gen_exception(dc
, TT_ILL_INSN
);
2205 /* in OpenSPARC T1+ CPUs TWINX ASIs in store instructions
2206 * are ST_BLKINIT_ ASIs */
2209 case GET_ASI_DIRECT
:
2210 gen_address_mask(dc
, addr
);
2211 tcg_gen_qemu_st_tl(src
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2213 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
2215 /* Copy 32 bytes from the address in SRC to ADDR. */
2216 /* ??? The original qemu code suggests 4-byte alignment, dropping
2217 the low bits, but the only place I can see this used is in the
2218 Linux kernel with 32 byte alignment, which would make more sense
2219 as a cacheline-style operation. */
2221 TCGv saddr
= tcg_temp_new();
2222 TCGv daddr
= tcg_temp_new();
2223 TCGv four
= tcg_constant_tl(4);
2224 TCGv_i32 tmp
= tcg_temp_new_i32();
2227 tcg_gen_andi_tl(saddr
, src
, -4);
2228 tcg_gen_andi_tl(daddr
, addr
, -4);
2229 for (i
= 0; i
< 32; i
+= 4) {
2230 /* Since the loads and stores are paired, allow the
2231 copy to happen in the host endianness. */
2232 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
.mem_idx
, MO_UL
);
2233 tcg_gen_qemu_st_i32(tmp
, daddr
, da
.mem_idx
, MO_UL
);
2234 tcg_gen_add_tl(saddr
, saddr
, four
);
2235 tcg_gen_add_tl(daddr
, daddr
, four
);
2242 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2243 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
2246 #ifdef TARGET_SPARC64
2247 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_mop
);
2250 TCGv_i64 t64
= tcg_temp_new_i64();
2251 tcg_gen_extu_tl_i64(t64
, src
);
2252 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_mop
);
2256 /* A write to a TLB register may alter page maps. End the TB. */
2257 dc
->npc
= DYNAMIC_PC
;
2263 static void gen_swap_asi(DisasContext
*dc
, TCGv dst
, TCGv src
,
2264 TCGv addr
, int insn
)
2266 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2271 case GET_ASI_DIRECT
:
2272 gen_swap(dc
, dst
, src
, addr
, da
.mem_idx
, da
.memop
);
2275 /* ??? Should be DAE_invalid_asi. */
2276 gen_exception(dc
, TT_DATA_ACCESS
);
2281 static void gen_cas_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2284 DisasASI da
= get_asi(dc
, insn
, MO_TEUL
);
2290 case GET_ASI_DIRECT
:
2291 oldv
= tcg_temp_new();
2292 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2293 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2294 gen_store_gpr(dc
, rd
, oldv
);
2297 /* ??? Should be DAE_invalid_asi. */
2298 gen_exception(dc
, TT_DATA_ACCESS
);
2303 static void gen_ldstub_asi(DisasContext
*dc
, TCGv dst
, TCGv addr
, int insn
)
2305 DisasASI da
= get_asi(dc
, insn
, MO_UB
);
2310 case GET_ASI_DIRECT
:
2311 gen_ldstub(dc
, dst
, addr
, da
.mem_idx
);
2314 /* ??? In theory, this should be raise DAE_invalid_asi.
2315 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
2316 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
2317 gen_helper_exit_atomic(cpu_env
);
2319 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2320 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
2324 t64
= tcg_temp_new_i64();
2325 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2327 s64
= tcg_constant_i64(0xff);
2328 gen_helper_st_asi(cpu_env
, addr
, s64
, r_asi
, r_mop
);
2330 tcg_gen_trunc_i64_tl(dst
, t64
);
2333 dc
->npc
= DYNAMIC_PC
;
2340 #ifdef TARGET_SPARC64
2341 static void gen_ldf_asi(DisasContext
*dc
, TCGv addr
,
2342 int insn
, int size
, int rd
)
2344 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2352 case GET_ASI_DIRECT
:
2353 gen_address_mask(dc
, addr
);
2356 d32
= gen_dest_fpr_F(dc
);
2357 tcg_gen_qemu_ld_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2358 gen_store_fpr_F(dc
, rd
, d32
);
2361 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2362 da
.memop
| MO_ALIGN_4
);
2365 d64
= tcg_temp_new_i64();
2366 tcg_gen_qemu_ld_i64(d64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_4
);
2367 tcg_gen_addi_tl(addr
, addr
, 8);
2368 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
,
2369 da
.memop
| MO_ALIGN_4
);
2370 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2373 g_assert_not_reached();
2378 /* Valid for lddfa on aligned registers only. */
2379 if (size
== 8 && (rd
& 7) == 0) {
2384 gen_address_mask(dc
, addr
);
2386 /* The first operation checks required alignment. */
2387 memop
= da
.memop
| MO_ALIGN_64
;
2388 eight
= tcg_constant_tl(8);
2389 for (i
= 0; ; ++i
) {
2390 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2395 tcg_gen_add_tl(addr
, addr
, eight
);
2399 gen_exception(dc
, TT_ILL_INSN
);
2404 /* Valid for lddfa only. */
2406 gen_address_mask(dc
, addr
);
2407 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2408 da
.memop
| MO_ALIGN
);
2410 gen_exception(dc
, TT_ILL_INSN
);
2416 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2417 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
| MO_ALIGN
);
2420 /* According to the table in the UA2011 manual, the only
2421 other asis that are valid for ldfa/lddfa/ldqfa are
2422 the NO_FAULT asis. We still need a helper for these,
2423 but we can just use the integer asi helper for them. */
2426 d64
= tcg_temp_new_i64();
2427 gen_helper_ld_asi(d64
, cpu_env
, addr
, r_asi
, r_mop
);
2428 d32
= gen_dest_fpr_F(dc
);
2429 tcg_gen_extrl_i64_i32(d32
, d64
);
2430 gen_store_fpr_F(dc
, rd
, d32
);
2433 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], cpu_env
, addr
, r_asi
, r_mop
);
2436 d64
= tcg_temp_new_i64();
2437 gen_helper_ld_asi(d64
, cpu_env
, addr
, r_asi
, r_mop
);
2438 tcg_gen_addi_tl(addr
, addr
, 8);
2439 gen_helper_ld_asi(cpu_fpr
[rd
/2+1], cpu_env
, addr
, r_asi
, r_mop
);
2440 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
2443 g_assert_not_reached();
2450 static void gen_stf_asi(DisasContext
*dc
, TCGv addr
,
2451 int insn
, int size
, int rd
)
2453 DisasASI da
= get_asi(dc
, insn
, (size
== 4 ? MO_TEUL
: MO_TEUQ
));
2460 case GET_ASI_DIRECT
:
2461 gen_address_mask(dc
, addr
);
2464 d32
= gen_load_fpr_F(dc
, rd
);
2465 tcg_gen_qemu_st_i32(d32
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2468 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2469 da
.memop
| MO_ALIGN_4
);
2472 /* Only 4-byte alignment required. However, it is legal for the
2473 cpu to signal the alignment fault, and the OS trap handler is
2474 required to fix it up. Requiring 16-byte alignment here avoids
2475 having to probe the second page before performing the first
2477 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2478 da
.memop
| MO_ALIGN_16
);
2479 tcg_gen_addi_tl(addr
, addr
, 8);
2480 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/2+1], addr
, da
.mem_idx
, da
.memop
);
2483 g_assert_not_reached();
2488 /* Valid for stdfa on aligned registers only. */
2489 if (size
== 8 && (rd
& 7) == 0) {
2494 gen_address_mask(dc
, addr
);
2496 /* The first operation checks required alignment. */
2497 memop
= da
.memop
| MO_ALIGN_64
;
2498 eight
= tcg_constant_tl(8);
2499 for (i
= 0; ; ++i
) {
2500 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
,
2505 tcg_gen_add_tl(addr
, addr
, eight
);
2509 gen_exception(dc
, TT_ILL_INSN
);
2514 /* Valid for stdfa only. */
2516 gen_address_mask(dc
, addr
);
2517 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
.mem_idx
,
2518 da
.memop
| MO_ALIGN
);
2520 gen_exception(dc
, TT_ILL_INSN
);
2525 /* According to the table in the UA2011 manual, the only
2526 other asis that are valid for ldfa/lddfa/ldqfa are
2527 the PST* asis, which aren't currently handled. */
2528 gen_exception(dc
, TT_ILL_INSN
);
2533 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2535 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2536 TCGv_i64 hi
= gen_dest_gpr(dc
, rd
);
2537 TCGv_i64 lo
= gen_dest_gpr(dc
, rd
+ 1);
2543 case GET_ASI_DTWINX
:
2544 gen_address_mask(dc
, addr
);
2545 tcg_gen_qemu_ld_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2546 tcg_gen_addi_tl(addr
, addr
, 8);
2547 tcg_gen_qemu_ld_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2550 case GET_ASI_DIRECT
:
2552 TCGv_i64 tmp
= tcg_temp_new_i64();
2554 gen_address_mask(dc
, addr
);
2555 tcg_gen_qemu_ld_i64(tmp
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2557 /* Note that LE ldda acts as if each 32-bit register
2558 result is byte swapped. Having just performed one
2559 64-bit bswap, we need now to swap the writebacks. */
2560 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2561 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2563 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2569 /* ??? In theory we've handled all of the ASIs that are valid
2570 for ldda, and this should raise DAE_invalid_asi. However,
2571 real hardware allows others. This can be seen with e.g.
2572 FreeBSD 10.3 wrt ASI_IC_TAG. */
2574 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2575 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2576 TCGv_i64 tmp
= tcg_temp_new_i64();
2579 gen_helper_ld_asi(tmp
, cpu_env
, addr
, r_asi
, r_mop
);
2582 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2583 tcg_gen_extr32_i64(lo
, hi
, tmp
);
2585 tcg_gen_extr32_i64(hi
, lo
, tmp
);
2591 gen_store_gpr(dc
, rd
, hi
);
2592 gen_store_gpr(dc
, rd
+ 1, lo
);
2595 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2598 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2599 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2605 case GET_ASI_DTWINX
:
2606 gen_address_mask(dc
, addr
);
2607 tcg_gen_qemu_st_i64(hi
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN_16
);
2608 tcg_gen_addi_tl(addr
, addr
, 8);
2609 tcg_gen_qemu_st_i64(lo
, addr
, da
.mem_idx
, da
.memop
);
2612 case GET_ASI_DIRECT
:
2614 TCGv_i64 t64
= tcg_temp_new_i64();
2616 /* Note that LE stda acts as if each 32-bit register result is
2617 byte swapped. We will perform one 64-bit LE store, so now
2618 we must swap the order of the construction. */
2619 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2620 tcg_gen_concat32_i64(t64
, lo
, hi
);
2622 tcg_gen_concat32_i64(t64
, hi
, lo
);
2624 gen_address_mask(dc
, addr
);
2625 tcg_gen_qemu_st_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2630 /* ??? In theory we've handled all of the ASIs that are valid
2631 for stda, and this should raise DAE_invalid_asi. */
2633 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2634 TCGv_i32 r_mop
= tcg_constant_i32(da
.memop
);
2635 TCGv_i64 t64
= tcg_temp_new_i64();
2638 if ((da
.memop
& MO_BSWAP
) == MO_TE
) {
2639 tcg_gen_concat32_i64(t64
, lo
, hi
);
2641 tcg_gen_concat32_i64(t64
, hi
, lo
);
2645 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_mop
);
2651 static void gen_casx_asi(DisasContext
*dc
, TCGv addr
, TCGv cmpv
,
2654 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2660 case GET_ASI_DIRECT
:
2661 oldv
= tcg_temp_new();
2662 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, gen_load_gpr(dc
, rd
),
2663 da
.mem_idx
, da
.memop
| MO_ALIGN
);
2664 gen_store_gpr(dc
, rd
, oldv
);
2667 /* ??? Should be DAE_invalid_asi. */
2668 gen_exception(dc
, TT_DATA_ACCESS
);
2673 #elif !defined(CONFIG_USER_ONLY)
2674 static void gen_ldda_asi(DisasContext
*dc
, TCGv addr
, int insn
, int rd
)
2676 /* ??? Work around an apparent bug in Ubuntu gcc 4.8.2-10ubuntu2+12,
2677 whereby "rd + 1" elicits "error: array subscript is above array".
2678 Since we have already asserted that rd is even, the semantics
2680 TCGv lo
= gen_dest_gpr(dc
, rd
| 1);
2681 TCGv hi
= gen_dest_gpr(dc
, rd
);
2682 TCGv_i64 t64
= tcg_temp_new_i64();
2683 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2688 case GET_ASI_DIRECT
:
2689 gen_address_mask(dc
, addr
);
2690 tcg_gen_qemu_ld_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2694 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2695 TCGv_i32 r_mop
= tcg_constant_i32(MO_UQ
);
2698 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_mop
);
2703 tcg_gen_extr_i64_i32(lo
, hi
, t64
);
2704 gen_store_gpr(dc
, rd
| 1, lo
);
2705 gen_store_gpr(dc
, rd
, hi
);
2708 static void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2711 DisasASI da
= get_asi(dc
, insn
, MO_TEUQ
);
2712 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2713 TCGv_i64 t64
= tcg_temp_new_i64();
2715 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2720 case GET_ASI_DIRECT
:
2721 gen_address_mask(dc
, addr
);
2722 tcg_gen_qemu_st_i64(t64
, addr
, da
.mem_idx
, da
.memop
| MO_ALIGN
);
2725 /* Store 32 bytes of T64 to ADDR. */
2726 /* ??? The original qemu code suggests 8-byte alignment, dropping
2727 the low bits, but the only place I can see this used is in the
2728 Linux kernel with 32 byte alignment, which would make more sense
2729 as a cacheline-style operation. */
2731 TCGv d_addr
= tcg_temp_new();
2732 TCGv eight
= tcg_constant_tl(8);
2735 tcg_gen_andi_tl(d_addr
, addr
, -8);
2736 for (i
= 0; i
< 32; i
+= 8) {
2737 tcg_gen_qemu_st_i64(t64
, d_addr
, da
.mem_idx
, da
.memop
);
2738 tcg_gen_add_tl(d_addr
, d_addr
, eight
);
2744 TCGv_i32 r_asi
= tcg_constant_i32(da
.asi
);
2745 TCGv_i32 r_mop
= tcg_constant_i32(MO_UQ
);
2748 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_mop
);
2755 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2757 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2758 return gen_load_gpr(dc
, rs1
);
2761 static TCGv
get_src2(DisasContext
*dc
, unsigned int insn
)
2763 if (IS_IMM
) { /* immediate */
2764 target_long simm
= GET_FIELDs(insn
, 19, 31);
2765 TCGv t
= tcg_temp_new();
2766 tcg_gen_movi_tl(t
, simm
);
2768 } else { /* register */
2769 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2770 return gen_load_gpr(dc
, rs2
);
2774 #ifdef TARGET_SPARC64
2775 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2777 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2779 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2780 or fold the comparison down to 32 bits and use movcond_i32. Choose
2782 c32
= tcg_temp_new_i32();
2784 tcg_gen_extrl_i64_i32(c32
, cmp
->c1
);
2786 TCGv_i64 c64
= tcg_temp_new_i64();
2787 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2788 tcg_gen_extrl_i64_i32(c32
, c64
);
2791 s1
= gen_load_fpr_F(dc
, rs
);
2792 s2
= gen_load_fpr_F(dc
, rd
);
2793 dst
= gen_dest_fpr_F(dc
);
2794 zero
= tcg_constant_i32(0);
2796 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2798 gen_store_fpr_F(dc
, rd
, dst
);
2801 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2803 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2804 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2805 gen_load_fpr_D(dc
, rs
),
2806 gen_load_fpr_D(dc
, rd
));
2807 gen_store_fpr_D(dc
, rd
, dst
);
2810 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2812 int qd
= QFPREG(rd
);
2813 int qs
= QFPREG(rs
);
2815 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2816 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2817 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2818 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2820 gen_update_fprs_dirty(dc
, qd
);
2823 #ifndef CONFIG_USER_ONLY
2824 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
, TCGv_env cpu_env
)
2826 TCGv_i32 r_tl
= tcg_temp_new_i32();
2828 /* load env->tl into r_tl */
2829 tcg_gen_ld_i32(r_tl
, cpu_env
, offsetof(CPUSPARCState
, tl
));
2831 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2832 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2834 /* calculate offset to current trap state from env->ts, reuse r_tl */
2835 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2836 tcg_gen_addi_ptr(r_tsptr
, cpu_env
, offsetof(CPUSPARCState
, ts
));
2838 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2840 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2841 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2842 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2847 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2848 int width
, bool cc
, bool left
)
2851 uint64_t amask
, tabl
, tabr
;
2852 int shift
, imask
, omask
;
2855 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2856 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2857 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2858 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2859 dc
->cc_op
= CC_OP_SUB
;
2862 /* Theory of operation: there are two tables, left and right (not to
2863 be confused with the left and right versions of the opcode). These
2864 are indexed by the low 3 bits of the inputs. To make things "easy",
2865 these tables are loaded into two constants, TABL and TABR below.
2866 The operation index = (input & imask) << shift calculates the index
2867 into the constant, while val = (table >> index) & omask calculates
2868 the value we're looking for. */
2875 tabl
= 0x80c0e0f0f8fcfeffULL
;
2876 tabr
= 0xff7f3f1f0f070301ULL
;
2878 tabl
= 0x0103070f1f3f7fffULL
;
2879 tabr
= 0xfffefcf8f0e0c080ULL
;
2899 tabl
= (2 << 2) | 3;
2900 tabr
= (3 << 2) | 1;
2902 tabl
= (1 << 2) | 3;
2903 tabr
= (3 << 2) | 2;
2910 lo1
= tcg_temp_new();
2911 lo2
= tcg_temp_new();
2912 tcg_gen_andi_tl(lo1
, s1
, imask
);
2913 tcg_gen_andi_tl(lo2
, s2
, imask
);
2914 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2915 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2917 tcg_gen_shr_tl(lo1
, tcg_constant_tl(tabl
), lo1
);
2918 tcg_gen_shr_tl(lo2
, tcg_constant_tl(tabr
), lo2
);
2919 tcg_gen_andi_tl(lo1
, lo1
, omask
);
2920 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2924 amask
&= 0xffffffffULL
;
2926 tcg_gen_andi_tl(s1
, s1
, amask
);
2927 tcg_gen_andi_tl(s2
, s2
, amask
);
2929 /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
2930 tcg_gen_and_tl(lo2
, lo2
, lo1
);
2931 tcg_gen_movcond_tl(TCG_COND_EQ
, dst
, s1
, s2
, lo1
, lo2
);
2934 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2936 TCGv tmp
= tcg_temp_new();
2938 tcg_gen_add_tl(tmp
, s1
, s2
);
2939 tcg_gen_andi_tl(dst
, tmp
, -8);
2941 tcg_gen_neg_tl(tmp
, tmp
);
2943 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2946 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2950 t1
= tcg_temp_new();
2951 t2
= tcg_temp_new();
2952 shift
= tcg_temp_new();
2954 tcg_gen_andi_tl(shift
, gsr
, 7);
2955 tcg_gen_shli_tl(shift
, shift
, 3);
2956 tcg_gen_shl_tl(t1
, s1
, shift
);
2958 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2959 shift of (up to 63) followed by a constant shift of 1. */
2960 tcg_gen_xori_tl(shift
, shift
, 63);
2961 tcg_gen_shr_tl(t2
, s2
, shift
);
2962 tcg_gen_shri_tl(t2
, t2
, 1);
2964 tcg_gen_or_tl(dst
, t1
, t2
);
2968 #define CHECK_IU_FEATURE(dc, FEATURE) \
2969 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2971 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2972 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2975 /* before an instruction, dc->pc must be static */
2976 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2978 unsigned int opc
, rs1
, rs2
, rd
;
2979 TCGv cpu_src1
, cpu_src2
;
2980 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2981 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2984 opc
= GET_FIELD(insn
, 0, 1);
2985 rd
= GET_FIELD(insn
, 2, 6);
2988 case 0: /* branches/sethi */
2990 unsigned int xop
= GET_FIELD(insn
, 7, 9);
2993 #ifdef TARGET_SPARC64
2994 case 0x1: /* V9 BPcc */
2998 target
= GET_FIELD_SP(insn
, 0, 18);
2999 target
= sign_extend(target
, 19);
3001 cc
= GET_FIELD_SP(insn
, 20, 21);
3003 do_branch(dc
, target
, insn
, 0);
3005 do_branch(dc
, target
, insn
, 1);
3010 case 0x3: /* V9 BPr */
3012 target
= GET_FIELD_SP(insn
, 0, 13) |
3013 (GET_FIELD_SP(insn
, 20, 21) << 14);
3014 target
= sign_extend(target
, 16);
3016 cpu_src1
= get_src1(dc
, insn
);
3017 do_branch_reg(dc
, target
, insn
, cpu_src1
);
3020 case 0x5: /* V9 FBPcc */
3022 int cc
= GET_FIELD_SP(insn
, 20, 21);
3023 if (gen_trap_ifnofpu(dc
)) {
3026 target
= GET_FIELD_SP(insn
, 0, 18);
3027 target
= sign_extend(target
, 19);
3029 do_fbranch(dc
, target
, insn
, cc
);
3033 case 0x7: /* CBN+x */
3038 case 0x2: /* BN+x */
3040 target
= GET_FIELD(insn
, 10, 31);
3041 target
= sign_extend(target
, 22);
3043 do_branch(dc
, target
, insn
, 0);
3046 case 0x6: /* FBN+x */
3048 if (gen_trap_ifnofpu(dc
)) {
3051 target
= GET_FIELD(insn
, 10, 31);
3052 target
= sign_extend(target
, 22);
3054 do_fbranch(dc
, target
, insn
, 0);
3057 case 0x4: /* SETHI */
3058 /* Special-case %g0 because that's the canonical nop. */
3060 uint32_t value
= GET_FIELD(insn
, 10, 31);
3061 TCGv t
= gen_dest_gpr(dc
, rd
);
3062 tcg_gen_movi_tl(t
, value
<< 10);
3063 gen_store_gpr(dc
, rd
, t
);
3066 case 0x0: /* UNIMPL */
3075 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
3076 TCGv o7
= gen_dest_gpr(dc
, 15);
3078 tcg_gen_movi_tl(o7
, dc
->pc
);
3079 gen_store_gpr(dc
, 15, o7
);
3082 #ifdef TARGET_SPARC64
3083 if (unlikely(AM_CHECK(dc
))) {
3084 target
&= 0xffffffffULL
;
3090 case 2: /* FPU & Logical Operations */
3092 unsigned int xop
= GET_FIELD(insn
, 7, 12);
3093 TCGv cpu_dst
= tcg_temp_new();
3096 if (xop
== 0x3a) { /* generate trap */
3097 int cond
= GET_FIELD(insn
, 3, 6);
3099 TCGLabel
*l1
= NULL
;
3110 /* Conditional trap. */
3112 #ifdef TARGET_SPARC64
3114 int cc
= GET_FIELD_SP(insn
, 11, 12);
3116 gen_compare(&cmp
, 0, cond
, dc
);
3117 } else if (cc
== 2) {
3118 gen_compare(&cmp
, 1, cond
, dc
);
3123 gen_compare(&cmp
, 0, cond
, dc
);
3125 l1
= gen_new_label();
3126 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
3127 cmp
.c1
, cmp
.c2
, l1
);
3130 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
3131 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
3133 /* Don't use the normal temporaries, as they may well have
3134 gone out of scope with the branch above. While we're
3135 doing that we might as well pre-truncate to 32-bit. */
3136 trap
= tcg_temp_new_i32();
3138 rs1
= GET_FIELD_SP(insn
, 14, 18);
3140 rs2
= GET_FIELD_SP(insn
, 0, 7);
3142 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
3143 /* Signal that the trap value is fully constant. */
3146 TCGv t1
= gen_load_gpr(dc
, rs1
);
3147 tcg_gen_trunc_tl_i32(trap
, t1
);
3148 tcg_gen_addi_i32(trap
, trap
, rs2
);
3152 rs2
= GET_FIELD_SP(insn
, 0, 4);
3153 t1
= gen_load_gpr(dc
, rs1
);
3154 t2
= gen_load_gpr(dc
, rs2
);
3155 tcg_gen_add_tl(t1
, t1
, t2
);
3156 tcg_gen_trunc_tl_i32(trap
, t1
);
3159 tcg_gen_andi_i32(trap
, trap
, mask
);
3160 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
3163 gen_helper_raise_exception(cpu_env
, trap
);
3166 /* An unconditional trap ends the TB. */
3167 dc
->base
.is_jmp
= DISAS_NORETURN
;
3170 /* A conditional trap falls through to the next insn. */
3174 } else if (xop
== 0x28) {
3175 rs1
= GET_FIELD(insn
, 13, 17);
3178 #ifndef TARGET_SPARC64
3179 case 0x01 ... 0x0e: /* undefined in the SPARCv8
3180 manual, rdy on the microSPARC
3182 case 0x0f: /* stbar in the SPARCv8 manual,
3183 rdy on the microSPARC II */
3184 case 0x10 ... 0x1f: /* implementation-dependent in the
3185 SPARCv8 manual, rdy on the
3188 if (rs1
== 0x11 && dc
->def
->features
& CPU_FEATURE_ASR17
) {
3189 TCGv t
= gen_dest_gpr(dc
, rd
);
3190 /* Read Asr17 for a Leon3 monoprocessor */
3191 tcg_gen_movi_tl(t
, (1 << 8) | (dc
->def
->nwindows
- 1));
3192 gen_store_gpr(dc
, rd
, t
);
3196 gen_store_gpr(dc
, rd
, cpu_y
);
3198 #ifdef TARGET_SPARC64
3199 case 0x2: /* V9 rdccr */
3201 gen_helper_rdccr(cpu_dst
, cpu_env
);
3202 gen_store_gpr(dc
, rd
, cpu_dst
);
3204 case 0x3: /* V9 rdasi */
3205 tcg_gen_movi_tl(cpu_dst
, dc
->asi
);
3206 gen_store_gpr(dc
, rd
, cpu_dst
);
3208 case 0x4: /* V9 rdtick */
3213 r_tickptr
= tcg_temp_new_ptr();
3214 r_const
= tcg_constant_i32(dc
->mem_idx
);
3215 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3216 offsetof(CPUSPARCState
, tick
));
3217 if (translator_io_start(&dc
->base
)) {
3218 dc
->base
.is_jmp
= DISAS_EXIT
;
3220 gen_helper_tick_get_count(cpu_dst
, cpu_env
, r_tickptr
,
3222 gen_store_gpr(dc
, rd
, cpu_dst
);
3225 case 0x5: /* V9 rdpc */
3227 TCGv t
= gen_dest_gpr(dc
, rd
);
3228 if (unlikely(AM_CHECK(dc
))) {
3229 tcg_gen_movi_tl(t
, dc
->pc
& 0xffffffffULL
);
3231 tcg_gen_movi_tl(t
, dc
->pc
);
3233 gen_store_gpr(dc
, rd
, t
);
3236 case 0x6: /* V9 rdfprs */
3237 tcg_gen_ext_i32_tl(cpu_dst
, cpu_fprs
);
3238 gen_store_gpr(dc
, rd
, cpu_dst
);
3240 case 0xf: /* V9 membar */
3241 break; /* no effect */
3242 case 0x13: /* Graphics Status */
3243 if (gen_trap_ifnofpu(dc
)) {
3246 gen_store_gpr(dc
, rd
, cpu_gsr
);
3248 case 0x16: /* Softint */
3249 tcg_gen_ld32s_tl(cpu_dst
, cpu_env
,
3250 offsetof(CPUSPARCState
, softint
));
3251 gen_store_gpr(dc
, rd
, cpu_dst
);
3253 case 0x17: /* Tick compare */
3254 gen_store_gpr(dc
, rd
, cpu_tick_cmpr
);
3256 case 0x18: /* System tick */
3261 r_tickptr
= tcg_temp_new_ptr();
3262 r_const
= tcg_constant_i32(dc
->mem_idx
);
3263 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3264 offsetof(CPUSPARCState
, stick
));
3265 if (translator_io_start(&dc
->base
)) {
3266 dc
->base
.is_jmp
= DISAS_EXIT
;
3268 gen_helper_tick_get_count(cpu_dst
, cpu_env
, r_tickptr
,
3270 gen_store_gpr(dc
, rd
, cpu_dst
);
3273 case 0x19: /* System tick compare */
3274 gen_store_gpr(dc
, rd
, cpu_stick_cmpr
);
3276 case 0x1a: /* UltraSPARC-T1 Strand status */
3277 /* XXX HYPV check maybe not enough, UA2005 & UA2007 describe
3278 * this ASR as impl. dep
3280 CHECK_IU_FEATURE(dc
, HYPV
);
3282 TCGv t
= gen_dest_gpr(dc
, rd
);
3283 tcg_gen_movi_tl(t
, 1UL);
3284 gen_store_gpr(dc
, rd
, t
);
3287 case 0x10: /* Performance Control */
3288 case 0x11: /* Performance Instrumentation Counter */
3289 case 0x12: /* Dispatch Control */
3290 case 0x14: /* Softint set, WO */
3291 case 0x15: /* Softint clear, WO */
3296 #if !defined(CONFIG_USER_ONLY)
3297 } else if (xop
== 0x29) { /* rdpsr / UA2005 rdhpr */
3298 #ifndef TARGET_SPARC64
3299 if (!supervisor(dc
)) {
3303 gen_helper_rdpsr(cpu_dst
, cpu_env
);
3305 CHECK_IU_FEATURE(dc
, HYPV
);
3306 if (!hypervisor(dc
))
3308 rs1
= GET_FIELD(insn
, 13, 17);
3311 tcg_gen_ld_i64(cpu_dst
, cpu_env
,
3312 offsetof(CPUSPARCState
, hpstate
));
3315 // gen_op_rdhtstate();
3318 tcg_gen_mov_tl(cpu_dst
, cpu_hintp
);
3321 tcg_gen_mov_tl(cpu_dst
, cpu_htba
);
3324 tcg_gen_mov_tl(cpu_dst
, cpu_hver
);
3326 case 31: // hstick_cmpr
3327 tcg_gen_mov_tl(cpu_dst
, cpu_hstick_cmpr
);
3333 gen_store_gpr(dc
, rd
, cpu_dst
);
3335 } else if (xop
== 0x2a) { /* rdwim / V9 rdpr */
3336 if (!supervisor(dc
)) {
3339 cpu_tmp0
= tcg_temp_new();
3340 #ifdef TARGET_SPARC64
3341 rs1
= GET_FIELD(insn
, 13, 17);
3347 r_tsptr
= tcg_temp_new_ptr();
3348 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3349 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3350 offsetof(trap_state
, tpc
));
3357 r_tsptr
= tcg_temp_new_ptr();
3358 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3359 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3360 offsetof(trap_state
, tnpc
));
3367 r_tsptr
= tcg_temp_new_ptr();
3368 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3369 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
3370 offsetof(trap_state
, tstate
));
3375 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
3377 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3378 tcg_gen_ld32s_tl(cpu_tmp0
, r_tsptr
,
3379 offsetof(trap_state
, tt
));
3387 r_tickptr
= tcg_temp_new_ptr();
3388 r_const
= tcg_constant_i32(dc
->mem_idx
);
3389 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3390 offsetof(CPUSPARCState
, tick
));
3391 if (translator_io_start(&dc
->base
)) {
3392 dc
->base
.is_jmp
= DISAS_EXIT
;
3394 gen_helper_tick_get_count(cpu_tmp0
, cpu_env
,
3395 r_tickptr
, r_const
);
3399 tcg_gen_mov_tl(cpu_tmp0
, cpu_tbr
);
3402 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3403 offsetof(CPUSPARCState
, pstate
));
3406 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3407 offsetof(CPUSPARCState
, tl
));
3410 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3411 offsetof(CPUSPARCState
, psrpil
));
3414 gen_helper_rdcwp(cpu_tmp0
, cpu_env
);
3417 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3418 offsetof(CPUSPARCState
, cansave
));
3420 case 11: // canrestore
3421 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3422 offsetof(CPUSPARCState
, canrestore
));
3424 case 12: // cleanwin
3425 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3426 offsetof(CPUSPARCState
, cleanwin
));
3428 case 13: // otherwin
3429 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3430 offsetof(CPUSPARCState
, otherwin
));
3433 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3434 offsetof(CPUSPARCState
, wstate
));
3436 case 16: // UA2005 gl
3437 CHECK_IU_FEATURE(dc
, GL
);
3438 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
3439 offsetof(CPUSPARCState
, gl
));
3441 case 26: // UA2005 strand status
3442 CHECK_IU_FEATURE(dc
, HYPV
);
3443 if (!hypervisor(dc
))
3445 tcg_gen_mov_tl(cpu_tmp0
, cpu_ssr
);
3448 tcg_gen_mov_tl(cpu_tmp0
, cpu_ver
);
3455 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_wim
);
3457 gen_store_gpr(dc
, rd
, cpu_tmp0
);
3460 #if defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)
3461 } else if (xop
== 0x2b) { /* rdtbr / V9 flushw */
3462 #ifdef TARGET_SPARC64
3463 gen_helper_flushw(cpu_env
);
3465 if (!supervisor(dc
))
3467 gen_store_gpr(dc
, rd
, cpu_tbr
);
3471 } else if (xop
== 0x34) { /* FPU Operations */
3472 if (gen_trap_ifnofpu(dc
)) {
3475 gen_op_clear_ieee_excp_and_FTT();
3476 rs1
= GET_FIELD(insn
, 13, 17);
3477 rs2
= GET_FIELD(insn
, 27, 31);
3478 xop
= GET_FIELD(insn
, 18, 26);
3481 case 0x1: /* fmovs */
3482 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
3483 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
3485 case 0x5: /* fnegs */
3486 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
3488 case 0x9: /* fabss */
3489 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
3491 case 0x29: /* fsqrts */
3492 CHECK_FPU_FEATURE(dc
, FSQRT
);
3493 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
3495 case 0x2a: /* fsqrtd */
3496 CHECK_FPU_FEATURE(dc
, FSQRT
);
3497 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
3499 case 0x2b: /* fsqrtq */
3500 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3501 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
3503 case 0x41: /* fadds */
3504 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
3506 case 0x42: /* faddd */
3507 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
3509 case 0x43: /* faddq */
3510 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3511 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
3513 case 0x45: /* fsubs */
3514 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
3516 case 0x46: /* fsubd */
3517 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
3519 case 0x47: /* fsubq */
3520 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3521 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
3523 case 0x49: /* fmuls */
3524 CHECK_FPU_FEATURE(dc
, FMUL
);
3525 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
3527 case 0x4a: /* fmuld */
3528 CHECK_FPU_FEATURE(dc
, FMUL
);
3529 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
3531 case 0x4b: /* fmulq */
3532 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3533 CHECK_FPU_FEATURE(dc
, FMUL
);
3534 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
3536 case 0x4d: /* fdivs */
3537 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
3539 case 0x4e: /* fdivd */
3540 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
3542 case 0x4f: /* fdivq */
3543 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3544 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
3546 case 0x69: /* fsmuld */
3547 CHECK_FPU_FEATURE(dc
, FSMULD
);
3548 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
3550 case 0x6e: /* fdmulq */
3551 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3552 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
3554 case 0xc4: /* fitos */
3555 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
3557 case 0xc6: /* fdtos */
3558 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
3560 case 0xc7: /* fqtos */
3561 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3562 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
3564 case 0xc8: /* fitod */
3565 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
3567 case 0xc9: /* fstod */
3568 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
3570 case 0xcb: /* fqtod */
3571 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3572 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
3574 case 0xcc: /* fitoq */
3575 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3576 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
3578 case 0xcd: /* fstoq */
3579 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3580 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
3582 case 0xce: /* fdtoq */
3583 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3584 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
3586 case 0xd1: /* fstoi */
3587 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
3589 case 0xd2: /* fdtoi */
3590 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
3592 case 0xd3: /* fqtoi */
3593 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3594 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
3596 #ifdef TARGET_SPARC64
3597 case 0x2: /* V9 fmovd */
3598 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
3599 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
3601 case 0x3: /* V9 fmovq */
3602 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3603 gen_move_Q(dc
, rd
, rs2
);
3605 case 0x6: /* V9 fnegd */
3606 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
3608 case 0x7: /* V9 fnegq */
3609 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3610 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
3612 case 0xa: /* V9 fabsd */
3613 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
3615 case 0xb: /* V9 fabsq */
3616 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3617 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
3619 case 0x81: /* V9 fstox */
3620 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
3622 case 0x82: /* V9 fdtox */
3623 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
3625 case 0x83: /* V9 fqtox */
3626 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3627 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
3629 case 0x84: /* V9 fxtos */
3630 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
3632 case 0x88: /* V9 fxtod */
3633 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
3635 case 0x8c: /* V9 fxtoq */
3636 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3637 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
3643 } else if (xop
== 0x35) { /* FPU Operations */
3644 #ifdef TARGET_SPARC64
3647 if (gen_trap_ifnofpu(dc
)) {
3650 gen_op_clear_ieee_excp_and_FTT();
3651 rs1
= GET_FIELD(insn
, 13, 17);
3652 rs2
= GET_FIELD(insn
, 27, 31);
3653 xop
= GET_FIELD(insn
, 18, 26);
3655 #ifdef TARGET_SPARC64
3659 cond = GET_FIELD_SP(insn, 10, 12); \
3660 cpu_src1 = get_src1(dc, insn); \
3661 gen_compare_reg(&cmp, cond, cpu_src1); \
3662 gen_fmov##sz(dc, &cmp, rd, rs2); \
3665 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
3668 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
3671 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
3672 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3679 #ifdef TARGET_SPARC64
3680 #define FMOVCC(fcc, sz) \
3683 cond = GET_FIELD_SP(insn, 14, 17); \
3684 gen_fcompare(&cmp, fcc, cond); \
3685 gen_fmov##sz(dc, &cmp, rd, rs2); \
3688 case 0x001: /* V9 fmovscc %fcc0 */
3691 case 0x002: /* V9 fmovdcc %fcc0 */
3694 case 0x003: /* V9 fmovqcc %fcc0 */
3695 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3698 case 0x041: /* V9 fmovscc %fcc1 */
3701 case 0x042: /* V9 fmovdcc %fcc1 */
3704 case 0x043: /* V9 fmovqcc %fcc1 */
3705 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3708 case 0x081: /* V9 fmovscc %fcc2 */
3711 case 0x082: /* V9 fmovdcc %fcc2 */
3714 case 0x083: /* V9 fmovqcc %fcc2 */
3715 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3718 case 0x0c1: /* V9 fmovscc %fcc3 */
3721 case 0x0c2: /* V9 fmovdcc %fcc3 */
3724 case 0x0c3: /* V9 fmovqcc %fcc3 */
3725 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3729 #define FMOVCC(xcc, sz) \
3732 cond = GET_FIELD_SP(insn, 14, 17); \
3733 gen_compare(&cmp, xcc, cond, dc); \
3734 gen_fmov##sz(dc, &cmp, rd, rs2); \
3737 case 0x101: /* V9 fmovscc %icc */
3740 case 0x102: /* V9 fmovdcc %icc */
3743 case 0x103: /* V9 fmovqcc %icc */
3744 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3747 case 0x181: /* V9 fmovscc %xcc */
3750 case 0x182: /* V9 fmovdcc %xcc */
3753 case 0x183: /* V9 fmovqcc %xcc */
3754 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3759 case 0x51: /* fcmps, V9 %fcc */
3760 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3761 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3762 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3764 case 0x52: /* fcmpd, V9 %fcc */
3765 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3766 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3767 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3769 case 0x53: /* fcmpq, V9 %fcc */
3770 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3771 gen_op_load_fpr_QT0(QFPREG(rs1
));
3772 gen_op_load_fpr_QT1(QFPREG(rs2
));
3773 gen_op_fcmpq(rd
& 3);
3775 case 0x55: /* fcmpes, V9 %fcc */
3776 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3777 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3778 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3780 case 0x56: /* fcmped, V9 %fcc */
3781 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3782 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3783 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3785 case 0x57: /* fcmpeq, V9 %fcc */
3786 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3787 gen_op_load_fpr_QT0(QFPREG(rs1
));
3788 gen_op_load_fpr_QT1(QFPREG(rs2
));
3789 gen_op_fcmpeq(rd
& 3);
3794 } else if (xop
== 0x2) {
3795 TCGv dst
= gen_dest_gpr(dc
, rd
);
3796 rs1
= GET_FIELD(insn
, 13, 17);
3798 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3799 if (IS_IMM
) { /* immediate */
3800 simm
= GET_FIELDs(insn
, 19, 31);
3801 tcg_gen_movi_tl(dst
, simm
);
3802 gen_store_gpr(dc
, rd
, dst
);
3803 } else { /* register */
3804 rs2
= GET_FIELD(insn
, 27, 31);
3806 tcg_gen_movi_tl(dst
, 0);
3807 gen_store_gpr(dc
, rd
, dst
);
3809 cpu_src2
= gen_load_gpr(dc
, rs2
);
3810 gen_store_gpr(dc
, rd
, cpu_src2
);
3814 cpu_src1
= get_src1(dc
, insn
);
3815 if (IS_IMM
) { /* immediate */
3816 simm
= GET_FIELDs(insn
, 19, 31);
3817 tcg_gen_ori_tl(dst
, cpu_src1
, simm
);
3818 gen_store_gpr(dc
, rd
, dst
);
3819 } else { /* register */
3820 rs2
= GET_FIELD(insn
, 27, 31);
3822 /* mov shortcut: or x, %g0, y -> mov x, y */
3823 gen_store_gpr(dc
, rd
, cpu_src1
);
3825 cpu_src2
= gen_load_gpr(dc
, rs2
);
3826 tcg_gen_or_tl(dst
, cpu_src1
, cpu_src2
);
3827 gen_store_gpr(dc
, rd
, dst
);
3831 #ifdef TARGET_SPARC64
3832 } else if (xop
== 0x25) { /* sll, V9 sllx */
3833 cpu_src1
= get_src1(dc
, insn
);
3834 if (IS_IMM
) { /* immediate */
3835 simm
= GET_FIELDs(insn
, 20, 31);
3836 if (insn
& (1 << 12)) {
3837 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3839 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
3841 } else { /* register */
3842 rs2
= GET_FIELD(insn
, 27, 31);
3843 cpu_src2
= gen_load_gpr(dc
, rs2
);
3844 cpu_tmp0
= tcg_temp_new();
3845 if (insn
& (1 << 12)) {
3846 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3848 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3850 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3852 gen_store_gpr(dc
, rd
, cpu_dst
);
3853 } else if (xop
== 0x26) { /* srl, V9 srlx */
3854 cpu_src1
= get_src1(dc
, insn
);
3855 if (IS_IMM
) { /* immediate */
3856 simm
= GET_FIELDs(insn
, 20, 31);
3857 if (insn
& (1 << 12)) {
3858 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3860 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3861 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3863 } else { /* register */
3864 rs2
= GET_FIELD(insn
, 27, 31);
3865 cpu_src2
= gen_load_gpr(dc
, rs2
);
3866 cpu_tmp0
= tcg_temp_new();
3867 if (insn
& (1 << 12)) {
3868 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3869 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3871 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3872 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3873 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3876 gen_store_gpr(dc
, rd
, cpu_dst
);
3877 } else if (xop
== 0x27) { /* sra, V9 srax */
3878 cpu_src1
= get_src1(dc
, insn
);
3879 if (IS_IMM
) { /* immediate */
3880 simm
= GET_FIELDs(insn
, 20, 31);
3881 if (insn
& (1 << 12)) {
3882 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3884 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3885 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3887 } else { /* register */
3888 rs2
= GET_FIELD(insn
, 27, 31);
3889 cpu_src2
= gen_load_gpr(dc
, rs2
);
3890 cpu_tmp0
= tcg_temp_new();
3891 if (insn
& (1 << 12)) {
3892 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3893 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3895 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3896 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3897 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3900 gen_store_gpr(dc
, rd
, cpu_dst
);
3902 } else if (xop
< 0x36) {
3904 cpu_src1
= get_src1(dc
, insn
);
3905 cpu_src2
= get_src2(dc
, insn
);
3906 switch (xop
& ~0x10) {
3909 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3910 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3911 dc
->cc_op
= CC_OP_ADD
;
3913 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3917 tcg_gen_and_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3919 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3920 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3921 dc
->cc_op
= CC_OP_LOGIC
;
3925 tcg_gen_or_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3927 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3928 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3929 dc
->cc_op
= CC_OP_LOGIC
;
3933 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3935 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3936 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3937 dc
->cc_op
= CC_OP_LOGIC
;
3942 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3943 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
3944 dc
->cc_op
= CC_OP_SUB
;
3946 tcg_gen_sub_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3949 case 0x5: /* andn */
3950 tcg_gen_andc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3952 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3953 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3954 dc
->cc_op
= CC_OP_LOGIC
;
3958 tcg_gen_orc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3960 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3961 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3962 dc
->cc_op
= CC_OP_LOGIC
;
3965 case 0x7: /* xorn */
3966 tcg_gen_eqv_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3968 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3969 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3970 dc
->cc_op
= CC_OP_LOGIC
;
3973 case 0x8: /* addx, V9 addc */
3974 gen_op_addx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3977 #ifdef TARGET_SPARC64
3978 case 0x9: /* V9 mulx */
3979 tcg_gen_mul_i64(cpu_dst
, cpu_src1
, cpu_src2
);
3982 case 0xa: /* umul */
3983 CHECK_IU_FEATURE(dc
, MUL
);
3984 gen_op_umul(cpu_dst
, cpu_src1
, cpu_src2
);
3986 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3987 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3988 dc
->cc_op
= CC_OP_LOGIC
;
3991 case 0xb: /* smul */
3992 CHECK_IU_FEATURE(dc
, MUL
);
3993 gen_op_smul(cpu_dst
, cpu_src1
, cpu_src2
);
3995 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3996 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3997 dc
->cc_op
= CC_OP_LOGIC
;
4000 case 0xc: /* subx, V9 subc */
4001 gen_op_subx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
4004 #ifdef TARGET_SPARC64
4005 case 0xd: /* V9 udivx */
4006 gen_helper_udivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
4009 case 0xe: /* udiv */
4010 CHECK_IU_FEATURE(dc
, DIV
);
4012 gen_helper_udiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
4014 dc
->cc_op
= CC_OP_DIV
;
4016 gen_helper_udiv(cpu_dst
, cpu_env
, cpu_src1
,
4020 case 0xf: /* sdiv */
4021 CHECK_IU_FEATURE(dc
, DIV
);
4023 gen_helper_sdiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
4025 dc
->cc_op
= CC_OP_DIV
;
4027 gen_helper_sdiv(cpu_dst
, cpu_env
, cpu_src1
,
4034 gen_store_gpr(dc
, rd
, cpu_dst
);
4036 cpu_src1
= get_src1(dc
, insn
);
4037 cpu_src2
= get_src2(dc
, insn
);
4039 case 0x20: /* taddcc */
4040 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4041 gen_store_gpr(dc
, rd
, cpu_dst
);
4042 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TADD
);
4043 dc
->cc_op
= CC_OP_TADD
;
4045 case 0x21: /* tsubcc */
4046 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
4047 gen_store_gpr(dc
, rd
, cpu_dst
);
4048 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TSUB
);
4049 dc
->cc_op
= CC_OP_TSUB
;
4051 case 0x22: /* taddcctv */
4052 gen_helper_taddcctv(cpu_dst
, cpu_env
,
4053 cpu_src1
, cpu_src2
);
4054 gen_store_gpr(dc
, rd
, cpu_dst
);
4055 dc
->cc_op
= CC_OP_TADDTV
;
4057 case 0x23: /* tsubcctv */
4058 gen_helper_tsubcctv(cpu_dst
, cpu_env
,
4059 cpu_src1
, cpu_src2
);
4060 gen_store_gpr(dc
, rd
, cpu_dst
);
4061 dc
->cc_op
= CC_OP_TSUBTV
;
4063 case 0x24: /* mulscc */
4065 gen_op_mulscc(cpu_dst
, cpu_src1
, cpu_src2
);
4066 gen_store_gpr(dc
, rd
, cpu_dst
);
4067 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
4068 dc
->cc_op
= CC_OP_ADD
;
4070 #ifndef TARGET_SPARC64
4071 case 0x25: /* sll */
4072 if (IS_IMM
) { /* immediate */
4073 simm
= GET_FIELDs(insn
, 20, 31);
4074 tcg_gen_shli_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4075 } else { /* register */
4076 cpu_tmp0
= tcg_temp_new();
4077 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4078 tcg_gen_shl_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4080 gen_store_gpr(dc
, rd
, cpu_dst
);
4082 case 0x26: /* srl */
4083 if (IS_IMM
) { /* immediate */
4084 simm
= GET_FIELDs(insn
, 20, 31);
4085 tcg_gen_shri_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4086 } else { /* register */
4087 cpu_tmp0
= tcg_temp_new();
4088 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4089 tcg_gen_shr_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4091 gen_store_gpr(dc
, rd
, cpu_dst
);
4093 case 0x27: /* sra */
4094 if (IS_IMM
) { /* immediate */
4095 simm
= GET_FIELDs(insn
, 20, 31);
4096 tcg_gen_sari_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
4097 } else { /* register */
4098 cpu_tmp0
= tcg_temp_new();
4099 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
4100 tcg_gen_sar_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
4102 gen_store_gpr(dc
, rd
, cpu_dst
);
4107 cpu_tmp0
= tcg_temp_new();
4110 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4111 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
4113 #ifndef TARGET_SPARC64
4114 case 0x01 ... 0x0f: /* undefined in the
4118 case 0x10 ... 0x1f: /* implementation-dependent
4122 if ((rd
== 0x13) && (dc
->def
->features
&
4123 CPU_FEATURE_POWERDOWN
)) {
4124 /* LEON3 power-down */
4126 gen_helper_power_down(cpu_env
);
4130 case 0x2: /* V9 wrccr */
4131 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4132 gen_helper_wrccr(cpu_env
, cpu_tmp0
);
4133 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
4134 dc
->cc_op
= CC_OP_FLAGS
;
4136 case 0x3: /* V9 wrasi */
4137 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4138 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0xff);
4139 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4140 offsetof(CPUSPARCState
, asi
));
4142 * End TB to notice changed ASI.
4143 * TODO: Could notice src1 = %g0 and IS_IMM,
4144 * update DisasContext and not exit the TB.
4148 tcg_gen_lookup_and_goto_ptr();
4149 dc
->base
.is_jmp
= DISAS_NORETURN
;
4151 case 0x6: /* V9 wrfprs */
4152 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4153 tcg_gen_trunc_tl_i32(cpu_fprs
, cpu_tmp0
);
4157 tcg_gen_exit_tb(NULL
, 0);
4158 dc
->base
.is_jmp
= DISAS_NORETURN
;
4160 case 0xf: /* V9 sir, nop if user */
4161 #if !defined(CONFIG_USER_ONLY)
4162 if (supervisor(dc
)) {
4167 case 0x13: /* Graphics Status */
4168 if (gen_trap_ifnofpu(dc
)) {
4171 tcg_gen_xor_tl(cpu_gsr
, cpu_src1
, cpu_src2
);
4173 case 0x14: /* Softint set */
4174 if (!supervisor(dc
))
4176 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4177 gen_helper_set_softint(cpu_env
, cpu_tmp0
);
4179 case 0x15: /* Softint clear */
4180 if (!supervisor(dc
))
4182 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4183 gen_helper_clear_softint(cpu_env
, cpu_tmp0
);
4185 case 0x16: /* Softint write */
4186 if (!supervisor(dc
))
4188 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4189 gen_helper_write_softint(cpu_env
, cpu_tmp0
);
4191 case 0x17: /* Tick compare */
4192 #if !defined(CONFIG_USER_ONLY)
4193 if (!supervisor(dc
))
4199 tcg_gen_xor_tl(cpu_tick_cmpr
, cpu_src1
,
4201 r_tickptr
= tcg_temp_new_ptr();
4202 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4203 offsetof(CPUSPARCState
, tick
));
4204 translator_io_start(&dc
->base
);
4205 gen_helper_tick_set_limit(r_tickptr
,
4207 /* End TB to handle timer interrupt */
4208 dc
->base
.is_jmp
= DISAS_EXIT
;
4211 case 0x18: /* System tick */
4212 #if !defined(CONFIG_USER_ONLY)
4213 if (!supervisor(dc
))
4219 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
,
4221 r_tickptr
= tcg_temp_new_ptr();
4222 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4223 offsetof(CPUSPARCState
, stick
));
4224 translator_io_start(&dc
->base
);
4225 gen_helper_tick_set_count(r_tickptr
,
4227 /* End TB to handle timer interrupt */
4228 dc
->base
.is_jmp
= DISAS_EXIT
;
4231 case 0x19: /* System tick compare */
4232 #if !defined(CONFIG_USER_ONLY)
4233 if (!supervisor(dc
))
4239 tcg_gen_xor_tl(cpu_stick_cmpr
, cpu_src1
,
4241 r_tickptr
= tcg_temp_new_ptr();
4242 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4243 offsetof(CPUSPARCState
, stick
));
4244 translator_io_start(&dc
->base
);
4245 gen_helper_tick_set_limit(r_tickptr
,
4247 /* End TB to handle timer interrupt */
4248 dc
->base
.is_jmp
= DISAS_EXIT
;
4252 case 0x10: /* Performance Control */
4253 case 0x11: /* Performance Instrumentation
4255 case 0x12: /* Dispatch Control */
4262 #if !defined(CONFIG_USER_ONLY)
4263 case 0x31: /* wrpsr, V9 saved, restored */
4265 if (!supervisor(dc
))
4267 #ifdef TARGET_SPARC64
4270 gen_helper_saved(cpu_env
);
4273 gen_helper_restored(cpu_env
);
4275 case 2: /* UA2005 allclean */
4276 case 3: /* UA2005 otherw */
4277 case 4: /* UA2005 normalw */
4278 case 5: /* UA2005 invalw */
4284 cpu_tmp0
= tcg_temp_new();
4285 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4286 gen_helper_wrpsr(cpu_env
, cpu_tmp0
);
4287 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
4288 dc
->cc_op
= CC_OP_FLAGS
;
4291 tcg_gen_exit_tb(NULL
, 0);
4292 dc
->base
.is_jmp
= DISAS_NORETURN
;
4296 case 0x32: /* wrwim, V9 wrpr */
4298 if (!supervisor(dc
))
4300 cpu_tmp0
= tcg_temp_new();
4301 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4302 #ifdef TARGET_SPARC64
4308 r_tsptr
= tcg_temp_new_ptr();
4309 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4310 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4311 offsetof(trap_state
, tpc
));
4318 r_tsptr
= tcg_temp_new_ptr();
4319 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4320 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4321 offsetof(trap_state
, tnpc
));
4328 r_tsptr
= tcg_temp_new_ptr();
4329 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4330 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
4331 offsetof(trap_state
,
4339 r_tsptr
= tcg_temp_new_ptr();
4340 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
4341 tcg_gen_st32_tl(cpu_tmp0
, r_tsptr
,
4342 offsetof(trap_state
, tt
));
4349 r_tickptr
= tcg_temp_new_ptr();
4350 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4351 offsetof(CPUSPARCState
, tick
));
4352 translator_io_start(&dc
->base
);
4353 gen_helper_tick_set_count(r_tickptr
,
4355 /* End TB to handle timer interrupt */
4356 dc
->base
.is_jmp
= DISAS_EXIT
;
4360 tcg_gen_mov_tl(cpu_tbr
, cpu_tmp0
);
4364 if (translator_io_start(&dc
->base
)) {
4365 dc
->base
.is_jmp
= DISAS_EXIT
;
4367 gen_helper_wrpstate(cpu_env
, cpu_tmp0
);
4368 dc
->npc
= DYNAMIC_PC
;
4372 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4373 offsetof(CPUSPARCState
, tl
));
4374 dc
->npc
= DYNAMIC_PC
;
4377 if (translator_io_start(&dc
->base
)) {
4378 dc
->base
.is_jmp
= DISAS_EXIT
;
4380 gen_helper_wrpil(cpu_env
, cpu_tmp0
);
4383 gen_helper_wrcwp(cpu_env
, cpu_tmp0
);
4386 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4387 offsetof(CPUSPARCState
,
4390 case 11: // canrestore
4391 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4392 offsetof(CPUSPARCState
,
4395 case 12: // cleanwin
4396 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4397 offsetof(CPUSPARCState
,
4400 case 13: // otherwin
4401 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4402 offsetof(CPUSPARCState
,
4406 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
4407 offsetof(CPUSPARCState
,
4410 case 16: // UA2005 gl
4411 CHECK_IU_FEATURE(dc
, GL
);
4412 gen_helper_wrgl(cpu_env
, cpu_tmp0
);
4414 case 26: // UA2005 strand status
4415 CHECK_IU_FEATURE(dc
, HYPV
);
4416 if (!hypervisor(dc
))
4418 tcg_gen_mov_tl(cpu_ssr
, cpu_tmp0
);
4424 tcg_gen_trunc_tl_i32(cpu_wim
, cpu_tmp0
);
4425 if (dc
->def
->nwindows
!= 32) {
4426 tcg_gen_andi_tl(cpu_wim
, cpu_wim
,
4427 (1 << dc
->def
->nwindows
) - 1);
4432 case 0x33: /* wrtbr, UA2005 wrhpr */
4434 #ifndef TARGET_SPARC64
4435 if (!supervisor(dc
))
4437 tcg_gen_xor_tl(cpu_tbr
, cpu_src1
, cpu_src2
);
4439 CHECK_IU_FEATURE(dc
, HYPV
);
4440 if (!hypervisor(dc
))
4442 cpu_tmp0
= tcg_temp_new();
4443 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
4446 tcg_gen_st_i64(cpu_tmp0
, cpu_env
,
4447 offsetof(CPUSPARCState
,
4451 tcg_gen_exit_tb(NULL
, 0);
4452 dc
->base
.is_jmp
= DISAS_NORETURN
;
4455 // XXX gen_op_wrhtstate();
4458 tcg_gen_mov_tl(cpu_hintp
, cpu_tmp0
);
4461 tcg_gen_mov_tl(cpu_htba
, cpu_tmp0
);
4463 case 31: // hstick_cmpr
4467 tcg_gen_mov_tl(cpu_hstick_cmpr
, cpu_tmp0
);
4468 r_tickptr
= tcg_temp_new_ptr();
4469 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
4470 offsetof(CPUSPARCState
, hstick
));
4471 translator_io_start(&dc
->base
);
4472 gen_helper_tick_set_limit(r_tickptr
,
4474 /* End TB to handle timer interrupt */
4475 dc
->base
.is_jmp
= DISAS_EXIT
;
4478 case 6: // hver readonly
4486 #ifdef TARGET_SPARC64
4487 case 0x2c: /* V9 movcc */
4489 int cc
= GET_FIELD_SP(insn
, 11, 12);
4490 int cond
= GET_FIELD_SP(insn
, 14, 17);
4494 if (insn
& (1 << 18)) {
4496 gen_compare(&cmp
, 0, cond
, dc
);
4497 } else if (cc
== 2) {
4498 gen_compare(&cmp
, 1, cond
, dc
);
4503 gen_fcompare(&cmp
, cc
, cond
);
4506 /* The get_src2 above loaded the normal 13-bit
4507 immediate field, not the 11-bit field we have
4508 in movcc. But it did handle the reg case. */
4510 simm
= GET_FIELD_SPs(insn
, 0, 10);
4511 tcg_gen_movi_tl(cpu_src2
, simm
);
4514 dst
= gen_load_gpr(dc
, rd
);
4515 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4518 gen_store_gpr(dc
, rd
, dst
);
4521 case 0x2d: /* V9 sdivx */
4522 gen_helper_sdivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
4523 gen_store_gpr(dc
, rd
, cpu_dst
);
4525 case 0x2e: /* V9 popc */
4526 tcg_gen_ctpop_tl(cpu_dst
, cpu_src2
);
4527 gen_store_gpr(dc
, rd
, cpu_dst
);
4529 case 0x2f: /* V9 movr */
4531 int cond
= GET_FIELD_SP(insn
, 10, 12);
4535 gen_compare_reg(&cmp
, cond
, cpu_src1
);
4537 /* The get_src2 above loaded the normal 13-bit
4538 immediate field, not the 10-bit field we have
4539 in movr. But it did handle the reg case. */
4541 simm
= GET_FIELD_SPs(insn
, 0, 9);
4542 tcg_gen_movi_tl(cpu_src2
, simm
);
4545 dst
= gen_load_gpr(dc
, rd
);
4546 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4549 gen_store_gpr(dc
, rd
, dst
);
4557 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4558 #ifdef TARGET_SPARC64
4559 int opf
= GET_FIELD_SP(insn
, 5, 13);
4560 rs1
= GET_FIELD(insn
, 13, 17);
4561 rs2
= GET_FIELD(insn
, 27, 31);
4562 if (gen_trap_ifnofpu(dc
)) {
4567 case 0x000: /* VIS I edge8cc */
4568 CHECK_FPU_FEATURE(dc
, VIS1
);
4569 cpu_src1
= gen_load_gpr(dc
, rs1
);
4570 cpu_src2
= gen_load_gpr(dc
, rs2
);
4571 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4572 gen_store_gpr(dc
, rd
, cpu_dst
);
4574 case 0x001: /* VIS II edge8n */
4575 CHECK_FPU_FEATURE(dc
, VIS2
);
4576 cpu_src1
= gen_load_gpr(dc
, rs1
);
4577 cpu_src2
= gen_load_gpr(dc
, rs2
);
4578 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4579 gen_store_gpr(dc
, rd
, cpu_dst
);
4581 case 0x002: /* VIS I edge8lcc */
4582 CHECK_FPU_FEATURE(dc
, VIS1
);
4583 cpu_src1
= gen_load_gpr(dc
, rs1
);
4584 cpu_src2
= gen_load_gpr(dc
, rs2
);
4585 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4586 gen_store_gpr(dc
, rd
, cpu_dst
);
4588 case 0x003: /* VIS II edge8ln */
4589 CHECK_FPU_FEATURE(dc
, VIS2
);
4590 cpu_src1
= gen_load_gpr(dc
, rs1
);
4591 cpu_src2
= gen_load_gpr(dc
, rs2
);
4592 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4593 gen_store_gpr(dc
, rd
, cpu_dst
);
4595 case 0x004: /* VIS I edge16cc */
4596 CHECK_FPU_FEATURE(dc
, VIS1
);
4597 cpu_src1
= gen_load_gpr(dc
, rs1
);
4598 cpu_src2
= gen_load_gpr(dc
, rs2
);
4599 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4600 gen_store_gpr(dc
, rd
, cpu_dst
);
4602 case 0x005: /* VIS II edge16n */
4603 CHECK_FPU_FEATURE(dc
, VIS2
);
4604 cpu_src1
= gen_load_gpr(dc
, rs1
);
4605 cpu_src2
= gen_load_gpr(dc
, rs2
);
4606 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4607 gen_store_gpr(dc
, rd
, cpu_dst
);
4609 case 0x006: /* VIS I edge16lcc */
4610 CHECK_FPU_FEATURE(dc
, VIS1
);
4611 cpu_src1
= gen_load_gpr(dc
, rs1
);
4612 cpu_src2
= gen_load_gpr(dc
, rs2
);
4613 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4614 gen_store_gpr(dc
, rd
, cpu_dst
);
4616 case 0x007: /* VIS II edge16ln */
4617 CHECK_FPU_FEATURE(dc
, VIS2
);
4618 cpu_src1
= gen_load_gpr(dc
, rs1
);
4619 cpu_src2
= gen_load_gpr(dc
, rs2
);
4620 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4621 gen_store_gpr(dc
, rd
, cpu_dst
);
4623 case 0x008: /* VIS I edge32cc */
4624 CHECK_FPU_FEATURE(dc
, VIS1
);
4625 cpu_src1
= gen_load_gpr(dc
, rs1
);
4626 cpu_src2
= gen_load_gpr(dc
, rs2
);
4627 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4628 gen_store_gpr(dc
, rd
, cpu_dst
);
4630 case 0x009: /* VIS II edge32n */
4631 CHECK_FPU_FEATURE(dc
, VIS2
);
4632 cpu_src1
= gen_load_gpr(dc
, rs1
);
4633 cpu_src2
= gen_load_gpr(dc
, rs2
);
4634 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4635 gen_store_gpr(dc
, rd
, cpu_dst
);
4637 case 0x00a: /* VIS I edge32lcc */
4638 CHECK_FPU_FEATURE(dc
, VIS1
);
4639 cpu_src1
= gen_load_gpr(dc
, rs1
);
4640 cpu_src2
= gen_load_gpr(dc
, rs2
);
4641 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4642 gen_store_gpr(dc
, rd
, cpu_dst
);
4644 case 0x00b: /* VIS II edge32ln */
4645 CHECK_FPU_FEATURE(dc
, VIS2
);
4646 cpu_src1
= gen_load_gpr(dc
, rs1
);
4647 cpu_src2
= gen_load_gpr(dc
, rs2
);
4648 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4649 gen_store_gpr(dc
, rd
, cpu_dst
);
4651 case 0x010: /* VIS I array8 */
4652 CHECK_FPU_FEATURE(dc
, VIS1
);
4653 cpu_src1
= gen_load_gpr(dc
, rs1
);
4654 cpu_src2
= gen_load_gpr(dc
, rs2
);
4655 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4656 gen_store_gpr(dc
, rd
, cpu_dst
);
4658 case 0x012: /* VIS I array16 */
4659 CHECK_FPU_FEATURE(dc
, VIS1
);
4660 cpu_src1
= gen_load_gpr(dc
, rs1
);
4661 cpu_src2
= gen_load_gpr(dc
, rs2
);
4662 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4663 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4664 gen_store_gpr(dc
, rd
, cpu_dst
);
4666 case 0x014: /* VIS I array32 */
4667 CHECK_FPU_FEATURE(dc
, VIS1
);
4668 cpu_src1
= gen_load_gpr(dc
, rs1
);
4669 cpu_src2
= gen_load_gpr(dc
, rs2
);
4670 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4671 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4672 gen_store_gpr(dc
, rd
, cpu_dst
);
4674 case 0x018: /* VIS I alignaddr */
4675 CHECK_FPU_FEATURE(dc
, VIS1
);
4676 cpu_src1
= gen_load_gpr(dc
, rs1
);
4677 cpu_src2
= gen_load_gpr(dc
, rs2
);
4678 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4679 gen_store_gpr(dc
, rd
, cpu_dst
);
4681 case 0x01a: /* VIS I alignaddrl */
4682 CHECK_FPU_FEATURE(dc
, VIS1
);
4683 cpu_src1
= gen_load_gpr(dc
, rs1
);
4684 cpu_src2
= gen_load_gpr(dc
, rs2
);
4685 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4686 gen_store_gpr(dc
, rd
, cpu_dst
);
4688 case 0x019: /* VIS II bmask */
4689 CHECK_FPU_FEATURE(dc
, VIS2
);
4690 cpu_src1
= gen_load_gpr(dc
, rs1
);
4691 cpu_src2
= gen_load_gpr(dc
, rs2
);
4692 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4693 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4694 gen_store_gpr(dc
, rd
, cpu_dst
);
4696 case 0x020: /* VIS I fcmple16 */
4697 CHECK_FPU_FEATURE(dc
, VIS1
);
4698 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4699 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4700 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4701 gen_store_gpr(dc
, rd
, cpu_dst
);
4703 case 0x022: /* VIS I fcmpne16 */
4704 CHECK_FPU_FEATURE(dc
, VIS1
);
4705 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4706 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4707 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4708 gen_store_gpr(dc
, rd
, cpu_dst
);
4710 case 0x024: /* VIS I fcmple32 */
4711 CHECK_FPU_FEATURE(dc
, VIS1
);
4712 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4713 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4714 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4715 gen_store_gpr(dc
, rd
, cpu_dst
);
4717 case 0x026: /* VIS I fcmpne32 */
4718 CHECK_FPU_FEATURE(dc
, VIS1
);
4719 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4720 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4721 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4722 gen_store_gpr(dc
, rd
, cpu_dst
);
4724 case 0x028: /* VIS I fcmpgt16 */
4725 CHECK_FPU_FEATURE(dc
, VIS1
);
4726 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4727 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4728 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4729 gen_store_gpr(dc
, rd
, cpu_dst
);
4731 case 0x02a: /* VIS I fcmpeq16 */
4732 CHECK_FPU_FEATURE(dc
, VIS1
);
4733 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4734 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4735 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4736 gen_store_gpr(dc
, rd
, cpu_dst
);
4738 case 0x02c: /* VIS I fcmpgt32 */
4739 CHECK_FPU_FEATURE(dc
, VIS1
);
4740 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4741 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4742 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4743 gen_store_gpr(dc
, rd
, cpu_dst
);
4745 case 0x02e: /* VIS I fcmpeq32 */
4746 CHECK_FPU_FEATURE(dc
, VIS1
);
4747 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4748 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4749 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4750 gen_store_gpr(dc
, rd
, cpu_dst
);
4752 case 0x031: /* VIS I fmul8x16 */
4753 CHECK_FPU_FEATURE(dc
, VIS1
);
4754 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4756 case 0x033: /* VIS I fmul8x16au */
4757 CHECK_FPU_FEATURE(dc
, VIS1
);
4758 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4760 case 0x035: /* VIS I fmul8x16al */
4761 CHECK_FPU_FEATURE(dc
, VIS1
);
4762 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4764 case 0x036: /* VIS I fmul8sux16 */
4765 CHECK_FPU_FEATURE(dc
, VIS1
);
4766 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4768 case 0x037: /* VIS I fmul8ulx16 */
4769 CHECK_FPU_FEATURE(dc
, VIS1
);
4770 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4772 case 0x038: /* VIS I fmuld8sux16 */
4773 CHECK_FPU_FEATURE(dc
, VIS1
);
4774 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4776 case 0x039: /* VIS I fmuld8ulx16 */
4777 CHECK_FPU_FEATURE(dc
, VIS1
);
4778 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4780 case 0x03a: /* VIS I fpack32 */
4781 CHECK_FPU_FEATURE(dc
, VIS1
);
4782 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4784 case 0x03b: /* VIS I fpack16 */
4785 CHECK_FPU_FEATURE(dc
, VIS1
);
4786 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4787 cpu_dst_32
= gen_dest_fpr_F(dc
);
4788 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4789 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4791 case 0x03d: /* VIS I fpackfix */
4792 CHECK_FPU_FEATURE(dc
, VIS1
);
4793 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4794 cpu_dst_32
= gen_dest_fpr_F(dc
);
4795 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4796 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4798 case 0x03e: /* VIS I pdist */
4799 CHECK_FPU_FEATURE(dc
, VIS1
);
4800 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
4802 case 0x048: /* VIS I faligndata */
4803 CHECK_FPU_FEATURE(dc
, VIS1
);
4804 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
4806 case 0x04b: /* VIS I fpmerge */
4807 CHECK_FPU_FEATURE(dc
, VIS1
);
4808 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
4810 case 0x04c: /* VIS II bshuffle */
4811 CHECK_FPU_FEATURE(dc
, VIS2
);
4812 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
4814 case 0x04d: /* VIS I fexpand */
4815 CHECK_FPU_FEATURE(dc
, VIS1
);
4816 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
4818 case 0x050: /* VIS I fpadd16 */
4819 CHECK_FPU_FEATURE(dc
, VIS1
);
4820 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
4822 case 0x051: /* VIS I fpadd16s */
4823 CHECK_FPU_FEATURE(dc
, VIS1
);
4824 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
4826 case 0x052: /* VIS I fpadd32 */
4827 CHECK_FPU_FEATURE(dc
, VIS1
);
4828 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
4830 case 0x053: /* VIS I fpadd32s */
4831 CHECK_FPU_FEATURE(dc
, VIS1
);
4832 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
4834 case 0x054: /* VIS I fpsub16 */
4835 CHECK_FPU_FEATURE(dc
, VIS1
);
4836 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
4838 case 0x055: /* VIS I fpsub16s */
4839 CHECK_FPU_FEATURE(dc
, VIS1
);
4840 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
4842 case 0x056: /* VIS I fpsub32 */
4843 CHECK_FPU_FEATURE(dc
, VIS1
);
4844 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
4846 case 0x057: /* VIS I fpsub32s */
4847 CHECK_FPU_FEATURE(dc
, VIS1
);
4848 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
4850 case 0x060: /* VIS I fzero */
4851 CHECK_FPU_FEATURE(dc
, VIS1
);
4852 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4853 tcg_gen_movi_i64(cpu_dst_64
, 0);
4854 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4856 case 0x061: /* VIS I fzeros */
4857 CHECK_FPU_FEATURE(dc
, VIS1
);
4858 cpu_dst_32
= gen_dest_fpr_F(dc
);
4859 tcg_gen_movi_i32(cpu_dst_32
, 0);
4860 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4862 case 0x062: /* VIS I fnor */
4863 CHECK_FPU_FEATURE(dc
, VIS1
);
4864 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
4866 case 0x063: /* VIS I fnors */
4867 CHECK_FPU_FEATURE(dc
, VIS1
);
4868 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
4870 case 0x064: /* VIS I fandnot2 */
4871 CHECK_FPU_FEATURE(dc
, VIS1
);
4872 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
4874 case 0x065: /* VIS I fandnot2s */
4875 CHECK_FPU_FEATURE(dc
, VIS1
);
4876 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
4878 case 0x066: /* VIS I fnot2 */
4879 CHECK_FPU_FEATURE(dc
, VIS1
);
4880 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
4882 case 0x067: /* VIS I fnot2s */
4883 CHECK_FPU_FEATURE(dc
, VIS1
);
4884 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
4886 case 0x068: /* VIS I fandnot1 */
4887 CHECK_FPU_FEATURE(dc
, VIS1
);
4888 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
4890 case 0x069: /* VIS I fandnot1s */
4891 CHECK_FPU_FEATURE(dc
, VIS1
);
4892 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
4894 case 0x06a: /* VIS I fnot1 */
4895 CHECK_FPU_FEATURE(dc
, VIS1
);
4896 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
4898 case 0x06b: /* VIS I fnot1s */
4899 CHECK_FPU_FEATURE(dc
, VIS1
);
4900 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
4902 case 0x06c: /* VIS I fxor */
4903 CHECK_FPU_FEATURE(dc
, VIS1
);
4904 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
4906 case 0x06d: /* VIS I fxors */
4907 CHECK_FPU_FEATURE(dc
, VIS1
);
4908 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
4910 case 0x06e: /* VIS I fnand */
4911 CHECK_FPU_FEATURE(dc
, VIS1
);
4912 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
4914 case 0x06f: /* VIS I fnands */
4915 CHECK_FPU_FEATURE(dc
, VIS1
);
4916 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
4918 case 0x070: /* VIS I fand */
4919 CHECK_FPU_FEATURE(dc
, VIS1
);
4920 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
4922 case 0x071: /* VIS I fands */
4923 CHECK_FPU_FEATURE(dc
, VIS1
);
4924 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
4926 case 0x072: /* VIS I fxnor */
4927 CHECK_FPU_FEATURE(dc
, VIS1
);
4928 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
4930 case 0x073: /* VIS I fxnors */
4931 CHECK_FPU_FEATURE(dc
, VIS1
);
4932 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
4934 case 0x074: /* VIS I fsrc1 */
4935 CHECK_FPU_FEATURE(dc
, VIS1
);
4936 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4937 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4939 case 0x075: /* VIS I fsrc1s */
4940 CHECK_FPU_FEATURE(dc
, VIS1
);
4941 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4942 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4944 case 0x076: /* VIS I fornot2 */
4945 CHECK_FPU_FEATURE(dc
, VIS1
);
4946 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
4948 case 0x077: /* VIS I fornot2s */
4949 CHECK_FPU_FEATURE(dc
, VIS1
);
4950 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
4952 case 0x078: /* VIS I fsrc2 */
4953 CHECK_FPU_FEATURE(dc
, VIS1
);
4954 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4955 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4957 case 0x079: /* VIS I fsrc2s */
4958 CHECK_FPU_FEATURE(dc
, VIS1
);
4959 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4960 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4962 case 0x07a: /* VIS I fornot1 */
4963 CHECK_FPU_FEATURE(dc
, VIS1
);
4964 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
4966 case 0x07b: /* VIS I fornot1s */
4967 CHECK_FPU_FEATURE(dc
, VIS1
);
4968 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
4970 case 0x07c: /* VIS I for */
4971 CHECK_FPU_FEATURE(dc
, VIS1
);
4972 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
4974 case 0x07d: /* VIS I fors */
4975 CHECK_FPU_FEATURE(dc
, VIS1
);
4976 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
4978 case 0x07e: /* VIS I fone */
4979 CHECK_FPU_FEATURE(dc
, VIS1
);
4980 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4981 tcg_gen_movi_i64(cpu_dst_64
, -1);
4982 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4984 case 0x07f: /* VIS I fones */
4985 CHECK_FPU_FEATURE(dc
, VIS1
);
4986 cpu_dst_32
= gen_dest_fpr_F(dc
);
4987 tcg_gen_movi_i32(cpu_dst_32
, -1);
4988 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4990 case 0x080: /* VIS I shutdown */
4991 case 0x081: /* VIS II siam */
5000 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
5001 #ifdef TARGET_SPARC64
5006 #ifdef TARGET_SPARC64
5007 } else if (xop
== 0x39) { /* V9 return */
5009 cpu_src1
= get_src1(dc
, insn
);
5010 cpu_tmp0
= tcg_temp_new();
5011 if (IS_IMM
) { /* immediate */
5012 simm
= GET_FIELDs(insn
, 19, 31);
5013 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
5014 } else { /* register */
5015 rs2
= GET_FIELD(insn
, 27, 31);
5017 cpu_src2
= gen_load_gpr(dc
, rs2
);
5018 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
5020 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
5023 gen_helper_restore(cpu_env
);
5025 gen_check_align(cpu_tmp0
, 3);
5026 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5027 dc
->npc
= DYNAMIC_PC_LOOKUP
;
5031 cpu_src1
= get_src1(dc
, insn
);
5032 cpu_tmp0
= tcg_temp_new();
5033 if (IS_IMM
) { /* immediate */
5034 simm
= GET_FIELDs(insn
, 19, 31);
5035 tcg_gen_addi_tl(cpu_tmp0
, cpu_src1
, simm
);
5036 } else { /* register */
5037 rs2
= GET_FIELD(insn
, 27, 31);
5039 cpu_src2
= gen_load_gpr(dc
, rs2
);
5040 tcg_gen_add_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
5042 tcg_gen_mov_tl(cpu_tmp0
, cpu_src1
);
5046 case 0x38: /* jmpl */
5048 TCGv t
= gen_dest_gpr(dc
, rd
);
5049 tcg_gen_movi_tl(t
, dc
->pc
);
5050 gen_store_gpr(dc
, rd
, t
);
5053 gen_check_align(cpu_tmp0
, 3);
5054 gen_address_mask(dc
, cpu_tmp0
);
5055 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5056 dc
->npc
= DYNAMIC_PC_LOOKUP
;
5059 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5060 case 0x39: /* rett, V9 return */
5062 if (!supervisor(dc
))
5065 gen_check_align(cpu_tmp0
, 3);
5066 tcg_gen_mov_tl(cpu_npc
, cpu_tmp0
);
5067 dc
->npc
= DYNAMIC_PC
;
5068 gen_helper_rett(cpu_env
);
5072 case 0x3b: /* flush */
5073 if (!((dc
)->def
->features
& CPU_FEATURE_FLUSH
))
5077 case 0x3c: /* save */
5078 gen_helper_save(cpu_env
);
5079 gen_store_gpr(dc
, rd
, cpu_tmp0
);
5081 case 0x3d: /* restore */
5082 gen_helper_restore(cpu_env
);
5083 gen_store_gpr(dc
, rd
, cpu_tmp0
);
5085 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
5086 case 0x3e: /* V9 done/retry */
5090 if (!supervisor(dc
))
5092 dc
->npc
= DYNAMIC_PC
;
5093 dc
->pc
= DYNAMIC_PC
;
5094 translator_io_start(&dc
->base
);
5095 gen_helper_done(cpu_env
);
5098 if (!supervisor(dc
))
5100 dc
->npc
= DYNAMIC_PC
;
5101 dc
->pc
= DYNAMIC_PC
;
5102 translator_io_start(&dc
->base
);
5103 gen_helper_retry(cpu_env
);
5118 case 3: /* load/store instructions */
5120 unsigned int xop
= GET_FIELD(insn
, 7, 12);
5121 /* ??? gen_address_mask prevents us from using a source
5122 register directly. Always generate a temporary. */
5123 TCGv cpu_addr
= tcg_temp_new();
5125 tcg_gen_mov_tl(cpu_addr
, get_src1(dc
, insn
));
5126 if (xop
== 0x3c || xop
== 0x3e) {
5127 /* V9 casa/casxa : no offset */
5128 } else if (IS_IMM
) { /* immediate */
5129 simm
= GET_FIELDs(insn
, 19, 31);
5131 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, simm
);
5133 } else { /* register */
5134 rs2
= GET_FIELD(insn
, 27, 31);
5136 tcg_gen_add_tl(cpu_addr
, cpu_addr
, gen_load_gpr(dc
, rs2
));
5139 if (xop
< 4 || (xop
> 7 && xop
< 0x14 && xop
!= 0x0e) ||
5140 (xop
> 0x17 && xop
<= 0x1d ) ||
5141 (xop
> 0x2c && xop
<= 0x33) || xop
== 0x1f || xop
== 0x3d) {
5142 TCGv cpu_val
= gen_dest_gpr(dc
, rd
);
5145 case 0x0: /* ld, V9 lduw, load unsigned word */
5146 gen_address_mask(dc
, cpu_addr
);
5147 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5148 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5150 case 0x1: /* ldub, load unsigned byte */
5151 gen_address_mask(dc
, cpu_addr
);
5152 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5153 dc
->mem_idx
, MO_UB
);
5155 case 0x2: /* lduh, load unsigned halfword */
5156 gen_address_mask(dc
, cpu_addr
);
5157 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5158 dc
->mem_idx
, MO_TEUW
| MO_ALIGN
);
5160 case 0x3: /* ldd, load double word */
5166 gen_address_mask(dc
, cpu_addr
);
5167 t64
= tcg_temp_new_i64();
5168 tcg_gen_qemu_ld_i64(t64
, cpu_addr
,
5169 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5170 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
5171 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
5172 gen_store_gpr(dc
, rd
+ 1, cpu_val
);
5173 tcg_gen_shri_i64(t64
, t64
, 32);
5174 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
5175 tcg_gen_ext32u_tl(cpu_val
, cpu_val
);
5178 case 0x9: /* ldsb, load signed byte */
5179 gen_address_mask(dc
, cpu_addr
);
5180 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
, dc
->mem_idx
, MO_SB
);
5182 case 0xa: /* ldsh, load signed halfword */
5183 gen_address_mask(dc
, cpu_addr
);
5184 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5185 dc
->mem_idx
, MO_TESW
| MO_ALIGN
);
5187 case 0xd: /* ldstub */
5188 gen_ldstub(dc
, cpu_val
, cpu_addr
, dc
->mem_idx
);
5191 /* swap, swap register with memory. Also atomically */
5192 CHECK_IU_FEATURE(dc
, SWAP
);
5193 cpu_src1
= gen_load_gpr(dc
, rd
);
5194 gen_swap(dc
, cpu_val
, cpu_src1
, cpu_addr
,
5195 dc
->mem_idx
, MO_TEUL
);
5197 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5198 case 0x10: /* lda, V9 lduwa, load word alternate */
5199 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUL
);
5201 case 0x11: /* lduba, load unsigned byte alternate */
5202 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_UB
);
5204 case 0x12: /* lduha, load unsigned halfword alternate */
5205 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUW
);
5207 case 0x13: /* ldda, load double word alternate */
5211 gen_ldda_asi(dc
, cpu_addr
, insn
, rd
);
5213 case 0x19: /* ldsba, load signed byte alternate */
5214 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_SB
);
5216 case 0x1a: /* ldsha, load signed halfword alternate */
5217 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TESW
);
5219 case 0x1d: /* ldstuba -- XXX: should be atomically */
5220 gen_ldstub_asi(dc
, cpu_val
, cpu_addr
, insn
);
5222 case 0x1f: /* swapa, swap reg with alt. memory. Also
5224 CHECK_IU_FEATURE(dc
, SWAP
);
5225 cpu_src1
= gen_load_gpr(dc
, rd
);
5226 gen_swap_asi(dc
, cpu_val
, cpu_src1
, cpu_addr
, insn
);
5229 #ifndef TARGET_SPARC64
5230 case 0x30: /* ldc */
5231 case 0x31: /* ldcsr */
5232 case 0x33: /* lddc */
5236 #ifdef TARGET_SPARC64
5237 case 0x08: /* V9 ldsw */
5238 gen_address_mask(dc
, cpu_addr
);
5239 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5240 dc
->mem_idx
, MO_TESL
| MO_ALIGN
);
5242 case 0x0b: /* V9 ldx */
5243 gen_address_mask(dc
, cpu_addr
);
5244 tcg_gen_qemu_ld_tl(cpu_val
, cpu_addr
,
5245 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5247 case 0x18: /* V9 ldswa */
5248 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TESL
);
5250 case 0x1b: /* V9 ldxa */
5251 gen_ld_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUQ
);
5253 case 0x2d: /* V9 prefetch, no effect */
5255 case 0x30: /* V9 ldfa */
5256 if (gen_trap_ifnofpu(dc
)) {
5259 gen_ldf_asi(dc
, cpu_addr
, insn
, 4, rd
);
5260 gen_update_fprs_dirty(dc
, rd
);
5262 case 0x33: /* V9 lddfa */
5263 if (gen_trap_ifnofpu(dc
)) {
5266 gen_ldf_asi(dc
, cpu_addr
, insn
, 8, DFPREG(rd
));
5267 gen_update_fprs_dirty(dc
, DFPREG(rd
));
5269 case 0x3d: /* V9 prefetcha, no effect */
5271 case 0x32: /* V9 ldqfa */
5272 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5273 if (gen_trap_ifnofpu(dc
)) {
5276 gen_ldf_asi(dc
, cpu_addr
, insn
, 16, QFPREG(rd
));
5277 gen_update_fprs_dirty(dc
, QFPREG(rd
));
5283 gen_store_gpr(dc
, rd
, cpu_val
);
5284 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5287 } else if (xop
>= 0x20 && xop
< 0x24) {
5288 if (gen_trap_ifnofpu(dc
)) {
5292 case 0x20: /* ldf, load fpreg */
5293 gen_address_mask(dc
, cpu_addr
);
5294 cpu_dst_32
= gen_dest_fpr_F(dc
);
5295 tcg_gen_qemu_ld_i32(cpu_dst_32
, cpu_addr
,
5296 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5297 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
5299 case 0x21: /* ldfsr, V9 ldxfsr */
5300 #ifdef TARGET_SPARC64
5301 gen_address_mask(dc
, cpu_addr
);
5303 TCGv_i64 t64
= tcg_temp_new_i64();
5304 tcg_gen_qemu_ld_i64(t64
, cpu_addr
,
5305 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5306 gen_helper_ldxfsr(cpu_fsr
, cpu_env
, cpu_fsr
, t64
);
5310 cpu_dst_32
= tcg_temp_new_i32();
5311 tcg_gen_qemu_ld_i32(cpu_dst_32
, cpu_addr
,
5312 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5313 gen_helper_ldfsr(cpu_fsr
, cpu_env
, cpu_fsr
, cpu_dst_32
);
5315 case 0x22: /* ldqf, load quad fpreg */
5316 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5317 gen_address_mask(dc
, cpu_addr
);
5318 cpu_src1_64
= tcg_temp_new_i64();
5319 tcg_gen_qemu_ld_i64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
,
5320 MO_TEUQ
| MO_ALIGN_4
);
5321 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, 8);
5322 cpu_src2_64
= tcg_temp_new_i64();
5323 tcg_gen_qemu_ld_i64(cpu_src2_64
, cpu_addr
, dc
->mem_idx
,
5324 MO_TEUQ
| MO_ALIGN_4
);
5325 gen_store_fpr_Q(dc
, rd
, cpu_src1_64
, cpu_src2_64
);
5327 case 0x23: /* lddf, load double fpreg */
5328 gen_address_mask(dc
, cpu_addr
);
5329 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
5330 tcg_gen_qemu_ld_i64(cpu_dst_64
, cpu_addr
, dc
->mem_idx
,
5331 MO_TEUQ
| MO_ALIGN_4
);
5332 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
5337 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
5338 xop
== 0xe || xop
== 0x1e) {
5339 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
5342 case 0x4: /* st, store word */
5343 gen_address_mask(dc
, cpu_addr
);
5344 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5345 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5347 case 0x5: /* stb, store byte */
5348 gen_address_mask(dc
, cpu_addr
);
5349 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
, dc
->mem_idx
, MO_UB
);
5351 case 0x6: /* sth, store halfword */
5352 gen_address_mask(dc
, cpu_addr
);
5353 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5354 dc
->mem_idx
, MO_TEUW
| MO_ALIGN
);
5356 case 0x7: /* std, store double word */
5363 gen_address_mask(dc
, cpu_addr
);
5364 lo
= gen_load_gpr(dc
, rd
+ 1);
5365 t64
= tcg_temp_new_i64();
5366 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
5367 tcg_gen_qemu_st_i64(t64
, cpu_addr
,
5368 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5371 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5372 case 0x14: /* sta, V9 stwa, store word alternate */
5373 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUL
);
5375 case 0x15: /* stba, store byte alternate */
5376 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_UB
);
5378 case 0x16: /* stha, store halfword alternate */
5379 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUW
);
5381 case 0x17: /* stda, store double word alternate */
5385 gen_stda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
5388 #ifdef TARGET_SPARC64
5389 case 0x0e: /* V9 stx */
5390 gen_address_mask(dc
, cpu_addr
);
5391 tcg_gen_qemu_st_tl(cpu_val
, cpu_addr
,
5392 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5394 case 0x1e: /* V9 stxa */
5395 gen_st_asi(dc
, cpu_val
, cpu_addr
, insn
, MO_TEUQ
);
5401 } else if (xop
> 0x23 && xop
< 0x28) {
5402 if (gen_trap_ifnofpu(dc
)) {
5406 case 0x24: /* stf, store fpreg */
5407 gen_address_mask(dc
, cpu_addr
);
5408 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
5409 tcg_gen_qemu_st_i32(cpu_src1_32
, cpu_addr
,
5410 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5412 case 0x25: /* stfsr, V9 stxfsr */
5414 #ifdef TARGET_SPARC64
5415 gen_address_mask(dc
, cpu_addr
);
5417 tcg_gen_qemu_st_tl(cpu_fsr
, cpu_addr
,
5418 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN
);
5422 tcg_gen_qemu_st_tl(cpu_fsr
, cpu_addr
,
5423 dc
->mem_idx
, MO_TEUL
| MO_ALIGN
);
5427 #ifdef TARGET_SPARC64
5428 /* V9 stqf, store quad fpreg */
5429 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5430 gen_address_mask(dc
, cpu_addr
);
5431 /* ??? While stqf only requires 4-byte alignment, it is
5432 legal for the cpu to signal the unaligned exception.
5433 The OS trap handler is then required to fix it up.
5434 For qemu, this avoids having to probe the second page
5435 before performing the first write. */
5436 cpu_src1_64
= gen_load_fpr_Q0(dc
, rd
);
5437 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
,
5438 dc
->mem_idx
, MO_TEUQ
| MO_ALIGN_16
);
5439 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, 8);
5440 cpu_src2_64
= gen_load_fpr_Q1(dc
, rd
);
5441 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
,
5442 dc
->mem_idx
, MO_TEUQ
);
5444 #else /* !TARGET_SPARC64 */
5445 /* stdfq, store floating point queue */
5446 #if defined(CONFIG_USER_ONLY)
5449 if (!supervisor(dc
))
5451 if (gen_trap_ifnofpu(dc
)) {
5457 case 0x27: /* stdf, store double fpreg */
5458 gen_address_mask(dc
, cpu_addr
);
5459 cpu_src1_64
= gen_load_fpr_D(dc
, rd
);
5460 tcg_gen_qemu_st_i64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
,
5461 MO_TEUQ
| MO_ALIGN_4
);
5466 } else if (xop
> 0x33 && xop
< 0x3f) {
5468 #ifdef TARGET_SPARC64
5469 case 0x34: /* V9 stfa */
5470 if (gen_trap_ifnofpu(dc
)) {
5473 gen_stf_asi(dc
, cpu_addr
, insn
, 4, rd
);
5475 case 0x36: /* V9 stqfa */
5477 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5478 if (gen_trap_ifnofpu(dc
)) {
5481 gen_stf_asi(dc
, cpu_addr
, insn
, 16, QFPREG(rd
));
5484 case 0x37: /* V9 stdfa */
5485 if (gen_trap_ifnofpu(dc
)) {
5488 gen_stf_asi(dc
, cpu_addr
, insn
, 8, DFPREG(rd
));
5490 case 0x3e: /* V9 casxa */
5491 rs2
= GET_FIELD(insn
, 27, 31);
5492 cpu_src2
= gen_load_gpr(dc
, rs2
);
5493 gen_casx_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5496 case 0x34: /* stc */
5497 case 0x35: /* stcsr */
5498 case 0x36: /* stdcq */
5499 case 0x37: /* stdc */
5502 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
5503 case 0x3c: /* V9 or LEON3 casa */
5504 #ifndef TARGET_SPARC64
5505 CHECK_IU_FEATURE(dc
, CASA
);
5507 rs2
= GET_FIELD(insn
, 27, 31);
5508 cpu_src2
= gen_load_gpr(dc
, rs2
);
5509 gen_cas_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5521 /* default case for non jump instructions */
5525 case DYNAMIC_PC_LOOKUP
:
5530 /* we can do a static jump */
5531 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
5532 dc
->base
.is_jmp
= DISAS_NORETURN
;
5535 g_assert_not_reached();
5539 dc
->npc
= dc
->npc
+ 4;
5544 gen_exception(dc
, TT_ILL_INSN
);
5547 gen_exception(dc
, TT_UNIMP_FLUSH
);
5549 #if !defined(CONFIG_USER_ONLY)
5551 gen_exception(dc
, TT_PRIV_INSN
);
5555 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
5557 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5559 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
5562 #ifndef TARGET_SPARC64
5564 gen_exception(dc
, TT_NCP_INSN
);
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    int bound;

    /* Per-TB translator state: start PC comes from the TB, the next PC
       (delay-slot target) is carried in cs_base by the SPARC front end. */
    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    /* Condition codes are not known statically at TB entry. */
    dc->cc_op = CC_OP_DYNAMIC;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &env->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    /* The 8-bit implicit ASI is cached in the TB flags. */
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    /* No per-TB setup is needed beyond init_disas_context. */
}
static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    /* Low two bits of npc encode a dynamic-PC tag (DYNAMIC_PC /
       JUMP_PC / DYNAMIC_PC_LOOKUP) rather than a real address. */
    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            /* An annulled conditional branch: record the taken target,
               tagged with JUMP_PC so restore_state_to_opc can tell. */
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            /* Both collapse to DYNAMIC_PC for unwind purposes. */
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    /* Record (pc, npc) so exceptions can restore the guest state. */
    tcg_gen_insn_start(dc->pc, npc);
}
static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUSPARCState *env = cs->env_ptr;
    unsigned int insn;

    /* Fetch the 32-bit instruction at the current guest PC and
       advance pc_next before dispatching to the decoder. */
    insn = translator_ldl(env, &dc->base, dc->pc);
    dc->base.pc_next += 4;
    disas_sparc_insn(dc, insn);

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    /* Delay-slot handling may have moved dc->pc past pc_next;
       if they disagree, force the translator loop to stop. */
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool may_lookup;

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        /* PC or NPC is dynamic; decide whether a TB lookup by the
           runtime is allowed based on the dynamic-PC tag. */
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                may_lookup = true;
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            /* PC is static; store it, NPC alone is dynamic. */
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            may_lookup = true;
        }

        save_npc(dc);
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }
}
5696 static void sparc_tr_disas_log(const DisasContextBase
*dcbase
,
5697 CPUState
*cpu
, FILE *logfile
)
5699 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
5700 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
/* Callback table handed to the generic translator loop. */
static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
    .disas_log          = sparc_tr_disas_log,
};
/* Entry point from the generic TCG machinery: translate one TB. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}
/* One-time registration of the TCG globals that mirror CPUSPARCState
   fields (integer registers, condition codes, PCs, FP registers). */
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };
    /* Double-precision register names: each entry covers an even/odd pair. */
    static const char fregnames[32][4] = {
        "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
        "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
    };

    /* 32-bit globals: pointer to the TCGv_i32, env offset, debug name. */
    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_xcc, offsetof(CPUSPARCState, xcc), "xcc" },
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
#else
        { &cpu_wim, offsetof(CPUSPARCState, wim), "wim" },
#endif
        { &cpu_cc_op, offsetof(CPUSPARCState, cc_op), "cc_op" },
        { &cpu_psr, offsetof(CPUSPARCState, psr), "psr" },
    };

    /* target_ulong-sized globals, same layout as r32 above. */
    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_tick_cmpr, offsetof(CPUSPARCState, tick_cmpr), "tick_cmpr" },
        { &cpu_stick_cmpr, offsetof(CPUSPARCState, stick_cmpr), "stick_cmpr" },
        { &cpu_hstick_cmpr, offsetof(CPUSPARCState, hstick_cmpr),
          "hstick_cmpr" },
        { &cpu_hintp, offsetof(CPUSPARCState, hintp), "hintp" },
        { &cpu_htba, offsetof(CPUSPARCState, htba), "htba" },
        { &cpu_hver, offsetof(CPUSPARCState, hver), "hver" },
        { &cpu_ssr, offsetof(CPUSPARCState, ssr), "ssr" },
        { &cpu_ver, offsetof(CPUSPARCState, version), "ver" },
#endif
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_cc_src, offsetof(CPUSPARCState, cc_src), "cc_src" },
        { &cpu_cc_src2, offsetof(CPUSPARCState, cc_src2), "cc_src2" },
        { &cpu_cc_dst, offsetof(CPUSPARCState, cc_dst), "cc_dst" },
        { &cpu_fsr, offsetof(CPUSPARCState, fsr), "fsr" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
#ifndef CONFIG_USER_ONLY
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
#endif
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(cpu_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(cpu_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(cpu_env, rtl[i].off, rtl[i].name);
    }

    /* %g0 is hardwired to zero and has no backing global;
       %g1-%g7 live directly in env. */
    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    /* Windowed registers (%o/%l/%i) are accessed through regwptr, which
       tracks the current register window. */
    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }

    for (i = 0; i < TARGET_DPREGS; i++) {
        cpu_fpr[i] = tcg_global_mem_new_i64(cpu_env,
                                            offsetof(CPUSPARCState, fpr[i]),
                                            fregnames[i]);
    }
}
5806 void sparc_restore_state_to_opc(CPUState
*cs
,
5807 const TranslationBlock
*tb
,
5808 const uint64_t *data
)
5810 SPARCCPU
*cpu
= SPARC_CPU(cs
);
5811 CPUSPARCState
*env
= &cpu
->env
;
5812 target_ulong pc
= data
[0];
5813 target_ulong npc
= data
[1];
5816 if (npc
== DYNAMIC_PC
) {
5817 /* dynamic NPC: already stored */
5818 } else if (npc
& JUMP_PC
) {
5819 /* jump PC: use 'cond' and the jump targets of the translation */
5821 env
->npc
= npc
& ~3;