4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only two values
39 according to jump_pc[T2] */
41 /* global register indexes */
42 static TCGv_ptr cpu_env
, cpu_regwptr
;
43 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
44 static TCGv_i32 cpu_cc_op
;
45 static TCGv_i32 cpu_psr
;
46 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
, cpu_gregs
[8];
48 #ifndef CONFIG_USER_ONLY
51 static TCGv cpu_cond
, cpu_dst
, cpu_addr
, cpu_val
;
53 static TCGv_i32 cpu_xcc
, cpu_asi
, cpu_fprs
;
55 static TCGv cpu_tick_cmpr
, cpu_stick_cmpr
, cpu_hstick_cmpr
;
56 static TCGv cpu_hintp
, cpu_htba
, cpu_hver
, cpu_ssr
, cpu_ver
;
57 static TCGv_i32 cpu_softint
;
61 /* local register indexes (only used inside old micro ops) */
63 static TCGv_i32 cpu_tmp32
;
64 static TCGv_i64 cpu_tmp64
;
65 /* Floating point registers */
66 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
68 static target_ulong gen_opc_npc
[OPC_BUF_SIZE
];
69 static target_ulong gen_opc_jump_pc
[2];
71 #include "gen-icount.h"
73 typedef struct DisasContext
{
74 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
75 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
76 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
80 int address_mask_32bit
;
82 uint32_t cc_op
; /* current CC operation */
83 struct TranslationBlock
*tb
;
96 // This function uses non-native bit order
97 #define GET_FIELD(X, FROM, TO) \
98 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
100 // This function uses the order in the manuals, i.e. bit 0 is 2^0
101 #define GET_FIELD_SP(X, FROM, TO) \
102 GET_FIELD(X, 31 - (TO), 31 - (FROM))
104 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
105 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
107 #ifdef TARGET_SPARC64
108 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
109 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
111 #define DFPREG(r) (r & 0x1e)
112 #define QFPREG(r) (r & 0x1c)
115 #define UA2005_HTRAP_MASK 0xff
116 #define V8_TRAP_MASK 0x7f
/*
 * Sign-extend the low 'len' bits of x to a full int.
 * Used by GET_FIELDs/GET_FIELD_SPs to widen signed immediate fields.
 *
 * The left shift is done in the unsigned domain: shifting a negative
 * signed int left is undefined behavior in C.  The arithmetic right
 * shift of a negative value is implementation-defined but is relied on
 * throughout QEMU (GCC/Clang guarantee arithmetic shift).
 */
static int sign_extend(int x, int len)
{
    len = 32 - len;
    return (int)((unsigned int)x << len) >> len;
}
124 #define IS_IMM (insn & (1<<13))
/* Mark the FP register bank containing rd as dirty in FPRS (V9 only;
   a no-op for pre-V9 targets). */
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    /* rd < 32 selects the lower bank bit, otherwise the upper bank bit. */
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
133 /* floating point registers moves */
134 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
136 #if TCG_TARGET_REG_BITS == 32
138 return TCGV_LOW(cpu_fpr
[src
/ 2]);
140 return TCGV_HIGH(cpu_fpr
[src
/ 2]);
144 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr
[src
/ 2]));
146 TCGv_i32 ret
= tcg_temp_local_new_i32();
147 TCGv_i64 t
= tcg_temp_new_i64();
149 tcg_gen_shri_i64(t
, cpu_fpr
[src
/ 2], 32);
150 tcg_gen_trunc_i64_i32(ret
, t
);
151 tcg_temp_free_i64(t
);
153 dc
->t32
[dc
->n_t32
++] = ret
;
154 assert(dc
->n_t32
<= ARRAY_SIZE(dc
->t32
));
161 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
163 #if TCG_TARGET_REG_BITS == 32
165 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr
[dst
/ 2]), v
);
167 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr
[dst
/ 2]), v
);
170 TCGv_i64 t
= MAKE_TCGV_I64(GET_TCGV_I32(v
));
171 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
172 (dst
& 1 ? 0 : 32), 32);
174 gen_update_fprs_dirty(dst
);
177 static TCGv_i32
gen_dest_fpr_F(void)
182 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
185 return cpu_fpr
[src
/ 2];
188 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
191 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
192 gen_update_fprs_dirty(dst
);
195 static TCGv_i64
gen_dest_fpr_D(void)
200 static void gen_op_load_fpr_QT0(unsigned int src
)
202 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
203 offsetof(CPU_QuadU
, ll
.upper
));
204 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
205 offsetof(CPU_QuadU
, ll
.lower
));
208 static void gen_op_load_fpr_QT1(unsigned int src
)
210 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
211 offsetof(CPU_QuadU
, ll
.upper
));
212 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
213 offsetof(CPU_QuadU
, ll
.lower
));
216 static void gen_op_store_QT0_fpr(unsigned int dst
)
218 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
219 offsetof(CPU_QuadU
, ll
.upper
));
220 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
221 offsetof(CPU_QuadU
, ll
.lower
));
224 #ifdef TARGET_SPARC64
225 static void gen_move_Q(unsigned int rd
, unsigned int rs
)
230 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
231 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
232 gen_update_fprs_dirty(rd
);
237 #ifdef CONFIG_USER_ONLY
238 #define supervisor(dc) 0
239 #ifdef TARGET_SPARC64
240 #define hypervisor(dc) 0
243 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
244 #ifdef TARGET_SPARC64
245 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
250 #ifdef TARGET_SPARC64
252 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
254 #define AM_CHECK(dc) (1)
258 static inline void gen_address_mask(DisasContext
*dc
, TCGv addr
)
260 #ifdef TARGET_SPARC64
262 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
266 static inline void gen_movl_reg_TN(int reg
, TCGv tn
)
269 tcg_gen_movi_tl(tn
, 0);
271 tcg_gen_mov_tl(tn
, cpu_gregs
[reg
]);
273 tcg_gen_ld_tl(tn
, cpu_regwptr
, (reg
- 8) * sizeof(target_ulong
));
277 static inline void gen_movl_TN_reg(int reg
, TCGv tn
)
282 tcg_gen_mov_tl(cpu_gregs
[reg
], tn
);
284 tcg_gen_st_tl(tn
, cpu_regwptr
, (reg
- 8) * sizeof(target_ulong
));
288 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
,
289 target_ulong pc
, target_ulong npc
)
291 TranslationBlock
*tb
;
294 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) &&
295 (npc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) &&
297 /* jump to same page: we can use a direct jump */
298 tcg_gen_goto_tb(tb_num
);
299 tcg_gen_movi_tl(cpu_pc
, pc
);
300 tcg_gen_movi_tl(cpu_npc
, npc
);
301 tcg_gen_exit_tb((tcg_target_long
)tb
+ tb_num
);
303 /* jump to another page: currently not optimized */
304 tcg_gen_movi_tl(cpu_pc
, pc
);
305 tcg_gen_movi_tl(cpu_npc
, npc
);
311 static inline void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
313 tcg_gen_extu_i32_tl(reg
, src
);
314 tcg_gen_shri_tl(reg
, reg
, PSR_NEG_SHIFT
);
315 tcg_gen_andi_tl(reg
, reg
, 0x1);
318 static inline void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
320 tcg_gen_extu_i32_tl(reg
, src
);
321 tcg_gen_shri_tl(reg
, reg
, PSR_ZERO_SHIFT
);
322 tcg_gen_andi_tl(reg
, reg
, 0x1);
325 static inline void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
327 tcg_gen_extu_i32_tl(reg
, src
);
328 tcg_gen_shri_tl(reg
, reg
, PSR_OVF_SHIFT
);
329 tcg_gen_andi_tl(reg
, reg
, 0x1);
332 static inline void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
334 tcg_gen_extu_i32_tl(reg
, src
);
335 tcg_gen_shri_tl(reg
, reg
, PSR_CARRY_SHIFT
);
336 tcg_gen_andi_tl(reg
, reg
, 0x1);
339 static inline void gen_op_addi_cc(TCGv dst
, TCGv src1
, target_long src2
)
341 tcg_gen_mov_tl(cpu_cc_src
, src1
);
342 tcg_gen_movi_tl(cpu_cc_src2
, src2
);
343 tcg_gen_addi_tl(cpu_cc_dst
, cpu_cc_src
, src2
);
344 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
347 static inline void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
349 tcg_gen_mov_tl(cpu_cc_src
, src1
);
350 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
351 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
352 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
355 static TCGv_i32
gen_add32_carry32(void)
357 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
359 /* Carry is computed from a previous add: (dst < src) */
360 #if TARGET_LONG_BITS == 64
361 cc_src1_32
= tcg_temp_new_i32();
362 cc_src2_32
= tcg_temp_new_i32();
363 tcg_gen_trunc_i64_i32(cc_src1_32
, cpu_cc_dst
);
364 tcg_gen_trunc_i64_i32(cc_src2_32
, cpu_cc_src
);
366 cc_src1_32
= cpu_cc_dst
;
367 cc_src2_32
= cpu_cc_src
;
370 carry_32
= tcg_temp_new_i32();
371 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
373 #if TARGET_LONG_BITS == 64
374 tcg_temp_free_i32(cc_src1_32
);
375 tcg_temp_free_i32(cc_src2_32
);
381 static TCGv_i32
gen_sub32_carry32(void)
383 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
385 /* Carry is computed from a previous borrow: (src1 < src2) */
386 #if TARGET_LONG_BITS == 64
387 cc_src1_32
= tcg_temp_new_i32();
388 cc_src2_32
= tcg_temp_new_i32();
389 tcg_gen_trunc_i64_i32(cc_src1_32
, cpu_cc_src
);
390 tcg_gen_trunc_i64_i32(cc_src2_32
, cpu_cc_src2
);
392 cc_src1_32
= cpu_cc_src
;
393 cc_src2_32
= cpu_cc_src2
;
396 carry_32
= tcg_temp_new_i32();
397 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
399 #if TARGET_LONG_BITS == 64
400 tcg_temp_free_i32(cc_src1_32
);
401 tcg_temp_free_i32(cc_src2_32
);
407 static void gen_op_addx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
408 TCGv src2
, int update_cc
)
416 /* Carry is known to be zero. Fall back to plain ADD. */
418 gen_op_add_cc(dst
, src1
, src2
);
420 tcg_gen_add_tl(dst
, src1
, src2
);
427 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
429 /* For 32-bit hosts, we can re-use the host's hardware carry
430 generation by using an ADD2 opcode. We discard the low
431 part of the output. Ideally we'd combine this operation
432 with the add that generated the carry in the first place. */
433 TCGv dst_low
= tcg_temp_new();
434 tcg_gen_op6_i32(INDEX_op_add2_i32
, dst_low
, dst
,
435 cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
436 tcg_temp_free(dst_low
);
440 carry_32
= gen_add32_carry32();
446 carry_32
= gen_sub32_carry32();
450 /* We need external help to produce the carry. */
451 carry_32
= tcg_temp_new_i32();
452 gen_helper_compute_C_icc(carry_32
, cpu_env
);
456 #if TARGET_LONG_BITS == 64
457 carry
= tcg_temp_new();
458 tcg_gen_extu_i32_i64(carry
, carry_32
);
463 tcg_gen_add_tl(dst
, src1
, src2
);
464 tcg_gen_add_tl(dst
, dst
, carry
);
466 tcg_temp_free_i32(carry_32
);
467 #if TARGET_LONG_BITS == 64
468 tcg_temp_free(carry
);
471 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
475 tcg_gen_mov_tl(cpu_cc_src
, src1
);
476 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
477 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
478 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADDX
);
479 dc
->cc_op
= CC_OP_ADDX
;
483 static inline void gen_op_subi_cc(TCGv dst
, TCGv src1
, target_long src2
, DisasContext
*dc
)
485 tcg_gen_mov_tl(cpu_cc_src
, src1
);
486 tcg_gen_movi_tl(cpu_cc_src2
, src2
);
488 tcg_gen_mov_tl(cpu_cc_dst
, src1
);
489 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
490 dc
->cc_op
= CC_OP_LOGIC
;
492 tcg_gen_subi_tl(cpu_cc_dst
, cpu_cc_src
, src2
);
493 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
494 dc
->cc_op
= CC_OP_SUB
;
496 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
499 static inline void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
501 tcg_gen_mov_tl(cpu_cc_src
, src1
);
502 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
503 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
504 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
507 static void gen_op_subx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
508 TCGv src2
, int update_cc
)
516 /* Carry is known to be zero. Fall back to plain SUB. */
518 gen_op_sub_cc(dst
, src1
, src2
);
520 tcg_gen_sub_tl(dst
, src1
, src2
);
527 carry_32
= gen_add32_carry32();
533 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
535 /* For 32-bit hosts, we can re-use the host's hardware carry
536 generation by using a SUB2 opcode. We discard the low
537 part of the output. Ideally we'd combine this operation
538 with the add that generated the carry in the first place. */
539 TCGv dst_low
= tcg_temp_new();
540 tcg_gen_op6_i32(INDEX_op_sub2_i32
, dst_low
, dst
,
541 cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
542 tcg_temp_free(dst_low
);
546 carry_32
= gen_sub32_carry32();
550 /* We need external help to produce the carry. */
551 carry_32
= tcg_temp_new_i32();
552 gen_helper_compute_C_icc(carry_32
, cpu_env
);
556 #if TARGET_LONG_BITS == 64
557 carry
= tcg_temp_new();
558 tcg_gen_extu_i32_i64(carry
, carry_32
);
563 tcg_gen_sub_tl(dst
, src1
, src2
);
564 tcg_gen_sub_tl(dst
, dst
, carry
);
566 tcg_temp_free_i32(carry_32
);
567 #if TARGET_LONG_BITS == 64
568 tcg_temp_free(carry
);
571 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
575 tcg_gen_mov_tl(cpu_cc_src
, src1
);
576 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
577 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
578 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUBX
);
579 dc
->cc_op
= CC_OP_SUBX
;
583 static inline void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
587 r_temp
= tcg_temp_new();
593 zero
= tcg_const_tl(0);
594 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
595 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
596 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
597 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
602 // env->y = (b2 << 31) | (env->y >> 1);
603 tcg_gen_andi_tl(r_temp
, cpu_cc_src
, 0x1);
604 tcg_gen_shli_tl(r_temp
, r_temp
, 31);
605 tcg_gen_shri_tl(cpu_tmp0
, cpu_y
, 1);
606 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0x7fffffff);
607 tcg_gen_or_tl(cpu_tmp0
, cpu_tmp0
, r_temp
);
608 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
611 gen_mov_reg_N(cpu_tmp0
, cpu_psr
);
612 gen_mov_reg_V(r_temp
, cpu_psr
);
613 tcg_gen_xor_tl(cpu_tmp0
, cpu_tmp0
, r_temp
);
614 tcg_temp_free(r_temp
);
616 // T0 = (b1 << 31) | (T0 >> 1);
618 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, 31);
619 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
620 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
622 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
624 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
627 static inline void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
629 TCGv_i32 r_src1
, r_src2
;
630 TCGv_i64 r_temp
, r_temp2
;
632 r_src1
= tcg_temp_new_i32();
633 r_src2
= tcg_temp_new_i32();
635 tcg_gen_trunc_tl_i32(r_src1
, src1
);
636 tcg_gen_trunc_tl_i32(r_src2
, src2
);
638 r_temp
= tcg_temp_new_i64();
639 r_temp2
= tcg_temp_new_i64();
642 tcg_gen_ext_i32_i64(r_temp
, r_src2
);
643 tcg_gen_ext_i32_i64(r_temp2
, r_src1
);
645 tcg_gen_extu_i32_i64(r_temp
, r_src2
);
646 tcg_gen_extu_i32_i64(r_temp2
, r_src1
);
649 tcg_gen_mul_i64(r_temp2
, r_temp
, r_temp2
);
651 tcg_gen_shri_i64(r_temp
, r_temp2
, 32);
652 tcg_gen_trunc_i64_tl(cpu_tmp0
, r_temp
);
653 tcg_temp_free_i64(r_temp
);
654 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
656 tcg_gen_trunc_i64_tl(dst
, r_temp2
);
658 tcg_temp_free_i64(r_temp2
);
660 tcg_temp_free_i32(r_src1
);
661 tcg_temp_free_i32(r_src2
);
664 static inline void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
666 /* zero-extend truncated operands before multiplication */
667 gen_op_multiply(dst
, src1
, src2
, 0);
670 static inline void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
672 /* sign-extend truncated operands before multiplication */
673 gen_op_multiply(dst
, src1
, src2
, 1);
677 static inline void gen_op_eval_ba(TCGv dst
)
679 tcg_gen_movi_tl(dst
, 1);
683 static inline void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
685 gen_mov_reg_Z(dst
, src
);
689 static inline void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
691 gen_mov_reg_N(cpu_tmp0
, src
);
692 gen_mov_reg_V(dst
, src
);
693 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
694 gen_mov_reg_Z(cpu_tmp0
, src
);
695 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
699 static inline void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
701 gen_mov_reg_V(cpu_tmp0
, src
);
702 gen_mov_reg_N(dst
, src
);
703 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
707 static inline void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
709 gen_mov_reg_Z(cpu_tmp0
, src
);
710 gen_mov_reg_C(dst
, src
);
711 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
715 static inline void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
717 gen_mov_reg_C(dst
, src
);
721 static inline void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
723 gen_mov_reg_V(dst
, src
);
727 static inline void gen_op_eval_bn(TCGv dst
)
729 tcg_gen_movi_tl(dst
, 0);
733 static inline void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
735 gen_mov_reg_N(dst
, src
);
739 static inline void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
741 gen_mov_reg_Z(dst
, src
);
742 tcg_gen_xori_tl(dst
, dst
, 0x1);
746 static inline void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
748 gen_mov_reg_N(cpu_tmp0
, src
);
749 gen_mov_reg_V(dst
, src
);
750 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
751 gen_mov_reg_Z(cpu_tmp0
, src
);
752 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
753 tcg_gen_xori_tl(dst
, dst
, 0x1);
757 static inline void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
759 gen_mov_reg_V(cpu_tmp0
, src
);
760 gen_mov_reg_N(dst
, src
);
761 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
762 tcg_gen_xori_tl(dst
, dst
, 0x1);
766 static inline void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
768 gen_mov_reg_Z(cpu_tmp0
, src
);
769 gen_mov_reg_C(dst
, src
);
770 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
771 tcg_gen_xori_tl(dst
, dst
, 0x1);
775 static inline void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
777 gen_mov_reg_C(dst
, src
);
778 tcg_gen_xori_tl(dst
, dst
, 0x1);
782 static inline void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
784 gen_mov_reg_N(dst
, src
);
785 tcg_gen_xori_tl(dst
, dst
, 0x1);
789 static inline void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
791 gen_mov_reg_V(dst
, src
);
792 tcg_gen_xori_tl(dst
, dst
, 0x1);
796 FPSR bit field FCC1 | FCC0:
802 static inline void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
803 unsigned int fcc_offset
)
805 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
806 tcg_gen_andi_tl(reg
, reg
, 0x1);
809 static inline void gen_mov_reg_FCC1(TCGv reg
, TCGv src
,
810 unsigned int fcc_offset
)
812 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
813 tcg_gen_andi_tl(reg
, reg
, 0x1);
817 static inline void gen_op_eval_fbne(TCGv dst
, TCGv src
,
818 unsigned int fcc_offset
)
820 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
821 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
822 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
825 // 1 or 2: FCC0 ^ FCC1
826 static inline void gen_op_eval_fblg(TCGv dst
, TCGv src
,
827 unsigned int fcc_offset
)
829 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
830 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
831 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
835 static inline void gen_op_eval_fbul(TCGv dst
, TCGv src
,
836 unsigned int fcc_offset
)
838 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
842 static inline void gen_op_eval_fbl(TCGv dst
, TCGv src
,
843 unsigned int fcc_offset
)
845 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
846 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
847 tcg_gen_xori_tl(cpu_tmp0
, cpu_tmp0
, 0x1);
848 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
852 static inline void gen_op_eval_fbug(TCGv dst
, TCGv src
,
853 unsigned int fcc_offset
)
855 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
859 static inline void gen_op_eval_fbg(TCGv dst
, TCGv src
,
860 unsigned int fcc_offset
)
862 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
863 tcg_gen_xori_tl(dst
, dst
, 0x1);
864 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
865 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
869 static inline void gen_op_eval_fbu(TCGv dst
, TCGv src
,
870 unsigned int fcc_offset
)
872 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
873 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
874 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
878 static inline void gen_op_eval_fbe(TCGv dst
, TCGv src
,
879 unsigned int fcc_offset
)
881 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
882 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
883 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
884 tcg_gen_xori_tl(dst
, dst
, 0x1);
887 // 0 or 3: !(FCC0 ^ FCC1)
888 static inline void gen_op_eval_fbue(TCGv dst
, TCGv src
,
889 unsigned int fcc_offset
)
891 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
892 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
893 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
894 tcg_gen_xori_tl(dst
, dst
, 0x1);
898 static inline void gen_op_eval_fbge(TCGv dst
, TCGv src
,
899 unsigned int fcc_offset
)
901 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
902 tcg_gen_xori_tl(dst
, dst
, 0x1);
905 // !1: !(FCC0 & !FCC1)
906 static inline void gen_op_eval_fbuge(TCGv dst
, TCGv src
,
907 unsigned int fcc_offset
)
909 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
910 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
911 tcg_gen_xori_tl(cpu_tmp0
, cpu_tmp0
, 0x1);
912 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
913 tcg_gen_xori_tl(dst
, dst
, 0x1);
917 static inline void gen_op_eval_fble(TCGv dst
, TCGv src
,
918 unsigned int fcc_offset
)
920 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
921 tcg_gen_xori_tl(dst
, dst
, 0x1);
924 // !2: !(!FCC0 & FCC1)
925 static inline void gen_op_eval_fbule(TCGv dst
, TCGv src
,
926 unsigned int fcc_offset
)
928 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
929 tcg_gen_xori_tl(dst
, dst
, 0x1);
930 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
931 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
932 tcg_gen_xori_tl(dst
, dst
, 0x1);
935 // !3: !(FCC0 & FCC1)
936 static inline void gen_op_eval_fbo(TCGv dst
, TCGv src
,
937 unsigned int fcc_offset
)
939 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
940 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
941 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
942 tcg_gen_xori_tl(dst
, dst
, 0x1);
945 static inline void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
946 target_ulong pc2
, TCGv r_cond
)
950 l1
= gen_new_label();
952 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
954 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
957 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
960 static inline void gen_branch_a(DisasContext
*dc
, target_ulong pc1
,
961 target_ulong pc2
, TCGv r_cond
)
965 l1
= gen_new_label();
967 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
969 gen_goto_tb(dc
, 0, pc2
, pc1
);
972 gen_goto_tb(dc
, 1, pc2
+ 4, pc2
+ 8);
975 static inline void gen_generic_branch(DisasContext
*dc
)
977 TCGv npc0
= tcg_const_tl(dc
->jump_pc
[0]);
978 TCGv npc1
= tcg_const_tl(dc
->jump_pc
[1]);
979 TCGv zero
= tcg_const_tl(0);
981 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
988 /* call this function before using the condition register as it may
989 have been set for a jump */
990 static inline void flush_cond(DisasContext
*dc
)
992 if (dc
->npc
== JUMP_PC
) {
993 gen_generic_branch(dc
);
994 dc
->npc
= DYNAMIC_PC
;
998 static inline void save_npc(DisasContext
*dc
)
1000 if (dc
->npc
== JUMP_PC
) {
1001 gen_generic_branch(dc
);
1002 dc
->npc
= DYNAMIC_PC
;
1003 } else if (dc
->npc
!= DYNAMIC_PC
) {
1004 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
1008 static inline void save_state(DisasContext
*dc
)
1010 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1011 /* flush pending conditional evaluations before exposing cpu state */
1012 if (dc
->cc_op
!= CC_OP_FLAGS
) {
1013 dc
->cc_op
= CC_OP_FLAGS
;
1014 gen_helper_compute_psr(cpu_env
);
1019 static inline void gen_mov_pc_npc(DisasContext
*dc
)
1021 if (dc
->npc
== JUMP_PC
) {
1022 gen_generic_branch(dc
);
1023 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1024 dc
->pc
= DYNAMIC_PC
;
1025 } else if (dc
->npc
== DYNAMIC_PC
) {
1026 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1027 dc
->pc
= DYNAMIC_PC
;
1033 static inline void gen_op_next_insn(void)
1035 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1036 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1039 static void free_compare(DisasCompare
*cmp
)
1042 tcg_temp_free(cmp
->c1
);
1045 tcg_temp_free(cmp
->c2
);
1049 static void gen_compare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
,
1055 /* For now we still generate a straight boolean result. */
1056 cmp
->cond
= TCG_COND_NE
;
1057 cmp
->is_bool
= true;
1058 cmp
->g1
= cmp
->g2
= false;
1059 cmp
->c1
= r_dst
= tcg_temp_new();
1060 cmp
->c2
= tcg_const_tl(0);
1062 #ifdef TARGET_SPARC64
1070 switch (dc
->cc_op
) {
1074 gen_helper_compute_psr(cpu_env
);
1075 dc
->cc_op
= CC_OP_FLAGS
;
1080 gen_op_eval_bn(r_dst
);
1083 gen_op_eval_be(r_dst
, r_src
);
1086 gen_op_eval_ble(r_dst
, r_src
);
1089 gen_op_eval_bl(r_dst
, r_src
);
1092 gen_op_eval_bleu(r_dst
, r_src
);
1095 gen_op_eval_bcs(r_dst
, r_src
);
1098 gen_op_eval_bneg(r_dst
, r_src
);
1101 gen_op_eval_bvs(r_dst
, r_src
);
1104 gen_op_eval_ba(r_dst
);
1107 gen_op_eval_bne(r_dst
, r_src
);
1110 gen_op_eval_bg(r_dst
, r_src
);
1113 gen_op_eval_bge(r_dst
, r_src
);
1116 gen_op_eval_bgu(r_dst
, r_src
);
1119 gen_op_eval_bcc(r_dst
, r_src
);
1122 gen_op_eval_bpos(r_dst
, r_src
);
1125 gen_op_eval_bvc(r_dst
, r_src
);
1130 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1132 unsigned int offset
;
1135 /* For now we still generate a straight boolean result. */
1136 cmp
->cond
= TCG_COND_NE
;
1137 cmp
->is_bool
= true;
1138 cmp
->g1
= cmp
->g2
= false;
1139 cmp
->c1
= r_dst
= tcg_temp_new();
1140 cmp
->c2
= tcg_const_tl(0);
1160 gen_op_eval_bn(r_dst
);
1163 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1166 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1169 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1172 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1175 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1178 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1181 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1184 gen_op_eval_ba(r_dst
);
1187 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1190 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1193 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1196 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1199 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1202 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1205 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1210 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1214 gen_compare(&cmp
, cc
, cond
, dc
);
1216 /* The interface is to return a boolean in r_dst. */
1218 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1220 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1226 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1229 gen_fcompare(&cmp
, cc
, cond
);
1231 /* The interface is to return a boolean in r_dst. */
1233 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1235 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1241 #ifdef TARGET_SPARC64
1243 static const int gen_tcg_cond_reg
[8] = {
1254 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1256 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1257 cmp
->is_bool
= false;
1261 cmp
->c2
= tcg_const_tl(0);
1264 static inline void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1267 gen_compare_reg(&cmp
, cond
, r_src
);
1269 /* The interface is to return a boolean in r_dst. */
1270 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1276 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1278 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1279 target_ulong target
= dc
->pc
+ offset
;
1281 #ifdef TARGET_SPARC64
1282 if (unlikely(AM_CHECK(dc
))) {
1283 target
&= 0xffffffffULL
;
1287 /* unconditional not taken */
1289 dc
->pc
= dc
->npc
+ 4;
1290 dc
->npc
= dc
->pc
+ 4;
1293 dc
->npc
= dc
->pc
+ 4;
1295 } else if (cond
== 0x8) {
1296 /* unconditional taken */
1299 dc
->npc
= dc
->pc
+ 4;
1303 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1307 gen_cond(cpu_cond
, cc
, cond
, dc
);
1309 gen_branch_a(dc
, target
, dc
->npc
, cpu_cond
);
1313 dc
->jump_pc
[0] = target
;
1314 if (unlikely(dc
->npc
== DYNAMIC_PC
)) {
1315 dc
->jump_pc
[1] = DYNAMIC_PC
;
1316 tcg_gen_addi_tl(cpu_pc
, cpu_npc
, 4);
1318 dc
->jump_pc
[1] = dc
->npc
+ 4;
1325 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1327 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1328 target_ulong target
= dc
->pc
+ offset
;
1330 #ifdef TARGET_SPARC64
1331 if (unlikely(AM_CHECK(dc
))) {
1332 target
&= 0xffffffffULL
;
1336 /* unconditional not taken */
1338 dc
->pc
= dc
->npc
+ 4;
1339 dc
->npc
= dc
->pc
+ 4;
1342 dc
->npc
= dc
->pc
+ 4;
1344 } else if (cond
== 0x8) {
1345 /* unconditional taken */
1348 dc
->npc
= dc
->pc
+ 4;
1352 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1356 gen_fcond(cpu_cond
, cc
, cond
);
1358 gen_branch_a(dc
, target
, dc
->npc
, cpu_cond
);
1362 dc
->jump_pc
[0] = target
;
1363 if (unlikely(dc
->npc
== DYNAMIC_PC
)) {
1364 dc
->jump_pc
[1] = DYNAMIC_PC
;
1365 tcg_gen_addi_tl(cpu_pc
, cpu_npc
, 4);
1367 dc
->jump_pc
[1] = dc
->npc
+ 4;
1374 #ifdef TARGET_SPARC64
1375 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1378 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1379 target_ulong target
= dc
->pc
+ offset
;
1381 if (unlikely(AM_CHECK(dc
))) {
1382 target
&= 0xffffffffULL
;
1385 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1387 gen_branch_a(dc
, target
, dc
->npc
, cpu_cond
);
1391 dc
->jump_pc
[0] = target
;
1392 if (unlikely(dc
->npc
== DYNAMIC_PC
)) {
1393 dc
->jump_pc
[1] = DYNAMIC_PC
;
1394 tcg_gen_addi_tl(cpu_pc
, cpu_npc
, 4);
1396 dc
->jump_pc
[1] = dc
->npc
+ 4;
1402 static inline void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1406 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1409 gen_helper_fcmps_fcc1(cpu_env
, r_rs1
, r_rs2
);
1412 gen_helper_fcmps_fcc2(cpu_env
, r_rs1
, r_rs2
);
1415 gen_helper_fcmps_fcc3(cpu_env
, r_rs1
, r_rs2
);
1420 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1424 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1427 gen_helper_fcmpd_fcc1(cpu_env
, r_rs1
, r_rs2
);
1430 gen_helper_fcmpd_fcc2(cpu_env
, r_rs1
, r_rs2
);
1433 gen_helper_fcmpd_fcc3(cpu_env
, r_rs1
, r_rs2
);
1438 static inline void gen_op_fcmpq(int fccno
)
1442 gen_helper_fcmpq(cpu_env
);
1445 gen_helper_fcmpq_fcc1(cpu_env
);
1448 gen_helper_fcmpq_fcc2(cpu_env
);
1451 gen_helper_fcmpq_fcc3(cpu_env
);
1456 static inline void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1460 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1463 gen_helper_fcmpes_fcc1(cpu_env
, r_rs1
, r_rs2
);
1466 gen_helper_fcmpes_fcc2(cpu_env
, r_rs1
, r_rs2
);
1469 gen_helper_fcmpes_fcc3(cpu_env
, r_rs1
, r_rs2
);
1474 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1478 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1481 gen_helper_fcmped_fcc1(cpu_env
, r_rs1
, r_rs2
);
1484 gen_helper_fcmped_fcc2(cpu_env
, r_rs1
, r_rs2
);
1487 gen_helper_fcmped_fcc3(cpu_env
, r_rs1
, r_rs2
);
1492 static inline void gen_op_fcmpeq(int fccno
)
1496 gen_helper_fcmpeq(cpu_env
);
1499 gen_helper_fcmpeq_fcc1(cpu_env
);
1502 gen_helper_fcmpeq_fcc2(cpu_env
);
1505 gen_helper_fcmpeq_fcc3(cpu_env
);
1512 static inline void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1514 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1517 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1519 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1522 static inline void gen_op_fcmpq(int fccno
)
1524 gen_helper_fcmpq(cpu_env
);
1527 static inline void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1529 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1532 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1534 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1537 static inline void gen_op_fcmpeq(int fccno
)
1539 gen_helper_fcmpeq(cpu_env
);
1543 static inline void gen_op_fpexception_im(int fsr_flags
)
1547 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1548 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1549 r_const
= tcg_const_i32(TT_FP_EXCP
);
1550 gen_helper_raise_exception(cpu_env
, r_const
);
1551 tcg_temp_free_i32(r_const
);
1554 static int gen_trap_ifnofpu(DisasContext
*dc
)
1556 #if !defined(CONFIG_USER_ONLY)
1557 if (!dc
->fpu_enabled
) {
1561 r_const
= tcg_const_i32(TT_NFPU_INSN
);
1562 gen_helper_raise_exception(cpu_env
, r_const
);
1563 tcg_temp_free_i32(r_const
);
1571 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1573 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1576 static inline void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1577 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1581 src
= gen_load_fpr_F(dc
, rs
);
1582 dst
= gen_dest_fpr_F();
1584 gen(dst
, cpu_env
, src
);
1586 gen_store_fpr_F(dc
, rd
, dst
);
1589 static inline void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1590 void (*gen
)(TCGv_i32
, TCGv_i32
))
1594 src
= gen_load_fpr_F(dc
, rs
);
1595 dst
= gen_dest_fpr_F();
1599 gen_store_fpr_F(dc
, rd
, dst
);
1602 static inline void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1603 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1605 TCGv_i32 dst
, src1
, src2
;
1607 src1
= gen_load_fpr_F(dc
, rs1
);
1608 src2
= gen_load_fpr_F(dc
, rs2
);
1609 dst
= gen_dest_fpr_F();
1611 gen(dst
, cpu_env
, src1
, src2
);
1613 gen_store_fpr_F(dc
, rd
, dst
);
1616 #ifdef TARGET_SPARC64
1617 static inline void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1618 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1620 TCGv_i32 dst
, src1
, src2
;
1622 src1
= gen_load_fpr_F(dc
, rs1
);
1623 src2
= gen_load_fpr_F(dc
, rs2
);
1624 dst
= gen_dest_fpr_F();
1626 gen(dst
, src1
, src2
);
1628 gen_store_fpr_F(dc
, rd
, dst
);
1632 static inline void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1633 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1637 src
= gen_load_fpr_D(dc
, rs
);
1638 dst
= gen_dest_fpr_D();
1640 gen(dst
, cpu_env
, src
);
1642 gen_store_fpr_D(dc
, rd
, dst
);
1645 #ifdef TARGET_SPARC64
1646 static inline void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1647 void (*gen
)(TCGv_i64
, TCGv_i64
))
1651 src
= gen_load_fpr_D(dc
, rs
);
1652 dst
= gen_dest_fpr_D();
1656 gen_store_fpr_D(dc
, rd
, dst
);
1660 static inline void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1661 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1663 TCGv_i64 dst
, src1
, src2
;
1665 src1
= gen_load_fpr_D(dc
, rs1
);
1666 src2
= gen_load_fpr_D(dc
, rs2
);
1667 dst
= gen_dest_fpr_D();
1669 gen(dst
, cpu_env
, src1
, src2
);
1671 gen_store_fpr_D(dc
, rd
, dst
);
1674 #ifdef TARGET_SPARC64
1675 static inline void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1676 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1678 TCGv_i64 dst
, src1
, src2
;
1680 src1
= gen_load_fpr_D(dc
, rs1
);
1681 src2
= gen_load_fpr_D(dc
, rs2
);
1682 dst
= gen_dest_fpr_D();
1684 gen(dst
, src1
, src2
);
1686 gen_store_fpr_D(dc
, rd
, dst
);
1689 static inline void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1690 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1692 TCGv_i64 dst
, src1
, src2
;
1694 src1
= gen_load_fpr_D(dc
, rs1
);
1695 src2
= gen_load_fpr_D(dc
, rs2
);
1696 dst
= gen_dest_fpr_D();
1698 gen(dst
, cpu_gsr
, src1
, src2
);
1700 gen_store_fpr_D(dc
, rd
, dst
);
1703 static inline void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1704 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1706 TCGv_i64 dst
, src0
, src1
, src2
;
1708 src1
= gen_load_fpr_D(dc
, rs1
);
1709 src2
= gen_load_fpr_D(dc
, rs2
);
1710 src0
= gen_load_fpr_D(dc
, rd
);
1711 dst
= gen_dest_fpr_D();
1713 gen(dst
, src0
, src1
, src2
);
1715 gen_store_fpr_D(dc
, rd
, dst
);
1719 static inline void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1720 void (*gen
)(TCGv_ptr
))
1722 gen_op_load_fpr_QT1(QFPREG(rs
));
1726 gen_op_store_QT0_fpr(QFPREG(rd
));
1727 gen_update_fprs_dirty(QFPREG(rd
));
1730 #ifdef TARGET_SPARC64
1731 static inline void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1732 void (*gen
)(TCGv_ptr
))
1734 gen_op_load_fpr_QT1(QFPREG(rs
));
1738 gen_op_store_QT0_fpr(QFPREG(rd
));
1739 gen_update_fprs_dirty(QFPREG(rd
));
1743 static inline void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1744 void (*gen
)(TCGv_ptr
))
1746 gen_op_load_fpr_QT0(QFPREG(rs1
));
1747 gen_op_load_fpr_QT1(QFPREG(rs2
));
1751 gen_op_store_QT0_fpr(QFPREG(rd
));
1752 gen_update_fprs_dirty(QFPREG(rd
));
1755 static inline void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1756 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1759 TCGv_i32 src1
, src2
;
1761 src1
= gen_load_fpr_F(dc
, rs1
);
1762 src2
= gen_load_fpr_F(dc
, rs2
);
1763 dst
= gen_dest_fpr_D();
1765 gen(dst
, cpu_env
, src1
, src2
);
1767 gen_store_fpr_D(dc
, rd
, dst
);
1770 static inline void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1771 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1773 TCGv_i64 src1
, src2
;
1775 src1
= gen_load_fpr_D(dc
, rs1
);
1776 src2
= gen_load_fpr_D(dc
, rs2
);
1778 gen(cpu_env
, src1
, src2
);
1780 gen_op_store_QT0_fpr(QFPREG(rd
));
1781 gen_update_fprs_dirty(QFPREG(rd
));
1784 #ifdef TARGET_SPARC64
1785 static inline void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1786 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1791 src
= gen_load_fpr_F(dc
, rs
);
1792 dst
= gen_dest_fpr_D();
1794 gen(dst
, cpu_env
, src
);
1796 gen_store_fpr_D(dc
, rd
, dst
);
1800 static inline void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1801 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1806 src
= gen_load_fpr_F(dc
, rs
);
1807 dst
= gen_dest_fpr_D();
1809 gen(dst
, cpu_env
, src
);
1811 gen_store_fpr_D(dc
, rd
, dst
);
1814 static inline void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1815 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1820 src
= gen_load_fpr_D(dc
, rs
);
1821 dst
= gen_dest_fpr_F();
1823 gen(dst
, cpu_env
, src
);
1825 gen_store_fpr_F(dc
, rd
, dst
);
1828 static inline void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1829 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1833 gen_op_load_fpr_QT1(QFPREG(rs
));
1834 dst
= gen_dest_fpr_F();
1838 gen_store_fpr_F(dc
, rd
, dst
);
1841 static inline void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1842 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1846 gen_op_load_fpr_QT1(QFPREG(rs
));
1847 dst
= gen_dest_fpr_D();
1851 gen_store_fpr_D(dc
, rd
, dst
);
1854 static inline void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1855 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1859 src
= gen_load_fpr_F(dc
, rs
);
1863 gen_op_store_QT0_fpr(QFPREG(rd
));
1864 gen_update_fprs_dirty(QFPREG(rd
));
1867 static inline void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1868 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1872 src
= gen_load_fpr_D(dc
, rs
);
1876 gen_op_store_QT0_fpr(QFPREG(rd
));
1877 gen_update_fprs_dirty(QFPREG(rd
));
1881 #ifdef TARGET_SPARC64
1882 static inline TCGv_i32
gen_get_asi(int insn
, TCGv r_addr
)
1888 r_asi
= tcg_temp_new_i32();
1889 tcg_gen_mov_i32(r_asi
, cpu_asi
);
1891 asi
= GET_FIELD(insn
, 19, 26);
1892 r_asi
= tcg_const_i32(asi
);
1897 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
1900 TCGv_i32 r_asi
, r_size
, r_sign
;
1902 r_asi
= gen_get_asi(insn
, addr
);
1903 r_size
= tcg_const_i32(size
);
1904 r_sign
= tcg_const_i32(sign
);
1905 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
1906 tcg_temp_free_i32(r_sign
);
1907 tcg_temp_free_i32(r_size
);
1908 tcg_temp_free_i32(r_asi
);
1911 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
1913 TCGv_i32 r_asi
, r_size
;
1915 r_asi
= gen_get_asi(insn
, addr
);
1916 r_size
= tcg_const_i32(size
);
1917 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
1918 tcg_temp_free_i32(r_size
);
1919 tcg_temp_free_i32(r_asi
);
1922 static inline void gen_ldf_asi(TCGv addr
, int insn
, int size
, int rd
)
1924 TCGv_i32 r_asi
, r_size
, r_rd
;
1926 r_asi
= gen_get_asi(insn
, addr
);
1927 r_size
= tcg_const_i32(size
);
1928 r_rd
= tcg_const_i32(rd
);
1929 gen_helper_ldf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
1930 tcg_temp_free_i32(r_rd
);
1931 tcg_temp_free_i32(r_size
);
1932 tcg_temp_free_i32(r_asi
);
1935 static inline void gen_stf_asi(TCGv addr
, int insn
, int size
, int rd
)
1937 TCGv_i32 r_asi
, r_size
, r_rd
;
1939 r_asi
= gen_get_asi(insn
, addr
);
1940 r_size
= tcg_const_i32(size
);
1941 r_rd
= tcg_const_i32(rd
);
1942 gen_helper_stf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
1943 tcg_temp_free_i32(r_rd
);
1944 tcg_temp_free_i32(r_size
);
1945 tcg_temp_free_i32(r_asi
);
1948 static inline void gen_swap_asi(TCGv dst
, TCGv addr
, int insn
)
1950 TCGv_i32 r_asi
, r_size
, r_sign
;
1952 r_asi
= gen_get_asi(insn
, addr
);
1953 r_size
= tcg_const_i32(4);
1954 r_sign
= tcg_const_i32(0);
1955 gen_helper_ld_asi(cpu_tmp64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
1956 tcg_temp_free_i32(r_sign
);
1957 gen_helper_st_asi(cpu_env
, addr
, dst
, r_asi
, r_size
);
1958 tcg_temp_free_i32(r_size
);
1959 tcg_temp_free_i32(r_asi
);
1960 tcg_gen_trunc_i64_tl(dst
, cpu_tmp64
);
1963 static inline void gen_ldda_asi(TCGv hi
, TCGv addr
, int insn
, int rd
)
1965 TCGv_i32 r_asi
, r_rd
;
1967 r_asi
= gen_get_asi(insn
, addr
);
1968 r_rd
= tcg_const_i32(rd
);
1969 gen_helper_ldda_asi(cpu_env
, addr
, r_asi
, r_rd
);
1970 tcg_temp_free_i32(r_rd
);
1971 tcg_temp_free_i32(r_asi
);
1974 static inline void gen_stda_asi(TCGv hi
, TCGv addr
, int insn
, int rd
)
1976 TCGv_i32 r_asi
, r_size
;
1978 gen_movl_reg_TN(rd
+ 1, cpu_tmp0
);
1979 tcg_gen_concat_tl_i64(cpu_tmp64
, cpu_tmp0
, hi
);
1980 r_asi
= gen_get_asi(insn
, addr
);
1981 r_size
= tcg_const_i32(8);
1982 gen_helper_st_asi(cpu_env
, addr
, cpu_tmp64
, r_asi
, r_size
);
1983 tcg_temp_free_i32(r_size
);
1984 tcg_temp_free_i32(r_asi
);
1987 static inline void gen_cas_asi(TCGv dst
, TCGv addr
, TCGv val2
, int insn
,
1993 r_val1
= tcg_temp_new();
1994 gen_movl_reg_TN(rd
, r_val1
);
1995 r_asi
= gen_get_asi(insn
, addr
);
1996 gen_helper_cas_asi(dst
, cpu_env
, addr
, r_val1
, val2
, r_asi
);
1997 tcg_temp_free_i32(r_asi
);
1998 tcg_temp_free(r_val1
);
2001 static inline void gen_casx_asi(TCGv dst
, TCGv addr
, TCGv val2
, int insn
,
2006 gen_movl_reg_TN(rd
, cpu_tmp64
);
2007 r_asi
= gen_get_asi(insn
, addr
);
2008 gen_helper_casx_asi(dst
, cpu_env
, addr
, cpu_tmp64
, val2
, r_asi
);
2009 tcg_temp_free_i32(r_asi
);
2012 #elif !defined(CONFIG_USER_ONLY)
2014 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
2017 TCGv_i32 r_asi
, r_size
, r_sign
;
2019 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2020 r_size
= tcg_const_i32(size
);
2021 r_sign
= tcg_const_i32(sign
);
2022 gen_helper_ld_asi(cpu_tmp64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2023 tcg_temp_free(r_sign
);
2024 tcg_temp_free(r_size
);
2025 tcg_temp_free(r_asi
);
2026 tcg_gen_trunc_i64_tl(dst
, cpu_tmp64
);
2029 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
2031 TCGv_i32 r_asi
, r_size
;
2033 tcg_gen_extu_tl_i64(cpu_tmp64
, src
);
2034 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2035 r_size
= tcg_const_i32(size
);
2036 gen_helper_st_asi(cpu_env
, addr
, cpu_tmp64
, r_asi
, r_size
);
2037 tcg_temp_free(r_size
);
2038 tcg_temp_free(r_asi
);
2041 static inline void gen_swap_asi(TCGv dst
, TCGv addr
, int insn
)
2043 TCGv_i32 r_asi
, r_size
, r_sign
;
2046 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2047 r_size
= tcg_const_i32(4);
2048 r_sign
= tcg_const_i32(0);
2049 gen_helper_ld_asi(cpu_tmp64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2050 tcg_temp_free(r_sign
);
2051 r_val
= tcg_temp_new_i64();
2052 tcg_gen_extu_tl_i64(r_val
, dst
);
2053 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2054 tcg_temp_free_i64(r_val
);
2055 tcg_temp_free(r_size
);
2056 tcg_temp_free(r_asi
);
2057 tcg_gen_trunc_i64_tl(dst
, cpu_tmp64
);
2060 static inline void gen_ldda_asi(TCGv hi
, TCGv addr
, int insn
, int rd
)
2062 TCGv_i32 r_asi
, r_size
, r_sign
;
2064 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2065 r_size
= tcg_const_i32(8);
2066 r_sign
= tcg_const_i32(0);
2067 gen_helper_ld_asi(cpu_tmp64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2068 tcg_temp_free(r_sign
);
2069 tcg_temp_free(r_size
);
2070 tcg_temp_free(r_asi
);
2071 tcg_gen_trunc_i64_tl(cpu_tmp0
, cpu_tmp64
);
2072 gen_movl_TN_reg(rd
+ 1, cpu_tmp0
);
2073 tcg_gen_shri_i64(cpu_tmp64
, cpu_tmp64
, 32);
2074 tcg_gen_trunc_i64_tl(hi
, cpu_tmp64
);
2075 gen_movl_TN_reg(rd
, hi
);
2078 static inline void gen_stda_asi(TCGv hi
, TCGv addr
, int insn
, int rd
)
2080 TCGv_i32 r_asi
, r_size
;
2082 gen_movl_reg_TN(rd
+ 1, cpu_tmp0
);
2083 tcg_gen_concat_tl_i64(cpu_tmp64
, cpu_tmp0
, hi
);
2084 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2085 r_size
= tcg_const_i32(8);
2086 gen_helper_st_asi(cpu_env
, addr
, cpu_tmp64
, r_asi
, r_size
);
2087 tcg_temp_free(r_size
);
2088 tcg_temp_free(r_asi
);
2092 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2093 static inline void gen_ldstub_asi(TCGv dst
, TCGv addr
, int insn
)
2096 TCGv_i32 r_asi
, r_size
;
2098 gen_ld_asi(dst
, addr
, insn
, 1, 0);
2100 r_val
= tcg_const_i64(0xffULL
);
2101 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2102 r_size
= tcg_const_i32(1);
2103 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2104 tcg_temp_free_i32(r_size
);
2105 tcg_temp_free_i32(r_asi
);
2106 tcg_temp_free_i64(r_val
);
2110 static inline TCGv
get_src1(unsigned int insn
, TCGv def
)
2115 rs1
= GET_FIELD(insn
, 13, 17);
2117 tcg_gen_movi_tl(def
, 0);
2118 } else if (rs1
< 8) {
2119 r_rs1
= cpu_gregs
[rs1
];
2121 tcg_gen_ld_tl(def
, cpu_regwptr
, (rs1
- 8) * sizeof(target_ulong
));
2126 static inline TCGv
get_src2(unsigned int insn
, TCGv def
)
2130 if (IS_IMM
) { /* immediate */
2131 target_long simm
= GET_FIELDs(insn
, 19, 31);
2132 tcg_gen_movi_tl(def
, simm
);
2133 } else { /* register */
2134 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2136 tcg_gen_movi_tl(def
, 0);
2137 } else if (rs2
< 8) {
2138 r_rs2
= cpu_gregs
[rs2
];
2140 tcg_gen_ld_tl(def
, cpu_regwptr
, (rs2
- 8) * sizeof(target_ulong
));
2146 #ifdef TARGET_SPARC64
2147 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2149 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2151 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2152 or fold the comparison down to 32 bits and use movcond_i32. Choose
2154 c32
= tcg_temp_new_i32();
2156 tcg_gen_trunc_i64_i32(c32
, cmp
->c1
);
2158 TCGv_i64 c64
= tcg_temp_new_i64();
2159 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2160 tcg_gen_trunc_i64_i32(c32
, c64
);
2161 tcg_temp_free_i64(c64
);
2164 s1
= gen_load_fpr_F(dc
, rs
);
2165 s2
= gen_load_fpr_F(dc
, rd
);
2166 dst
= gen_dest_fpr_F();
2167 zero
= tcg_const_i32(0);
2169 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2171 tcg_temp_free_i32(c32
);
2172 tcg_temp_free_i32(zero
);
2173 gen_store_fpr_F(dc
, rd
, dst
);
2176 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2178 TCGv_i64 dst
= gen_dest_fpr_D();
2179 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2180 gen_load_fpr_D(dc
, rs
),
2181 gen_load_fpr_D(dc
, rd
));
2182 gen_store_fpr_D(dc
, rd
, dst
);
2185 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2187 int qd
= QFPREG(rd
);
2188 int qs
= QFPREG(rs
);
2190 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2191 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2192 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2193 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2195 gen_update_fprs_dirty(qd
);
2198 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
, TCGv_ptr cpu_env
)
2200 TCGv_i32 r_tl
= tcg_temp_new_i32();
2202 /* load env->tl into r_tl */
2203 tcg_gen_ld_i32(r_tl
, cpu_env
, offsetof(CPUSPARCState
, tl
));
2205 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2206 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2208 /* calculate offset to current trap state from env->ts, reuse r_tl */
2209 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2210 tcg_gen_addi_ptr(r_tsptr
, cpu_env
, offsetof(CPUSPARCState
, ts
));
2212 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2214 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2215 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2216 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2217 tcg_temp_free_ptr(r_tl_tmp
);
2220 tcg_temp_free_i32(r_tl
);
2223 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2224 int width
, bool cc
, bool left
)
2226 TCGv lo1
, lo2
, t1
, t2
;
2227 uint64_t amask
, tabl
, tabr
;
2228 int shift
, imask
, omask
;
2231 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2232 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2233 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2234 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2235 dc
->cc_op
= CC_OP_SUB
;
2238 /* Theory of operation: there are two tables, left and right (not to
2239 be confused with the left and right versions of the opcode). These
2240 are indexed by the low 3 bits of the inputs. To make things "easy",
2241 these tables are loaded into two constants, TABL and TABR below.
2242 The operation index = (input & imask) << shift calculates the index
2243 into the constant, while val = (table >> index) & omask calculates
2244 the value we're looking for. */
2251 tabl
= 0x80c0e0f0f8fcfeffULL
;
2252 tabr
= 0xff7f3f1f0f070301ULL
;
2254 tabl
= 0x0103070f1f3f7fffULL
;
2255 tabr
= 0xfffefcf8f0e0c080ULL
;
2275 tabl
= (2 << 2) | 3;
2276 tabr
= (3 << 2) | 1;
2278 tabl
= (1 << 2) | 3;
2279 tabr
= (3 << 2) | 2;
2286 lo1
= tcg_temp_new();
2287 lo2
= tcg_temp_new();
2288 tcg_gen_andi_tl(lo1
, s1
, imask
);
2289 tcg_gen_andi_tl(lo2
, s2
, imask
);
2290 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2291 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2293 t1
= tcg_const_tl(tabl
);
2294 t2
= tcg_const_tl(tabr
);
2295 tcg_gen_shr_tl(lo1
, t1
, lo1
);
2296 tcg_gen_shr_tl(lo2
, t2
, lo2
);
2297 tcg_gen_andi_tl(dst
, lo1
, omask
);
2298 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2302 amask
&= 0xffffffffULL
;
2304 tcg_gen_andi_tl(s1
, s1
, amask
);
2305 tcg_gen_andi_tl(s2
, s2
, amask
);
2307 /* We want to compute
2308 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2309 We've already done dst = lo1, so this reduces to
2310 dst &= (s1 == s2 ? -1 : lo2)
2315 tcg_gen_setcond_tl(TCG_COND_EQ
, t1
, s1
, s2
);
2316 tcg_gen_neg_tl(t1
, t1
);
2317 tcg_gen_or_tl(lo2
, lo2
, t1
);
2318 tcg_gen_and_tl(dst
, dst
, lo2
);
2326 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2328 TCGv tmp
= tcg_temp_new();
2330 tcg_gen_add_tl(tmp
, s1
, s2
);
2331 tcg_gen_andi_tl(dst
, tmp
, -8);
2333 tcg_gen_neg_tl(tmp
, tmp
);
2335 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2340 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2344 t1
= tcg_temp_new();
2345 t2
= tcg_temp_new();
2346 shift
= tcg_temp_new();
2348 tcg_gen_andi_tl(shift
, gsr
, 7);
2349 tcg_gen_shli_tl(shift
, shift
, 3);
2350 tcg_gen_shl_tl(t1
, s1
, shift
);
2352 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2353 shift of (up to 63) followed by a constant shift of 1. */
2354 tcg_gen_xori_tl(shift
, shift
, 63);
2355 tcg_gen_shr_tl(t2
, s2
, shift
);
2356 tcg_gen_shri_tl(t2
, t2
, 1);
2358 tcg_gen_or_tl(dst
, t1
, t2
);
2362 tcg_temp_free(shift
);
2366 #define CHECK_IU_FEATURE(dc, FEATURE) \
2367 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2369 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2370 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2373 /* before an instruction, dc->pc must be static */
2374 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2376 unsigned int opc
, rs1
, rs2
, rd
;
2377 TCGv cpu_src1
, cpu_src2
, cpu_tmp1
, cpu_tmp2
;
2378 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2379 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2382 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
2383 tcg_gen_debug_insn_start(dc
->pc
);
2386 opc
= GET_FIELD(insn
, 0, 1);
2388 rd
= GET_FIELD(insn
, 2, 6);
2390 cpu_tmp1
= cpu_src1
= tcg_temp_new();
2391 cpu_tmp2
= cpu_src2
= tcg_temp_new();
2394 case 0: /* branches/sethi */
2396 unsigned int xop
= GET_FIELD(insn
, 7, 9);
2399 #ifdef TARGET_SPARC64
2400 case 0x1: /* V9 BPcc */
2404 target
= GET_FIELD_SP(insn
, 0, 18);
2405 target
= sign_extend(target
, 19);
2407 cc
= GET_FIELD_SP(insn
, 20, 21);
2409 do_branch(dc
, target
, insn
, 0);
2411 do_branch(dc
, target
, insn
, 1);
2416 case 0x3: /* V9 BPr */
2418 target
= GET_FIELD_SP(insn
, 0, 13) |
2419 (GET_FIELD_SP(insn
, 20, 21) << 14);
2420 target
= sign_extend(target
, 16);
2422 cpu_src1
= get_src1(insn
, cpu_src1
);
2423 do_branch_reg(dc
, target
, insn
, cpu_src1
);
2426 case 0x5: /* V9 FBPcc */
2428 int cc
= GET_FIELD_SP(insn
, 20, 21);
2429 if (gen_trap_ifnofpu(dc
)) {
2432 target
= GET_FIELD_SP(insn
, 0, 18);
2433 target
= sign_extend(target
, 19);
2435 do_fbranch(dc
, target
, insn
, cc
);
2439 case 0x7: /* CBN+x */
2444 case 0x2: /* BN+x */
2446 target
= GET_FIELD(insn
, 10, 31);
2447 target
= sign_extend(target
, 22);
2449 do_branch(dc
, target
, insn
, 0);
2452 case 0x6: /* FBN+x */
2454 if (gen_trap_ifnofpu(dc
)) {
2457 target
= GET_FIELD(insn
, 10, 31);
2458 target
= sign_extend(target
, 22);
2460 do_fbranch(dc
, target
, insn
, 0);
2463 case 0x4: /* SETHI */
2465 uint32_t value
= GET_FIELD(insn
, 10, 31);
2468 r_const
= tcg_const_tl(value
<< 10);
2469 gen_movl_TN_reg(rd
, r_const
);
2470 tcg_temp_free(r_const
);
2473 case 0x0: /* UNIMPL */
2482 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
2485 r_const
= tcg_const_tl(dc
->pc
);
2486 gen_movl_TN_reg(15, r_const
);
2487 tcg_temp_free(r_const
);
2490 #ifdef TARGET_SPARC64
2491 if (unlikely(AM_CHECK(dc
))) {
2492 target
&= 0xffffffffULL
;
2498 case 2: /* FPU & Logical Operations */
2500 unsigned int xop
= GET_FIELD(insn
, 7, 12);
2501 if (xop
== 0x3a) { /* generate trap */
2502 int cond
= GET_FIELD(insn
, 3, 6);
2514 /* Conditional trap. */
2516 #ifdef TARGET_SPARC64
2518 int cc
= GET_FIELD_SP(insn
, 11, 12);
2520 gen_compare(&cmp
, 0, cond
, dc
);
2521 } else if (cc
== 2) {
2522 gen_compare(&cmp
, 1, cond
, dc
);
2527 gen_compare(&cmp
, 0, cond
, dc
);
2529 l1
= gen_new_label();
2530 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
2531 cmp
.c1
, cmp
.c2
, l1
);
2535 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2536 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2538 /* Don't use the normal temporaries, as they may well have
2539 gone out of scope with the branch above. While we're
2540 doing that we might as well pre-truncate to 32-bit. */
2541 trap
= tcg_temp_new_i32();
2543 rs1
= GET_FIELD_SP(insn
, 14, 18);
2545 rs2
= GET_FIELD_SP(insn
, 0, 6);
2547 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
2548 /* Signal that the trap value is fully constant. */
2551 TCGv t1
= tcg_temp_new();
2552 gen_movl_reg_TN(rs1
, t1
);
2553 tcg_gen_trunc_tl_i32(trap
, t1
);
2555 tcg_gen_addi_i32(trap
, trap
, rs2
);
2558 TCGv t1
= tcg_temp_new();
2559 TCGv t2
= tcg_temp_new();
2560 rs2
= GET_FIELD_SP(insn
, 0, 4);
2561 gen_movl_reg_TN(rs1
, t1
);
2562 gen_movl_reg_TN(rs2
, t2
);
2563 tcg_gen_add_tl(t1
, t1
, t2
);
2564 tcg_gen_trunc_tl_i32(trap
, t1
);
2569 tcg_gen_andi_i32(trap
, trap
, mask
);
2570 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2573 gen_helper_raise_exception(cpu_env
, trap
);
2574 tcg_temp_free_i32(trap
);
2583 } else if (xop
== 0x28) {
2584 rs1
= GET_FIELD(insn
, 13, 17);
2587 #ifndef TARGET_SPARC64
2588 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2589 manual, rdy on the microSPARC
2591 case 0x0f: /* stbar in the SPARCv8 manual,
2592 rdy on the microSPARC II */
2593 case 0x10 ... 0x1f: /* implementation-dependent in the
2594 SPARCv8 manual, rdy on the
2597 if (rs1
== 0x11 && dc
->def
->features
& CPU_FEATURE_ASR17
) {
2600 /* Read Asr17 for a Leon3 monoprocessor */
2601 r_const
= tcg_const_tl((1 << 8)
2602 | (dc
->def
->nwindows
- 1));
2603 gen_movl_TN_reg(rd
, r_const
);
2604 tcg_temp_free(r_const
);
2608 gen_movl_TN_reg(rd
, cpu_y
);
2610 #ifdef TARGET_SPARC64
2611 case 0x2: /* V9 rdccr */
2612 gen_helper_compute_psr(cpu_env
);
2613 gen_helper_rdccr(cpu_dst
, cpu_env
);
2614 gen_movl_TN_reg(rd
, cpu_dst
);
2616 case 0x3: /* V9 rdasi */
2617 tcg_gen_ext_i32_tl(cpu_dst
, cpu_asi
);
2618 gen_movl_TN_reg(rd
, cpu_dst
);
2620 case 0x4: /* V9 rdtick */
2624 r_tickptr
= tcg_temp_new_ptr();
2625 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2626 offsetof(CPUSPARCState
, tick
));
2627 gen_helper_tick_get_count(cpu_dst
, r_tickptr
);
2628 tcg_temp_free_ptr(r_tickptr
);
2629 gen_movl_TN_reg(rd
, cpu_dst
);
2632 case 0x5: /* V9 rdpc */
2636 if (unlikely(AM_CHECK(dc
))) {
2637 r_const
= tcg_const_tl(dc
->pc
& 0xffffffffULL
);
2639 r_const
= tcg_const_tl(dc
->pc
);
2641 gen_movl_TN_reg(rd
, r_const
);
2642 tcg_temp_free(r_const
);
2645 case 0x6: /* V9 rdfprs */
2646 tcg_gen_ext_i32_tl(cpu_dst
, cpu_fprs
);
2647 gen_movl_TN_reg(rd
, cpu_dst
);
2649 case 0xf: /* V9 membar */
2650 break; /* no effect */
2651 case 0x13: /* Graphics Status */
2652 if (gen_trap_ifnofpu(dc
)) {
2655 gen_movl_TN_reg(rd
, cpu_gsr
);
2657 case 0x16: /* Softint */
2658 tcg_gen_ext_i32_tl(cpu_dst
, cpu_softint
);
2659 gen_movl_TN_reg(rd
, cpu_dst
);
2661 case 0x17: /* Tick compare */
2662 gen_movl_TN_reg(rd
, cpu_tick_cmpr
);
2664 case 0x18: /* System tick */
2668 r_tickptr
= tcg_temp_new_ptr();
2669 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2670 offsetof(CPUSPARCState
, stick
));
2671 gen_helper_tick_get_count(cpu_dst
, r_tickptr
);
2672 tcg_temp_free_ptr(r_tickptr
);
2673 gen_movl_TN_reg(rd
, cpu_dst
);
2676 case 0x19: /* System tick compare */
2677 gen_movl_TN_reg(rd
, cpu_stick_cmpr
);
2679 case 0x10: /* Performance Control */
2680 case 0x11: /* Performance Instrumentation Counter */
2681 case 0x12: /* Dispatch Control */
2682 case 0x14: /* Softint set, WO */
2683 case 0x15: /* Softint clear, WO */
2688 #if !defined(CONFIG_USER_ONLY)
2689 } else if (xop
== 0x29) { /* rdpsr / UA2005 rdhpr */
2690 #ifndef TARGET_SPARC64
2691 if (!supervisor(dc
))
2693 gen_helper_compute_psr(cpu_env
);
2694 dc
->cc_op
= CC_OP_FLAGS
;
2695 gen_helper_rdpsr(cpu_dst
, cpu_env
);
2697 CHECK_IU_FEATURE(dc
, HYPV
);
2698 if (!hypervisor(dc
))
2700 rs1
= GET_FIELD(insn
, 13, 17);
2703 // gen_op_rdhpstate();
2706 // gen_op_rdhtstate();
2709 tcg_gen_mov_tl(cpu_dst
, cpu_hintp
);
2712 tcg_gen_mov_tl(cpu_dst
, cpu_htba
);
2715 tcg_gen_mov_tl(cpu_dst
, cpu_hver
);
2717 case 31: // hstick_cmpr
2718 tcg_gen_mov_tl(cpu_dst
, cpu_hstick_cmpr
);
2724 gen_movl_TN_reg(rd
, cpu_dst
);
2726 } else if (xop
== 0x2a) { /* rdwim / V9 rdpr */
2727 if (!supervisor(dc
))
2729 #ifdef TARGET_SPARC64
2730 rs1
= GET_FIELD(insn
, 13, 17);
2736 r_tsptr
= tcg_temp_new_ptr();
2737 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2738 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2739 offsetof(trap_state
, tpc
));
2740 tcg_temp_free_ptr(r_tsptr
);
2747 r_tsptr
= tcg_temp_new_ptr();
2748 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2749 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2750 offsetof(trap_state
, tnpc
));
2751 tcg_temp_free_ptr(r_tsptr
);
2758 r_tsptr
= tcg_temp_new_ptr();
2759 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2760 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2761 offsetof(trap_state
, tstate
));
2762 tcg_temp_free_ptr(r_tsptr
);
2769 r_tsptr
= tcg_temp_new_ptr();
2770 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2771 tcg_gen_ld_i32(cpu_tmp32
, r_tsptr
,
2772 offsetof(trap_state
, tt
));
2773 tcg_temp_free_ptr(r_tsptr
);
2774 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2781 r_tickptr
= tcg_temp_new_ptr();
2782 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2783 offsetof(CPUSPARCState
, tick
));
2784 gen_helper_tick_get_count(cpu_tmp0
, r_tickptr
);
2785 gen_movl_TN_reg(rd
, cpu_tmp0
);
2786 tcg_temp_free_ptr(r_tickptr
);
2790 tcg_gen_mov_tl(cpu_tmp0
, cpu_tbr
);
2793 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
,
2794 offsetof(CPUSPARCState
, pstate
));
2795 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2798 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
,
2799 offsetof(CPUSPARCState
, tl
));
2800 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2803 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
,
2804 offsetof(CPUSPARCState
, psrpil
));
2805 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2808 gen_helper_rdcwp(cpu_tmp0
, cpu_env
);
2811 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
,
2812 offsetof(CPUSPARCState
, cansave
));
2813 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2815 case 11: // canrestore
2816 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
,
2817 offsetof(CPUSPARCState
, canrestore
));
2818 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2820 case 12: // cleanwin
2821 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
,
2822 offsetof(CPUSPARCState
, cleanwin
));
2823 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2825 case 13: // otherwin
2826 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
,
2827 offsetof(CPUSPARCState
, otherwin
));
2828 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2831 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
,
2832 offsetof(CPUSPARCState
, wstate
));
2833 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2835 case 16: // UA2005 gl
2836 CHECK_IU_FEATURE(dc
, GL
);
2837 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
,
2838 offsetof(CPUSPARCState
, gl
));
2839 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_tmp32
);
2841 case 26: // UA2005 strand status
2842 CHECK_IU_FEATURE(dc
, HYPV
);
2843 if (!hypervisor(dc
))
2845 tcg_gen_mov_tl(cpu_tmp0
, cpu_ssr
);
2848 tcg_gen_mov_tl(cpu_tmp0
, cpu_ver
);
2855 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_wim
);
2857 gen_movl_TN_reg(rd
, cpu_tmp0
);
2859 } else if (xop
== 0x2b) { /* rdtbr / V9 flushw */
2860 #ifdef TARGET_SPARC64
2862 gen_helper_flushw(cpu_env
);
2864 if (!supervisor(dc
))
2866 gen_movl_TN_reg(rd
, cpu_tbr
);
2870 } else if (xop
== 0x34) { /* FPU Operations */
2871 if (gen_trap_ifnofpu(dc
)) {
2874 gen_op_clear_ieee_excp_and_FTT();
2875 rs1
= GET_FIELD(insn
, 13, 17);
2876 rs2
= GET_FIELD(insn
, 27, 31);
2877 xop
= GET_FIELD(insn
, 18, 26);
2880 case 0x1: /* fmovs */
2881 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
2882 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
2884 case 0x5: /* fnegs */
2885 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
2887 case 0x9: /* fabss */
2888 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
2890 case 0x29: /* fsqrts */
2891 CHECK_FPU_FEATURE(dc
, FSQRT
);
2892 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
2894 case 0x2a: /* fsqrtd */
2895 CHECK_FPU_FEATURE(dc
, FSQRT
);
2896 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
2898 case 0x2b: /* fsqrtq */
2899 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2900 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
2902 case 0x41: /* fadds */
2903 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
2905 case 0x42: /* faddd */
2906 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
2908 case 0x43: /* faddq */
2909 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2910 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
2912 case 0x45: /* fsubs */
2913 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
2915 case 0x46: /* fsubd */
2916 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
2918 case 0x47: /* fsubq */
2919 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2920 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
2922 case 0x49: /* fmuls */
2923 CHECK_FPU_FEATURE(dc
, FMUL
);
2924 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
2926 case 0x4a: /* fmuld */
2927 CHECK_FPU_FEATURE(dc
, FMUL
);
2928 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
2930 case 0x4b: /* fmulq */
2931 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2932 CHECK_FPU_FEATURE(dc
, FMUL
);
2933 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
2935 case 0x4d: /* fdivs */
2936 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
2938 case 0x4e: /* fdivd */
2939 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
2941 case 0x4f: /* fdivq */
2942 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2943 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
2945 case 0x69: /* fsmuld */
2946 CHECK_FPU_FEATURE(dc
, FSMULD
);
2947 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
2949 case 0x6e: /* fdmulq */
2950 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2951 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
2953 case 0xc4: /* fitos */
2954 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
2956 case 0xc6: /* fdtos */
2957 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
2959 case 0xc7: /* fqtos */
2960 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2961 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
2963 case 0xc8: /* fitod */
2964 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
2966 case 0xc9: /* fstod */
2967 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
2969 case 0xcb: /* fqtod */
2970 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2971 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
2973 case 0xcc: /* fitoq */
2974 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2975 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
2977 case 0xcd: /* fstoq */
2978 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2979 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
2981 case 0xce: /* fdtoq */
2982 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2983 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
2985 case 0xd1: /* fstoi */
2986 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
2988 case 0xd2: /* fdtoi */
2989 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
2991 case 0xd3: /* fqtoi */
2992 CHECK_FPU_FEATURE(dc
, FLOAT128
);
2993 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
2995 #ifdef TARGET_SPARC64
2996 case 0x2: /* V9 fmovd */
2997 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
2998 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
3000 case 0x3: /* V9 fmovq */
3001 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3002 gen_move_Q(rd
, rs2
);
3004 case 0x6: /* V9 fnegd */
3005 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
3007 case 0x7: /* V9 fnegq */
3008 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3009 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
3011 case 0xa: /* V9 fabsd */
3012 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
3014 case 0xb: /* V9 fabsq */
3015 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3016 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
3018 case 0x81: /* V9 fstox */
3019 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
3021 case 0x82: /* V9 fdtox */
3022 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
3024 case 0x83: /* V9 fqtox */
3025 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3026 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
3028 case 0x84: /* V9 fxtos */
3029 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
3031 case 0x88: /* V9 fxtod */
3032 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
3034 case 0x8c: /* V9 fxtoq */
3035 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3036 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
3042 } else if (xop
== 0x35) { /* FPU Operations */
3043 #ifdef TARGET_SPARC64
3046 if (gen_trap_ifnofpu(dc
)) {
3049 gen_op_clear_ieee_excp_and_FTT();
3050 rs1
= GET_FIELD(insn
, 13, 17);
3051 rs2
= GET_FIELD(insn
, 27, 31);
3052 xop
= GET_FIELD(insn
, 18, 26);
3054 #ifdef TARGET_SPARC64
3055 if ((xop
& 0x11f) == 0x005) { // V9 fmovsr
3058 l1
= gen_new_label();
3059 cond
= GET_FIELD_SP(insn
, 14, 17);
3060 cpu_src1
= get_src1(insn
, cpu_src1
);
3061 tcg_gen_brcondi_tl(gen_tcg_cond_reg
[cond
], cpu_src1
,
3063 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
3064 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
3067 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
3070 l1
= gen_new_label();
3071 cond
= GET_FIELD_SP(insn
, 14, 17);
3072 cpu_src1
= get_src1(insn
, cpu_src1
);
3073 tcg_gen_brcondi_tl(gen_tcg_cond_reg
[cond
], cpu_src1
,
3075 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
3076 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
3079 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
3082 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3083 l1
= gen_new_label();
3084 cond
= GET_FIELD_SP(insn
, 14, 17);
3085 cpu_src1
= get_src1(insn
, cpu_src1
);
3086 tcg_gen_brcondi_tl(gen_tcg_cond_reg
[cond
], cpu_src1
,
3088 gen_move_Q(rd
, rs2
);
3094 #ifdef TARGET_SPARC64
3095 #define FMOVCC(fcc, sz) \
3098 cond = GET_FIELD_SP(insn, 14, 17); \
3099 gen_fcompare(&cmp, fcc, cond); \
3100 gen_fmov##sz(dc, &cmp, rd, rs2); \
3101 free_compare(&cmp); \
3104 case 0x001: /* V9 fmovscc %fcc0 */
3107 case 0x002: /* V9 fmovdcc %fcc0 */
3110 case 0x003: /* V9 fmovqcc %fcc0 */
3111 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3114 case 0x041: /* V9 fmovscc %fcc1 */
3117 case 0x042: /* V9 fmovdcc %fcc1 */
3120 case 0x043: /* V9 fmovqcc %fcc1 */
3121 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3124 case 0x081: /* V9 fmovscc %fcc2 */
3127 case 0x082: /* V9 fmovdcc %fcc2 */
3130 case 0x083: /* V9 fmovqcc %fcc2 */
3131 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3134 case 0x0c1: /* V9 fmovscc %fcc3 */
3137 case 0x0c2: /* V9 fmovdcc %fcc3 */
3140 case 0x0c3: /* V9 fmovqcc %fcc3 */
3141 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3145 #define FMOVCC(xcc, sz) \
3148 cond = GET_FIELD_SP(insn, 14, 17); \
3149 gen_compare(&cmp, xcc, cond, dc); \
3150 gen_fmov##sz(dc, &cmp, rd, rs2); \
3151 free_compare(&cmp); \
3154 case 0x101: /* V9 fmovscc %icc */
3157 case 0x102: /* V9 fmovdcc %icc */
3160 case 0x103: /* V9 fmovqcc %icc */
3161 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3164 case 0x181: /* V9 fmovscc %xcc */
3167 case 0x182: /* V9 fmovdcc %xcc */
3170 case 0x183: /* V9 fmovqcc %xcc */
3171 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3176 case 0x51: /* fcmps, V9 %fcc */
3177 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3178 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3179 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3181 case 0x52: /* fcmpd, V9 %fcc */
3182 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3183 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3184 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3186 case 0x53: /* fcmpq, V9 %fcc */
3187 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3188 gen_op_load_fpr_QT0(QFPREG(rs1
));
3189 gen_op_load_fpr_QT1(QFPREG(rs2
));
3190 gen_op_fcmpq(rd
& 3);
3192 case 0x55: /* fcmpes, V9 %fcc */
3193 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3194 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3195 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3197 case 0x56: /* fcmped, V9 %fcc */
3198 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3199 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3200 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3202 case 0x57: /* fcmpeq, V9 %fcc */
3203 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3204 gen_op_load_fpr_QT0(QFPREG(rs1
));
3205 gen_op_load_fpr_QT1(QFPREG(rs2
));
3206 gen_op_fcmpeq(rd
& 3);
3211 } else if (xop
== 0x2) {
3214 rs1
= GET_FIELD(insn
, 13, 17);
3216 // or %g0, x, y -> mov T0, x; mov y, T0
3217 if (IS_IMM
) { /* immediate */
3220 simm
= GET_FIELDs(insn
, 19, 31);
3221 r_const
= tcg_const_tl(simm
);
3222 gen_movl_TN_reg(rd
, r_const
);
3223 tcg_temp_free(r_const
);
3224 } else { /* register */
3225 rs2
= GET_FIELD(insn
, 27, 31);
3226 gen_movl_reg_TN(rs2
, cpu_dst
);
3227 gen_movl_TN_reg(rd
, cpu_dst
);
3230 cpu_src1
= get_src1(insn
, cpu_src1
);
3231 if (IS_IMM
) { /* immediate */
3232 simm
= GET_FIELDs(insn
, 19, 31);
3233 tcg_gen_ori_tl(cpu_dst
, cpu_src1
, simm
);
3234 gen_movl_TN_reg(rd
, cpu_dst
);
3235 } else { /* register */
3236 // or x, %g0, y -> mov T1, x; mov y, T1
3237 rs2
= GET_FIELD(insn
, 27, 31);
3239 gen_movl_reg_TN(rs2
, cpu_src2
);
3240 tcg_gen_or_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3241 gen_movl_TN_reg(rd
, cpu_dst
);
3243 gen_movl_TN_reg(rd
, cpu_src1
);
3246 #ifdef TARGET_SPARC64
3247 } else if (xop
== 0x25) { /* sll, V9 sllx */
3248 cpu_src1
= get_src1(insn
, cpu_src1
);
3249 if (IS_IMM
) { /* immediate */
3250 simm
= GET_FIELDs(insn
, 20, 31);
3251 if (insn
& (1 << 12)) {
3252 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3254 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
3256 } else { /* register */
3257 rs2
= GET_FIELD(insn
, 27, 31);
3258 gen_movl_reg_TN(rs2
, cpu_src2
);
3259 if (insn
& (1 << 12)) {
3260 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3262 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3264 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3266 gen_movl_TN_reg(rd
, cpu_dst
);
3267 } else if (xop
== 0x26) { /* srl, V9 srlx */
3268 cpu_src1
= get_src1(insn
, cpu_src1
);
3269 if (IS_IMM
) { /* immediate */
3270 simm
= GET_FIELDs(insn
, 20, 31);
3271 if (insn
& (1 << 12)) {
3272 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3274 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3275 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3277 } else { /* register */
3278 rs2
= GET_FIELD(insn
, 27, 31);
3279 gen_movl_reg_TN(rs2
, cpu_src2
);
3280 if (insn
& (1 << 12)) {
3281 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3282 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3284 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3285 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3286 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3289 gen_movl_TN_reg(rd
, cpu_dst
);
3290 } else if (xop
== 0x27) { /* sra, V9 srax */
3291 cpu_src1
= get_src1(insn
, cpu_src1
);
3292 if (IS_IMM
) { /* immediate */
3293 simm
= GET_FIELDs(insn
, 20, 31);
3294 if (insn
& (1 << 12)) {
3295 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3297 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3298 tcg_gen_ext32s_i64(cpu_dst
, cpu_dst
);
3299 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3301 } else { /* register */
3302 rs2
= GET_FIELD(insn
, 27, 31);
3303 gen_movl_reg_TN(rs2
, cpu_src2
);
3304 if (insn
& (1 << 12)) {
3305 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3306 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3308 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3309 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3310 tcg_gen_ext32s_i64(cpu_dst
, cpu_dst
);
3311 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3314 gen_movl_TN_reg(rd
, cpu_dst
);
3316 } else if (xop
< 0x36) {
3318 cpu_src1
= get_src1(insn
, cpu_src1
);
3319 cpu_src2
= get_src2(insn
, cpu_src2
);
3320 switch (xop
& ~0x10) {
3323 simm
= GET_FIELDs(insn
, 19, 31);
3325 gen_op_addi_cc(cpu_dst
, cpu_src1
, simm
);
3326 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3327 dc
->cc_op
= CC_OP_ADD
;
3329 tcg_gen_addi_tl(cpu_dst
, cpu_src1
, simm
);
3333 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3334 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3335 dc
->cc_op
= CC_OP_ADD
;
3337 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3343 simm
= GET_FIELDs(insn
, 19, 31);
3344 tcg_gen_andi_tl(cpu_dst
, cpu_src1
, simm
);
3346 tcg_gen_and_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3349 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3350 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3351 dc
->cc_op
= CC_OP_LOGIC
;
3356 simm
= GET_FIELDs(insn
, 19, 31);
3357 tcg_gen_ori_tl(cpu_dst
, cpu_src1
, simm
);
3359 tcg_gen_or_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3362 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3363 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3364 dc
->cc_op
= CC_OP_LOGIC
;
3369 simm
= GET_FIELDs(insn
, 19, 31);
3370 tcg_gen_xori_tl(cpu_dst
, cpu_src1
, simm
);
3372 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3375 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3376 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3377 dc
->cc_op
= CC_OP_LOGIC
;
3382 simm
= GET_FIELDs(insn
, 19, 31);
3384 gen_op_subi_cc(cpu_dst
, cpu_src1
, simm
, dc
);
3386 tcg_gen_subi_tl(cpu_dst
, cpu_src1
, simm
);
3390 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3391 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
3392 dc
->cc_op
= CC_OP_SUB
;
3394 tcg_gen_sub_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3398 case 0x5: /* andn */
3400 simm
= GET_FIELDs(insn
, 19, 31);
3401 tcg_gen_andi_tl(cpu_dst
, cpu_src1
, ~simm
);
3403 tcg_gen_andc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3406 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3407 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3408 dc
->cc_op
= CC_OP_LOGIC
;
3413 simm
= GET_FIELDs(insn
, 19, 31);
3414 tcg_gen_ori_tl(cpu_dst
, cpu_src1
, ~simm
);
3416 tcg_gen_orc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3419 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3420 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3421 dc
->cc_op
= CC_OP_LOGIC
;
3424 case 0x7: /* xorn */
3426 simm
= GET_FIELDs(insn
, 19, 31);
3427 tcg_gen_xori_tl(cpu_dst
, cpu_src1
, ~simm
);
3429 tcg_gen_not_tl(cpu_tmp0
, cpu_src2
);
3430 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3433 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3434 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3435 dc
->cc_op
= CC_OP_LOGIC
;
3438 case 0x8: /* addx, V9 addc */
3439 gen_op_addx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3442 #ifdef TARGET_SPARC64
3443 case 0x9: /* V9 mulx */
3445 simm
= GET_FIELDs(insn
, 19, 31);
3446 tcg_gen_muli_i64(cpu_dst
, cpu_src1
, simm
);
3448 tcg_gen_mul_i64(cpu_dst
, cpu_src1
, cpu_src2
);
3452 case 0xa: /* umul */
3453 CHECK_IU_FEATURE(dc
, MUL
);
3454 gen_op_umul(cpu_dst
, cpu_src1
, cpu_src2
);
3456 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3457 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3458 dc
->cc_op
= CC_OP_LOGIC
;
3461 case 0xb: /* smul */
3462 CHECK_IU_FEATURE(dc
, MUL
);
3463 gen_op_smul(cpu_dst
, cpu_src1
, cpu_src2
);
3465 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3466 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3467 dc
->cc_op
= CC_OP_LOGIC
;
3470 case 0xc: /* subx, V9 subc */
3471 gen_op_subx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3474 #ifdef TARGET_SPARC64
3475 case 0xd: /* V9 udivx */
3476 gen_helper_udivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
3479 case 0xe: /* udiv */
3480 CHECK_IU_FEATURE(dc
, DIV
);
3482 gen_helper_udiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
3484 dc
->cc_op
= CC_OP_DIV
;
3486 gen_helper_udiv(cpu_dst
, cpu_env
, cpu_src1
,
3490 case 0xf: /* sdiv */
3491 CHECK_IU_FEATURE(dc
, DIV
);
3493 gen_helper_sdiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
3495 dc
->cc_op
= CC_OP_DIV
;
3497 gen_helper_sdiv(cpu_dst
, cpu_env
, cpu_src1
,
3504 gen_movl_TN_reg(rd
, cpu_dst
);
3506 cpu_src1
= get_src1(insn
, cpu_src1
);
3507 cpu_src2
= get_src2(insn
, cpu_src2
);
3509 case 0x20: /* taddcc */
3510 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3511 gen_movl_TN_reg(rd
, cpu_dst
);
3512 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TADD
);
3513 dc
->cc_op
= CC_OP_TADD
;
3515 case 0x21: /* tsubcc */
3516 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3517 gen_movl_TN_reg(rd
, cpu_dst
);
3518 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TSUB
);
3519 dc
->cc_op
= CC_OP_TSUB
;
3521 case 0x22: /* taddcctv */
3522 gen_helper_taddcctv(cpu_dst
, cpu_env
,
3523 cpu_src1
, cpu_src2
);
3524 gen_movl_TN_reg(rd
, cpu_dst
);
3525 dc
->cc_op
= CC_OP_TADDTV
;
3527 case 0x23: /* tsubcctv */
3528 gen_helper_tsubcctv(cpu_dst
, cpu_env
,
3529 cpu_src1
, cpu_src2
);
3530 gen_movl_TN_reg(rd
, cpu_dst
);
3531 dc
->cc_op
= CC_OP_TSUBTV
;
3533 case 0x24: /* mulscc */
3534 gen_helper_compute_psr(cpu_env
);
3535 gen_op_mulscc(cpu_dst
, cpu_src1
, cpu_src2
);
3536 gen_movl_TN_reg(rd
, cpu_dst
);
3537 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3538 dc
->cc_op
= CC_OP_ADD
;
3540 #ifndef TARGET_SPARC64
3541 case 0x25: /* sll */
3542 if (IS_IMM
) { /* immediate */
3543 simm
= GET_FIELDs(insn
, 20, 31);
3544 tcg_gen_shli_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3545 } else { /* register */
3546 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3547 tcg_gen_shl_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3549 gen_movl_TN_reg(rd
, cpu_dst
);
3551 case 0x26: /* srl */
3552 if (IS_IMM
) { /* immediate */
3553 simm
= GET_FIELDs(insn
, 20, 31);
3554 tcg_gen_shri_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3555 } else { /* register */
3556 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3557 tcg_gen_shr_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3559 gen_movl_TN_reg(rd
, cpu_dst
);
3561 case 0x27: /* sra */
3562 if (IS_IMM
) { /* immediate */
3563 simm
= GET_FIELDs(insn
, 20, 31);
3564 tcg_gen_sari_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3565 } else { /* register */
3566 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3567 tcg_gen_sar_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3569 gen_movl_TN_reg(rd
, cpu_dst
);
3576 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3577 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
3579 #ifndef TARGET_SPARC64
3580 case 0x01 ... 0x0f: /* undefined in the
3584 case 0x10 ... 0x1f: /* implementation-dependent
3590 case 0x2: /* V9 wrccr */
3591 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3592 gen_helper_wrccr(cpu_env
, cpu_dst
);
3593 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3594 dc
->cc_op
= CC_OP_FLAGS
;
3596 case 0x3: /* V9 wrasi */
3597 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3598 tcg_gen_andi_tl(cpu_dst
, cpu_dst
, 0xff);
3599 tcg_gen_trunc_tl_i32(cpu_asi
, cpu_dst
);
3601 case 0x6: /* V9 wrfprs */
3602 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3603 tcg_gen_trunc_tl_i32(cpu_fprs
, cpu_dst
);
3609 case 0xf: /* V9 sir, nop if user */
3610 #if !defined(CONFIG_USER_ONLY)
3611 if (supervisor(dc
)) {
3616 case 0x13: /* Graphics Status */
3617 if (gen_trap_ifnofpu(dc
)) {
3620 tcg_gen_xor_tl(cpu_gsr
, cpu_src1
, cpu_src2
);
3622 case 0x14: /* Softint set */
3623 if (!supervisor(dc
))
3625 tcg_gen_xor_tl(cpu_tmp64
, cpu_src1
, cpu_src2
);
3626 gen_helper_set_softint(cpu_env
, cpu_tmp64
);
3628 case 0x15: /* Softint clear */
3629 if (!supervisor(dc
))
3631 tcg_gen_xor_tl(cpu_tmp64
, cpu_src1
, cpu_src2
);
3632 gen_helper_clear_softint(cpu_env
, cpu_tmp64
);
3634 case 0x16: /* Softint write */
3635 if (!supervisor(dc
))
3637 tcg_gen_xor_tl(cpu_tmp64
, cpu_src1
, cpu_src2
);
3638 gen_helper_write_softint(cpu_env
, cpu_tmp64
);
3640 case 0x17: /* Tick compare */
3641 #if !defined(CONFIG_USER_ONLY)
3642 if (!supervisor(dc
))
3648 tcg_gen_xor_tl(cpu_tick_cmpr
, cpu_src1
,
3650 r_tickptr
= tcg_temp_new_ptr();
3651 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3652 offsetof(CPUSPARCState
, tick
));
3653 gen_helper_tick_set_limit(r_tickptr
,
3655 tcg_temp_free_ptr(r_tickptr
);
3658 case 0x18: /* System tick */
3659 #if !defined(CONFIG_USER_ONLY)
3660 if (!supervisor(dc
))
3666 tcg_gen_xor_tl(cpu_dst
, cpu_src1
,
3668 r_tickptr
= tcg_temp_new_ptr();
3669 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3670 offsetof(CPUSPARCState
, stick
));
3671 gen_helper_tick_set_count(r_tickptr
,
3673 tcg_temp_free_ptr(r_tickptr
);
3676 case 0x19: /* System tick compare */
3677 #if !defined(CONFIG_USER_ONLY)
3678 if (!supervisor(dc
))
3684 tcg_gen_xor_tl(cpu_stick_cmpr
, cpu_src1
,
3686 r_tickptr
= tcg_temp_new_ptr();
3687 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3688 offsetof(CPUSPARCState
, stick
));
3689 gen_helper_tick_set_limit(r_tickptr
,
3691 tcg_temp_free_ptr(r_tickptr
);
3695 case 0x10: /* Performance Control */
3696 case 0x11: /* Performance Instrumentation
3698 case 0x12: /* Dispatch Control */
3705 #if !defined(CONFIG_USER_ONLY)
3706 case 0x31: /* wrpsr, V9 saved, restored */
3708 if (!supervisor(dc
))
3710 #ifdef TARGET_SPARC64
3713 gen_helper_saved(cpu_env
);
3716 gen_helper_restored(cpu_env
);
3718 case 2: /* UA2005 allclean */
3719 case 3: /* UA2005 otherw */
3720 case 4: /* UA2005 normalw */
3721 case 5: /* UA2005 invalw */
3727 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3728 gen_helper_wrpsr(cpu_env
, cpu_dst
);
3729 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3730 dc
->cc_op
= CC_OP_FLAGS
;
3738 case 0x32: /* wrwim, V9 wrpr */
3740 if (!supervisor(dc
))
3742 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3743 #ifdef TARGET_SPARC64
3749 r_tsptr
= tcg_temp_new_ptr();
3750 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3751 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3752 offsetof(trap_state
, tpc
));
3753 tcg_temp_free_ptr(r_tsptr
);
3760 r_tsptr
= tcg_temp_new_ptr();
3761 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3762 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3763 offsetof(trap_state
, tnpc
));
3764 tcg_temp_free_ptr(r_tsptr
);
3771 r_tsptr
= tcg_temp_new_ptr();
3772 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3773 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3774 offsetof(trap_state
,
3776 tcg_temp_free_ptr(r_tsptr
);
3783 r_tsptr
= tcg_temp_new_ptr();
3784 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3785 tcg_gen_trunc_tl_i32(cpu_tmp32
, cpu_tmp0
);
3786 tcg_gen_st_i32(cpu_tmp32
, r_tsptr
,
3787 offsetof(trap_state
, tt
));
3788 tcg_temp_free_ptr(r_tsptr
);
3795 r_tickptr
= tcg_temp_new_ptr();
3796 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3797 offsetof(CPUSPARCState
, tick
));
3798 gen_helper_tick_set_count(r_tickptr
,
3800 tcg_temp_free_ptr(r_tickptr
);
3804 tcg_gen_mov_tl(cpu_tbr
, cpu_tmp0
);
3808 TCGv r_tmp
= tcg_temp_local_new();
3810 tcg_gen_mov_tl(r_tmp
, cpu_tmp0
);
3812 gen_helper_wrpstate(cpu_env
, r_tmp
);
3813 tcg_temp_free(r_tmp
);
3814 dc
->npc
= DYNAMIC_PC
;
3819 TCGv r_tmp
= tcg_temp_local_new();
3821 tcg_gen_mov_tl(r_tmp
, cpu_tmp0
);
3823 tcg_gen_trunc_tl_i32(cpu_tmp32
, r_tmp
);
3824 tcg_temp_free(r_tmp
);
3825 tcg_gen_st_i32(cpu_tmp32
, cpu_env
,
3826 offsetof(CPUSPARCState
, tl
));
3827 dc
->npc
= DYNAMIC_PC
;
3831 gen_helper_wrpil(cpu_env
, cpu_tmp0
);
3834 gen_helper_wrcwp(cpu_env
, cpu_tmp0
);
3837 tcg_gen_trunc_tl_i32(cpu_tmp32
, cpu_tmp0
);
3838 tcg_gen_st_i32(cpu_tmp32
, cpu_env
,
3839 offsetof(CPUSPARCState
,
3842 case 11: // canrestore
3843 tcg_gen_trunc_tl_i32(cpu_tmp32
, cpu_tmp0
);
3844 tcg_gen_st_i32(cpu_tmp32
, cpu_env
,
3845 offsetof(CPUSPARCState
,
3848 case 12: // cleanwin
3849 tcg_gen_trunc_tl_i32(cpu_tmp32
, cpu_tmp0
);
3850 tcg_gen_st_i32(cpu_tmp32
, cpu_env
,
3851 offsetof(CPUSPARCState
,
3854 case 13: // otherwin
3855 tcg_gen_trunc_tl_i32(cpu_tmp32
, cpu_tmp0
);
3856 tcg_gen_st_i32(cpu_tmp32
, cpu_env
,
3857 offsetof(CPUSPARCState
,
3861 tcg_gen_trunc_tl_i32(cpu_tmp32
, cpu_tmp0
);
3862 tcg_gen_st_i32(cpu_tmp32
, cpu_env
,
3863 offsetof(CPUSPARCState
,
3866 case 16: // UA2005 gl
3867 CHECK_IU_FEATURE(dc
, GL
);
3868 tcg_gen_trunc_tl_i32(cpu_tmp32
, cpu_tmp0
);
3869 tcg_gen_st_i32(cpu_tmp32
, cpu_env
,
3870 offsetof(CPUSPARCState
, gl
));
3872 case 26: // UA2005 strand status
3873 CHECK_IU_FEATURE(dc
, HYPV
);
3874 if (!hypervisor(dc
))
3876 tcg_gen_mov_tl(cpu_ssr
, cpu_tmp0
);
3882 tcg_gen_trunc_tl_i32(cpu_tmp32
, cpu_tmp0
);
3883 if (dc
->def
->nwindows
!= 32)
3884 tcg_gen_andi_tl(cpu_tmp32
, cpu_tmp32
,
3885 (1 << dc
->def
->nwindows
) - 1);
3886 tcg_gen_mov_i32(cpu_wim
, cpu_tmp32
);
3890 case 0x33: /* wrtbr, UA2005 wrhpr */
3892 #ifndef TARGET_SPARC64
3893 if (!supervisor(dc
))
3895 tcg_gen_xor_tl(cpu_tbr
, cpu_src1
, cpu_src2
);
3897 CHECK_IU_FEATURE(dc
, HYPV
);
3898 if (!hypervisor(dc
))
3900 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3903 // XXX gen_op_wrhpstate();
3910 // XXX gen_op_wrhtstate();
3913 tcg_gen_mov_tl(cpu_hintp
, cpu_tmp0
);
3916 tcg_gen_mov_tl(cpu_htba
, cpu_tmp0
);
3918 case 31: // hstick_cmpr
3922 tcg_gen_mov_tl(cpu_hstick_cmpr
, cpu_tmp0
);
3923 r_tickptr
= tcg_temp_new_ptr();
3924 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3925 offsetof(CPUSPARCState
, hstick
));
3926 gen_helper_tick_set_limit(r_tickptr
,
3928 tcg_temp_free_ptr(r_tickptr
);
3931 case 6: // hver readonly
3939 #ifdef TARGET_SPARC64
3940 case 0x2c: /* V9 movcc */
3942 int cc
= GET_FIELD_SP(insn
, 11, 12);
3943 int cond
= GET_FIELD_SP(insn
, 14, 17);
3946 if (insn
& (1 << 18)) {
3948 gen_compare(&cmp
, 0, cond
, dc
);
3949 } else if (cc
== 2) {
3950 gen_compare(&cmp
, 1, cond
, dc
);
3955 gen_fcompare(&cmp
, cc
, cond
);
3958 /* The get_src2 above loaded the normal 13-bit
3959 immediate field, not the 11-bit field we have
3960 in movcc. But it did handle the reg case. */
3962 simm
= GET_FIELD_SPs(insn
, 0, 10);
3963 tcg_gen_movi_tl(cpu_src2
, simm
);
3966 gen_movl_reg_TN(rd
, cpu_dst
);
3967 tcg_gen_movcond_tl(cmp
.cond
, cpu_dst
,
3971 gen_movl_TN_reg(rd
, cpu_dst
);
3974 case 0x2d: /* V9 sdivx */
3975 gen_helper_sdivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
3976 gen_movl_TN_reg(rd
, cpu_dst
);
3978 case 0x2e: /* V9 popc */
3980 cpu_src2
= get_src2(insn
, cpu_src2
);
3981 gen_helper_popc(cpu_dst
, cpu_src2
);
3982 gen_movl_TN_reg(rd
, cpu_dst
);
3984 case 0x2f: /* V9 movr */
3986 int cond
= GET_FIELD_SP(insn
, 10, 12);
3989 gen_compare_reg(&cmp
, cond
, cpu_src1
);
3991 /* The get_src2 above loaded the normal 13-bit
3992 immediate field, not the 10-bit field we have
3993 in movr. But it did handle the reg case. */
3995 simm
= GET_FIELD_SPs(insn
, 0, 9);
3996 tcg_gen_movi_tl(cpu_src2
, simm
);
3999 gen_movl_reg_TN(rd
, cpu_dst
);
4000 tcg_gen_movcond_tl(cmp
.cond
, cpu_dst
,
4004 gen_movl_TN_reg(rd
, cpu_dst
);
4012 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4013 #ifdef TARGET_SPARC64
4014 int opf
= GET_FIELD_SP(insn
, 5, 13);
4015 rs1
= GET_FIELD(insn
, 13, 17);
4016 rs2
= GET_FIELD(insn
, 27, 31);
4017 if (gen_trap_ifnofpu(dc
)) {
4022 case 0x000: /* VIS I edge8cc */
4023 CHECK_FPU_FEATURE(dc
, VIS1
);
4024 gen_movl_reg_TN(rs1
, cpu_src1
);
4025 gen_movl_reg_TN(rs2
, cpu_src2
);
4026 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4027 gen_movl_TN_reg(rd
, cpu_dst
);
4029 case 0x001: /* VIS II edge8n */
4030 CHECK_FPU_FEATURE(dc
, VIS2
);
4031 gen_movl_reg_TN(rs1
, cpu_src1
);
4032 gen_movl_reg_TN(rs2
, cpu_src2
);
4033 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4034 gen_movl_TN_reg(rd
, cpu_dst
);
4036 case 0x002: /* VIS I edge8lcc */
4037 CHECK_FPU_FEATURE(dc
, VIS1
);
4038 gen_movl_reg_TN(rs1
, cpu_src1
);
4039 gen_movl_reg_TN(rs2
, cpu_src2
);
4040 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4041 gen_movl_TN_reg(rd
, cpu_dst
);
4043 case 0x003: /* VIS II edge8ln */
4044 CHECK_FPU_FEATURE(dc
, VIS2
);
4045 gen_movl_reg_TN(rs1
, cpu_src1
);
4046 gen_movl_reg_TN(rs2
, cpu_src2
);
4047 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4048 gen_movl_TN_reg(rd
, cpu_dst
);
4050 case 0x004: /* VIS I edge16cc */
4051 CHECK_FPU_FEATURE(dc
, VIS1
);
4052 gen_movl_reg_TN(rs1
, cpu_src1
);
4053 gen_movl_reg_TN(rs2
, cpu_src2
);
4054 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4055 gen_movl_TN_reg(rd
, cpu_dst
);
4057 case 0x005: /* VIS II edge16n */
4058 CHECK_FPU_FEATURE(dc
, VIS2
);
4059 gen_movl_reg_TN(rs1
, cpu_src1
);
4060 gen_movl_reg_TN(rs2
, cpu_src2
);
4061 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4062 gen_movl_TN_reg(rd
, cpu_dst
);
4064 case 0x006: /* VIS I edge16lcc */
4065 CHECK_FPU_FEATURE(dc
, VIS1
);
4066 gen_movl_reg_TN(rs1
, cpu_src1
);
4067 gen_movl_reg_TN(rs2
, cpu_src2
);
4068 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4069 gen_movl_TN_reg(rd
, cpu_dst
);
4071 case 0x007: /* VIS II edge16ln */
4072 CHECK_FPU_FEATURE(dc
, VIS2
);
4073 gen_movl_reg_TN(rs1
, cpu_src1
);
4074 gen_movl_reg_TN(rs2
, cpu_src2
);
4075 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4076 gen_movl_TN_reg(rd
, cpu_dst
);
4078 case 0x008: /* VIS I edge32cc */
4079 CHECK_FPU_FEATURE(dc
, VIS1
);
4080 gen_movl_reg_TN(rs1
, cpu_src1
);
4081 gen_movl_reg_TN(rs2
, cpu_src2
);
4082 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4083 gen_movl_TN_reg(rd
, cpu_dst
);
4085 case 0x009: /* VIS II edge32n */
4086 CHECK_FPU_FEATURE(dc
, VIS2
);
4087 gen_movl_reg_TN(rs1
, cpu_src1
);
4088 gen_movl_reg_TN(rs2
, cpu_src2
);
4089 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4090 gen_movl_TN_reg(rd
, cpu_dst
);
4092 case 0x00a: /* VIS I edge32lcc */
4093 CHECK_FPU_FEATURE(dc
, VIS1
);
4094 gen_movl_reg_TN(rs1
, cpu_src1
);
4095 gen_movl_reg_TN(rs2
, cpu_src2
);
4096 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4097 gen_movl_TN_reg(rd
, cpu_dst
);
4099 case 0x00b: /* VIS II edge32ln */
4100 CHECK_FPU_FEATURE(dc
, VIS2
);
4101 gen_movl_reg_TN(rs1
, cpu_src1
);
4102 gen_movl_reg_TN(rs2
, cpu_src2
);
4103 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4104 gen_movl_TN_reg(rd
, cpu_dst
);
4106 case 0x010: /* VIS I array8 */
4107 CHECK_FPU_FEATURE(dc
, VIS1
);
4108 cpu_src1
= get_src1(insn
, cpu_src1
);
4109 gen_movl_reg_TN(rs2
, cpu_src2
);
4110 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4111 gen_movl_TN_reg(rd
, cpu_dst
);
4113 case 0x012: /* VIS I array16 */
4114 CHECK_FPU_FEATURE(dc
, VIS1
);
4115 cpu_src1
= get_src1(insn
, cpu_src1
);
4116 gen_movl_reg_TN(rs2
, cpu_src2
);
4117 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4118 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4119 gen_movl_TN_reg(rd
, cpu_dst
);
4121 case 0x014: /* VIS I array32 */
4122 CHECK_FPU_FEATURE(dc
, VIS1
);
4123 cpu_src1
= get_src1(insn
, cpu_src1
);
4124 gen_movl_reg_TN(rs2
, cpu_src2
);
4125 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4126 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4127 gen_movl_TN_reg(rd
, cpu_dst
);
4129 case 0x018: /* VIS I alignaddr */
4130 CHECK_FPU_FEATURE(dc
, VIS1
);
4131 cpu_src1
= get_src1(insn
, cpu_src1
);
4132 gen_movl_reg_TN(rs2
, cpu_src2
);
4133 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4134 gen_movl_TN_reg(rd
, cpu_dst
);
4136 case 0x01a: /* VIS I alignaddrl */
4137 CHECK_FPU_FEATURE(dc
, VIS1
);
4138 cpu_src1
= get_src1(insn
, cpu_src1
);
4139 gen_movl_reg_TN(rs2
, cpu_src2
);
4140 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4141 gen_movl_TN_reg(rd
, cpu_dst
);
4143 case 0x019: /* VIS II bmask */
4144 CHECK_FPU_FEATURE(dc
, VIS2
);
4145 cpu_src1
= get_src1(insn
, cpu_src1
);
4146 cpu_src2
= get_src1(insn
, cpu_src2
);
4147 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4148 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4149 gen_movl_TN_reg(rd
, cpu_dst
);
4151 case 0x020: /* VIS I fcmple16 */
4152 CHECK_FPU_FEATURE(dc
, VIS1
);
4153 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4154 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4155 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4156 gen_movl_TN_reg(rd
, cpu_dst
);
4158 case 0x022: /* VIS I fcmpne16 */
4159 CHECK_FPU_FEATURE(dc
, VIS1
);
4160 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4161 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4162 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4163 gen_movl_TN_reg(rd
, cpu_dst
);
4165 case 0x024: /* VIS I fcmple32 */
4166 CHECK_FPU_FEATURE(dc
, VIS1
);
4167 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4168 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4169 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4170 gen_movl_TN_reg(rd
, cpu_dst
);
4172 case 0x026: /* VIS I fcmpne32 */
4173 CHECK_FPU_FEATURE(dc
, VIS1
);
4174 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4175 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4176 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4177 gen_movl_TN_reg(rd
, cpu_dst
);
4179 case 0x028: /* VIS I fcmpgt16 */
4180 CHECK_FPU_FEATURE(dc
, VIS1
);
4181 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4182 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4183 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4184 gen_movl_TN_reg(rd
, cpu_dst
);
4186 case 0x02a: /* VIS I fcmpeq16 */
4187 CHECK_FPU_FEATURE(dc
, VIS1
);
4188 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4189 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4190 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4191 gen_movl_TN_reg(rd
, cpu_dst
);
4193 case 0x02c: /* VIS I fcmpgt32 */
4194 CHECK_FPU_FEATURE(dc
, VIS1
);
4195 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4196 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4197 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4198 gen_movl_TN_reg(rd
, cpu_dst
);
4200 case 0x02e: /* VIS I fcmpeq32 */
4201 CHECK_FPU_FEATURE(dc
, VIS1
);
4202 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4203 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4204 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4205 gen_movl_TN_reg(rd
, cpu_dst
);
4207 case 0x031: /* VIS I fmul8x16 */
4208 CHECK_FPU_FEATURE(dc
, VIS1
);
4209 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4211 case 0x033: /* VIS I fmul8x16au */
4212 CHECK_FPU_FEATURE(dc
, VIS1
);
4213 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4215 case 0x035: /* VIS I fmul8x16al */
4216 CHECK_FPU_FEATURE(dc
, VIS1
);
4217 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4219 case 0x036: /* VIS I fmul8sux16 */
4220 CHECK_FPU_FEATURE(dc
, VIS1
);
4221 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4223 case 0x037: /* VIS I fmul8ulx16 */
4224 CHECK_FPU_FEATURE(dc
, VIS1
);
4225 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4227 case 0x038: /* VIS I fmuld8sux16 */
4228 CHECK_FPU_FEATURE(dc
, VIS1
);
4229 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4231 case 0x039: /* VIS I fmuld8ulx16 */
4232 CHECK_FPU_FEATURE(dc
, VIS1
);
4233 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4235 case 0x03a: /* VIS I fpack32 */
4236 CHECK_FPU_FEATURE(dc
, VIS1
);
4237 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4239 case 0x03b: /* VIS I fpack16 */
4240 CHECK_FPU_FEATURE(dc
, VIS1
);
4241 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4242 cpu_dst_32
= gen_dest_fpr_F();
4243 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4244 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4246 case 0x03d: /* VIS I fpackfix */
4247 CHECK_FPU_FEATURE(dc
, VIS1
);
4248 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4249 cpu_dst_32
= gen_dest_fpr_F();
4250 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4251 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4253 case 0x03e: /* VIS I pdist */
4254 CHECK_FPU_FEATURE(dc
, VIS1
);
4255 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
4257 case 0x048: /* VIS I faligndata */
4258 CHECK_FPU_FEATURE(dc
, VIS1
);
4259 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
4261 case 0x04b: /* VIS I fpmerge */
4262 CHECK_FPU_FEATURE(dc
, VIS1
);
4263 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
4265 case 0x04c: /* VIS II bshuffle */
4266 CHECK_FPU_FEATURE(dc
, VIS2
);
4267 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
4269 case 0x04d: /* VIS I fexpand */
4270 CHECK_FPU_FEATURE(dc
, VIS1
);
4271 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
4273 case 0x050: /* VIS I fpadd16 */
4274 CHECK_FPU_FEATURE(dc
, VIS1
);
4275 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
4277 case 0x051: /* VIS I fpadd16s */
4278 CHECK_FPU_FEATURE(dc
, VIS1
);
4279 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
4281 case 0x052: /* VIS I fpadd32 */
4282 CHECK_FPU_FEATURE(dc
, VIS1
);
4283 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
4285 case 0x053: /* VIS I fpadd32s */
4286 CHECK_FPU_FEATURE(dc
, VIS1
);
4287 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
4289 case 0x054: /* VIS I fpsub16 */
4290 CHECK_FPU_FEATURE(dc
, VIS1
);
4291 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
4293 case 0x055: /* VIS I fpsub16s */
4294 CHECK_FPU_FEATURE(dc
, VIS1
);
4295 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
4297 case 0x056: /* VIS I fpsub32 */
4298 CHECK_FPU_FEATURE(dc
, VIS1
);
4299 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
4301 case 0x057: /* VIS I fpsub32s */
4302 CHECK_FPU_FEATURE(dc
, VIS1
);
4303 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
4305 case 0x060: /* VIS I fzero */
4306 CHECK_FPU_FEATURE(dc
, VIS1
);
4307 cpu_dst_64
= gen_dest_fpr_D();
4308 tcg_gen_movi_i64(cpu_dst_64
, 0);
4309 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4311 case 0x061: /* VIS I fzeros */
4312 CHECK_FPU_FEATURE(dc
, VIS1
);
4313 cpu_dst_32
= gen_dest_fpr_F();
4314 tcg_gen_movi_i32(cpu_dst_32
, 0);
4315 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4317 case 0x062: /* VIS I fnor */
4318 CHECK_FPU_FEATURE(dc
, VIS1
);
4319 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
4321 case 0x063: /* VIS I fnors */
4322 CHECK_FPU_FEATURE(dc
, VIS1
);
4323 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
4325 case 0x064: /* VIS I fandnot2 */
4326 CHECK_FPU_FEATURE(dc
, VIS1
);
4327 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
4329 case 0x065: /* VIS I fandnot2s */
4330 CHECK_FPU_FEATURE(dc
, VIS1
);
4331 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
4333 case 0x066: /* VIS I fnot2 */
4334 CHECK_FPU_FEATURE(dc
, VIS1
);
4335 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
4337 case 0x067: /* VIS I fnot2s */
4338 CHECK_FPU_FEATURE(dc
, VIS1
);
4339 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
4341 case 0x068: /* VIS I fandnot1 */
4342 CHECK_FPU_FEATURE(dc
, VIS1
);
4343 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
4345 case 0x069: /* VIS I fandnot1s */
4346 CHECK_FPU_FEATURE(dc
, VIS1
);
4347 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
4349 case 0x06a: /* VIS I fnot1 */
4350 CHECK_FPU_FEATURE(dc
, VIS1
);
4351 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
4353 case 0x06b: /* VIS I fnot1s */
4354 CHECK_FPU_FEATURE(dc
, VIS1
);
4355 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
4357 case 0x06c: /* VIS I fxor */
4358 CHECK_FPU_FEATURE(dc
, VIS1
);
4359 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
4361 case 0x06d: /* VIS I fxors */
4362 CHECK_FPU_FEATURE(dc
, VIS1
);
4363 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
4365 case 0x06e: /* VIS I fnand */
4366 CHECK_FPU_FEATURE(dc
, VIS1
);
4367 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
4369 case 0x06f: /* VIS I fnands */
4370 CHECK_FPU_FEATURE(dc
, VIS1
);
4371 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
4373 case 0x070: /* VIS I fand */
4374 CHECK_FPU_FEATURE(dc
, VIS1
);
4375 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
4377 case 0x071: /* VIS I fands */
4378 CHECK_FPU_FEATURE(dc
, VIS1
);
4379 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
4381 case 0x072: /* VIS I fxnor */
4382 CHECK_FPU_FEATURE(dc
, VIS1
);
4383 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
4385 case 0x073: /* VIS I fxnors */
4386 CHECK_FPU_FEATURE(dc
, VIS1
);
4387 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
4389 case 0x074: /* VIS I fsrc1 */
4390 CHECK_FPU_FEATURE(dc
, VIS1
);
4391 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4392 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4394 case 0x075: /* VIS I fsrc1s */
4395 CHECK_FPU_FEATURE(dc
, VIS1
);
4396 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4397 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4399 case 0x076: /* VIS I fornot2 */
4400 CHECK_FPU_FEATURE(dc
, VIS1
);
4401 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
4403 case 0x077: /* VIS I fornot2s */
4404 CHECK_FPU_FEATURE(dc
, VIS1
);
4405 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
4407 case 0x078: /* VIS I fsrc2 */
4408 CHECK_FPU_FEATURE(dc
, VIS1
);
4409 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4410 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4412 case 0x079: /* VIS I fsrc2s */
4413 CHECK_FPU_FEATURE(dc
, VIS1
);
4414 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4415 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4417 case 0x07a: /* VIS I fornot1 */
4418 CHECK_FPU_FEATURE(dc
, VIS1
);
4419 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
4421 case 0x07b: /* VIS I fornot1s */
4422 CHECK_FPU_FEATURE(dc
, VIS1
);
4423 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
4425 case 0x07c: /* VIS I for */
4426 CHECK_FPU_FEATURE(dc
, VIS1
);
4427 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
4429 case 0x07d: /* VIS I fors */
4430 CHECK_FPU_FEATURE(dc
, VIS1
);
4431 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
4433 case 0x07e: /* VIS I fone */
4434 CHECK_FPU_FEATURE(dc
, VIS1
);
4435 cpu_dst_64
= gen_dest_fpr_D();
4436 tcg_gen_movi_i64(cpu_dst_64
, -1);
4437 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4439 case 0x07f: /* VIS I fones */
4440 CHECK_FPU_FEATURE(dc
, VIS1
);
4441 cpu_dst_32
= gen_dest_fpr_F();
4442 tcg_gen_movi_i32(cpu_dst_32
, -1);
4443 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4445 case 0x080: /* VIS I shutdown */
4446 case 0x081: /* VIS II siam */
4455 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
4456 #ifdef TARGET_SPARC64
4461 #ifdef TARGET_SPARC64
4462 } else if (xop
== 0x39) { /* V9 return */
4466 cpu_src1
= get_src1(insn
, cpu_src1
);
4467 if (IS_IMM
) { /* immediate */
4468 simm
= GET_FIELDs(insn
, 19, 31);
4469 tcg_gen_addi_tl(cpu_dst
, cpu_src1
, simm
);
4470 } else { /* register */
4471 rs2
= GET_FIELD(insn
, 27, 31);
4473 gen_movl_reg_TN(rs2
, cpu_src2
);
4474 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4476 tcg_gen_mov_tl(cpu_dst
, cpu_src1
);
4478 gen_helper_restore(cpu_env
);
4480 r_const
= tcg_const_i32(3);
4481 gen_helper_check_align(cpu_env
, cpu_dst
, r_const
);
4482 tcg_temp_free_i32(r_const
);
4483 tcg_gen_mov_tl(cpu_npc
, cpu_dst
);
4484 dc
->npc
= DYNAMIC_PC
;
4488 cpu_src1
= get_src1(insn
, cpu_src1
);
4489 if (IS_IMM
) { /* immediate */
4490 simm
= GET_FIELDs(insn
, 19, 31);
4491 tcg_gen_addi_tl(cpu_dst
, cpu_src1
, simm
);
4492 } else { /* register */
4493 rs2
= GET_FIELD(insn
, 27, 31);
4495 gen_movl_reg_TN(rs2
, cpu_src2
);
4496 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4498 tcg_gen_mov_tl(cpu_dst
, cpu_src1
);
4501 case 0x38: /* jmpl */
4506 r_pc
= tcg_const_tl(dc
->pc
);
4507 gen_movl_TN_reg(rd
, r_pc
);
4508 tcg_temp_free(r_pc
);
4510 r_const
= tcg_const_i32(3);
4511 gen_helper_check_align(cpu_env
, cpu_dst
, r_const
);
4512 tcg_temp_free_i32(r_const
);
4513 gen_address_mask(dc
, cpu_dst
);
4514 tcg_gen_mov_tl(cpu_npc
, cpu_dst
);
4515 dc
->npc
= DYNAMIC_PC
;
4518 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4519 case 0x39: /* rett, V9 return */
4523 if (!supervisor(dc
))
4526 r_const
= tcg_const_i32(3);
4527 gen_helper_check_align(cpu_env
, cpu_dst
, r_const
);
4528 tcg_temp_free_i32(r_const
);
4529 tcg_gen_mov_tl(cpu_npc
, cpu_dst
);
4530 dc
->npc
= DYNAMIC_PC
;
4531 gen_helper_rett(cpu_env
);
4535 case 0x3b: /* flush */
4536 if (!((dc
)->def
->features
& CPU_FEATURE_FLUSH
))
4540 case 0x3c: /* save */
4542 gen_helper_save(cpu_env
);
4543 gen_movl_TN_reg(rd
, cpu_dst
);
4545 case 0x3d: /* restore */
4547 gen_helper_restore(cpu_env
);
4548 gen_movl_TN_reg(rd
, cpu_dst
);
4550 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4551 case 0x3e: /* V9 done/retry */
4555 if (!supervisor(dc
))
4557 dc
->npc
= DYNAMIC_PC
;
4558 dc
->pc
= DYNAMIC_PC
;
4559 gen_helper_done(cpu_env
);
4562 if (!supervisor(dc
))
4564 dc
->npc
= DYNAMIC_PC
;
4565 dc
->pc
= DYNAMIC_PC
;
4566 gen_helper_retry(cpu_env
);
4581 case 3: /* load/store instructions */
4583 unsigned int xop
= GET_FIELD(insn
, 7, 12);
4585 /* flush pending conditional evaluations before exposing
4587 if (dc
->cc_op
!= CC_OP_FLAGS
) {
4588 dc
->cc_op
= CC_OP_FLAGS
;
4589 gen_helper_compute_psr(cpu_env
);
4591 cpu_src1
= get_src1(insn
, cpu_src1
);
4592 if (xop
== 0x3c || xop
== 0x3e) { // V9 casa/casxa
4593 rs2
= GET_FIELD(insn
, 27, 31);
4594 gen_movl_reg_TN(rs2
, cpu_src2
);
4595 tcg_gen_mov_tl(cpu_addr
, cpu_src1
);
4596 } else if (IS_IMM
) { /* immediate */
4597 simm
= GET_FIELDs(insn
, 19, 31);
4598 tcg_gen_addi_tl(cpu_addr
, cpu_src1
, simm
);
4599 } else { /* register */
4600 rs2
= GET_FIELD(insn
, 27, 31);
4602 gen_movl_reg_TN(rs2
, cpu_src2
);
4603 tcg_gen_add_tl(cpu_addr
, cpu_src1
, cpu_src2
);
4605 tcg_gen_mov_tl(cpu_addr
, cpu_src1
);
4607 if (xop
< 4 || (xop
> 7 && xop
< 0x14 && xop
!= 0x0e) ||
4608 (xop
> 0x17 && xop
<= 0x1d ) ||
4609 (xop
> 0x2c && xop
<= 0x33) || xop
== 0x1f || xop
== 0x3d) {
4611 case 0x0: /* ld, V9 lduw, load unsigned word */
4612 gen_address_mask(dc
, cpu_addr
);
4613 tcg_gen_qemu_ld32u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4615 case 0x1: /* ldub, load unsigned byte */
4616 gen_address_mask(dc
, cpu_addr
);
4617 tcg_gen_qemu_ld8u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4619 case 0x2: /* lduh, load unsigned halfword */
4620 gen_address_mask(dc
, cpu_addr
);
4621 tcg_gen_qemu_ld16u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4623 case 0x3: /* ldd, load double word */
4630 r_const
= tcg_const_i32(7);
4631 /* XXX remove alignment check */
4632 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4633 tcg_temp_free_i32(r_const
);
4634 gen_address_mask(dc
, cpu_addr
);
4635 tcg_gen_qemu_ld64(cpu_tmp64
, cpu_addr
, dc
->mem_idx
);
4636 tcg_gen_trunc_i64_tl(cpu_tmp0
, cpu_tmp64
);
4637 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0xffffffffULL
);
4638 gen_movl_TN_reg(rd
+ 1, cpu_tmp0
);
4639 tcg_gen_shri_i64(cpu_tmp64
, cpu_tmp64
, 32);
4640 tcg_gen_trunc_i64_tl(cpu_val
, cpu_tmp64
);
4641 tcg_gen_andi_tl(cpu_val
, cpu_val
, 0xffffffffULL
);
4644 case 0x9: /* ldsb, load signed byte */
4645 gen_address_mask(dc
, cpu_addr
);
4646 tcg_gen_qemu_ld8s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4648 case 0xa: /* ldsh, load signed halfword */
4649 gen_address_mask(dc
, cpu_addr
);
4650 tcg_gen_qemu_ld16s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4652 case 0xd: /* ldstub -- XXX: should be atomically */
4656 gen_address_mask(dc
, cpu_addr
);
4657 tcg_gen_qemu_ld8s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4658 r_const
= tcg_const_tl(0xff);
4659 tcg_gen_qemu_st8(r_const
, cpu_addr
, dc
->mem_idx
);
4660 tcg_temp_free(r_const
);
4663 case 0x0f: /* swap, swap register with memory. Also
4665 CHECK_IU_FEATURE(dc
, SWAP
);
4666 gen_movl_reg_TN(rd
, cpu_val
);
4667 gen_address_mask(dc
, cpu_addr
);
4668 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_addr
, dc
->mem_idx
);
4669 tcg_gen_qemu_st32(cpu_val
, cpu_addr
, dc
->mem_idx
);
4670 tcg_gen_mov_tl(cpu_val
, cpu_tmp0
);
4672 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4673 case 0x10: /* lda, V9 lduwa, load word alternate */
4674 #ifndef TARGET_SPARC64
4677 if (!supervisor(dc
))
4681 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 4, 0);
4683 case 0x11: /* lduba, load unsigned byte alternate */
4684 #ifndef TARGET_SPARC64
4687 if (!supervisor(dc
))
4691 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 1, 0);
4693 case 0x12: /* lduha, load unsigned halfword alternate */
4694 #ifndef TARGET_SPARC64
4697 if (!supervisor(dc
))
4701 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 2, 0);
4703 case 0x13: /* ldda, load double word alternate */
4704 #ifndef TARGET_SPARC64
4707 if (!supervisor(dc
))
4713 gen_ldda_asi(cpu_val
, cpu_addr
, insn
, rd
);
4715 case 0x19: /* ldsba, load signed byte alternate */
4716 #ifndef TARGET_SPARC64
4719 if (!supervisor(dc
))
4723 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 1, 1);
4725 case 0x1a: /* ldsha, load signed halfword alternate */
4726 #ifndef TARGET_SPARC64
4729 if (!supervisor(dc
))
4733 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 2, 1);
4735 case 0x1d: /* ldstuba -- XXX: should be atomically */
4736 #ifndef TARGET_SPARC64
4739 if (!supervisor(dc
))
4743 gen_ldstub_asi(cpu_val
, cpu_addr
, insn
);
4745 case 0x1f: /* swapa, swap reg with alt. memory. Also
4747 CHECK_IU_FEATURE(dc
, SWAP
);
4748 #ifndef TARGET_SPARC64
4751 if (!supervisor(dc
))
4755 gen_movl_reg_TN(rd
, cpu_val
);
4756 gen_swap_asi(cpu_val
, cpu_addr
, insn
);
4759 #ifndef TARGET_SPARC64
4760 case 0x30: /* ldc */
4761 case 0x31: /* ldcsr */
4762 case 0x33: /* lddc */
4766 #ifdef TARGET_SPARC64
4767 case 0x08: /* V9 ldsw */
4768 gen_address_mask(dc
, cpu_addr
);
4769 tcg_gen_qemu_ld32s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4771 case 0x0b: /* V9 ldx */
4772 gen_address_mask(dc
, cpu_addr
);
4773 tcg_gen_qemu_ld64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4775 case 0x18: /* V9 ldswa */
4777 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 4, 1);
4779 case 0x1b: /* V9 ldxa */
4781 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 8, 0);
4783 case 0x2d: /* V9 prefetch, no effect */
4785 case 0x30: /* V9 ldfa */
4786 if (gen_trap_ifnofpu(dc
)) {
4790 gen_ldf_asi(cpu_addr
, insn
, 4, rd
);
4791 gen_update_fprs_dirty(rd
);
4793 case 0x33: /* V9 lddfa */
4794 if (gen_trap_ifnofpu(dc
)) {
4798 gen_ldf_asi(cpu_addr
, insn
, 8, DFPREG(rd
));
4799 gen_update_fprs_dirty(DFPREG(rd
));
4801 case 0x3d: /* V9 prefetcha, no effect */
4803 case 0x32: /* V9 ldqfa */
4804 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4805 if (gen_trap_ifnofpu(dc
)) {
4809 gen_ldf_asi(cpu_addr
, insn
, 16, QFPREG(rd
));
4810 gen_update_fprs_dirty(QFPREG(rd
));
4816 gen_movl_TN_reg(rd
, cpu_val
);
4817 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4820 } else if (xop
>= 0x20 && xop
< 0x24) {
4821 if (gen_trap_ifnofpu(dc
)) {
4826 case 0x20: /* ldf, load fpreg */
4827 gen_address_mask(dc
, cpu_addr
);
4828 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_addr
, dc
->mem_idx
);
4829 cpu_dst_32
= gen_dest_fpr_F();
4830 tcg_gen_trunc_tl_i32(cpu_dst_32
, cpu_tmp0
);
4831 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4833 case 0x21: /* ldfsr, V9 ldxfsr */
4834 #ifdef TARGET_SPARC64
4835 gen_address_mask(dc
, cpu_addr
);
4837 tcg_gen_qemu_ld64(cpu_tmp64
, cpu_addr
, dc
->mem_idx
);
4838 gen_helper_ldxfsr(cpu_env
, cpu_tmp64
);
4840 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_addr
, dc
->mem_idx
);
4841 tcg_gen_trunc_tl_i32(cpu_tmp32
, cpu_tmp0
);
4842 gen_helper_ldfsr(cpu_env
, cpu_tmp32
);
4846 tcg_gen_qemu_ld32u(cpu_tmp32
, cpu_addr
, dc
->mem_idx
);
4847 gen_helper_ldfsr(cpu_env
, cpu_tmp32
);
4851 case 0x22: /* ldqf, load quad fpreg */
4855 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4856 r_const
= tcg_const_i32(dc
->mem_idx
);
4857 gen_address_mask(dc
, cpu_addr
);
4858 gen_helper_ldqf(cpu_env
, cpu_addr
, r_const
);
4859 tcg_temp_free_i32(r_const
);
4860 gen_op_store_QT0_fpr(QFPREG(rd
));
4861 gen_update_fprs_dirty(QFPREG(rd
));
4864 case 0x23: /* lddf, load double fpreg */
4865 gen_address_mask(dc
, cpu_addr
);
4866 cpu_dst_64
= gen_dest_fpr_D();
4867 tcg_gen_qemu_ld64(cpu_dst_64
, cpu_addr
, dc
->mem_idx
);
4868 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4873 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
4874 xop
== 0xe || xop
== 0x1e) {
4875 gen_movl_reg_TN(rd
, cpu_val
);
4877 case 0x4: /* st, store word */
4878 gen_address_mask(dc
, cpu_addr
);
4879 tcg_gen_qemu_st32(cpu_val
, cpu_addr
, dc
->mem_idx
);
4881 case 0x5: /* stb, store byte */
4882 gen_address_mask(dc
, cpu_addr
);
4883 tcg_gen_qemu_st8(cpu_val
, cpu_addr
, dc
->mem_idx
);
4885 case 0x6: /* sth, store halfword */
4886 gen_address_mask(dc
, cpu_addr
);
4887 tcg_gen_qemu_st16(cpu_val
, cpu_addr
, dc
->mem_idx
);
4889 case 0x7: /* std, store double word */
4896 gen_address_mask(dc
, cpu_addr
);
4897 r_const
= tcg_const_i32(7);
4898 /* XXX remove alignment check */
4899 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4900 tcg_temp_free_i32(r_const
);
4901 gen_movl_reg_TN(rd
+ 1, cpu_tmp0
);
4902 tcg_gen_concat_tl_i64(cpu_tmp64
, cpu_tmp0
, cpu_val
);
4903 tcg_gen_qemu_st64(cpu_tmp64
, cpu_addr
, dc
->mem_idx
);
4906 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4907 case 0x14: /* sta, V9 stwa, store word alternate */
4908 #ifndef TARGET_SPARC64
4911 if (!supervisor(dc
))
4915 gen_st_asi(cpu_val
, cpu_addr
, insn
, 4);
4916 dc
->npc
= DYNAMIC_PC
;
4918 case 0x15: /* stba, store byte alternate */
4919 #ifndef TARGET_SPARC64
4922 if (!supervisor(dc
))
4926 gen_st_asi(cpu_val
, cpu_addr
, insn
, 1);
4927 dc
->npc
= DYNAMIC_PC
;
4929 case 0x16: /* stha, store halfword alternate */
4930 #ifndef TARGET_SPARC64
4933 if (!supervisor(dc
))
4937 gen_st_asi(cpu_val
, cpu_addr
, insn
, 2);
4938 dc
->npc
= DYNAMIC_PC
;
4940 case 0x17: /* stda, store double word alternate */
4941 #ifndef TARGET_SPARC64
4944 if (!supervisor(dc
))
4951 gen_stda_asi(cpu_val
, cpu_addr
, insn
, rd
);
4955 #ifdef TARGET_SPARC64
4956 case 0x0e: /* V9 stx */
4957 gen_address_mask(dc
, cpu_addr
);
4958 tcg_gen_qemu_st64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4960 case 0x1e: /* V9 stxa */
4962 gen_st_asi(cpu_val
, cpu_addr
, insn
, 8);
4963 dc
->npc
= DYNAMIC_PC
;
4969 } else if (xop
> 0x23 && xop
< 0x28) {
4970 if (gen_trap_ifnofpu(dc
)) {
4975 case 0x24: /* stf, store fpreg */
4976 gen_address_mask(dc
, cpu_addr
);
4977 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
4978 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_src1_32
);
4979 tcg_gen_qemu_st32(cpu_tmp0
, cpu_addr
, dc
->mem_idx
);
4981 case 0x25: /* stfsr, V9 stxfsr */
4982 #ifdef TARGET_SPARC64
4983 gen_address_mask(dc
, cpu_addr
);
4984 tcg_gen_ld_i64(cpu_tmp64
, cpu_env
, offsetof(CPUSPARCState
, fsr
));
4986 tcg_gen_qemu_st64(cpu_tmp64
, cpu_addr
, dc
->mem_idx
);
4988 tcg_gen_qemu_st32(cpu_tmp64
, cpu_addr
, dc
->mem_idx
);
4990 tcg_gen_ld_i32(cpu_tmp32
, cpu_env
, offsetof(CPUSPARCState
, fsr
));
4991 tcg_gen_qemu_st32(cpu_tmp32
, cpu_addr
, dc
->mem_idx
);
4995 #ifdef TARGET_SPARC64
4996 /* V9 stqf, store quad fpreg */
5000 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5001 gen_op_load_fpr_QT0(QFPREG(rd
));
5002 r_const
= tcg_const_i32(dc
->mem_idx
);
5003 gen_address_mask(dc
, cpu_addr
);
5004 gen_helper_stqf(cpu_env
, cpu_addr
, r_const
);
5005 tcg_temp_free_i32(r_const
);
5008 #else /* !TARGET_SPARC64 */
5009 /* stdfq, store floating point queue */
5010 #if defined(CONFIG_USER_ONLY)
5013 if (!supervisor(dc
))
5015 if (gen_trap_ifnofpu(dc
)) {
5021 case 0x27: /* stdf, store double fpreg */
5022 gen_address_mask(dc
, cpu_addr
);
5023 cpu_src1_64
= gen_load_fpr_D(dc
, rd
);
5024 tcg_gen_qemu_st64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
);
5029 } else if (xop
> 0x33 && xop
< 0x3f) {
5032 #ifdef TARGET_SPARC64
5033 case 0x34: /* V9 stfa */
5034 if (gen_trap_ifnofpu(dc
)) {
5037 gen_stf_asi(cpu_addr
, insn
, 4, rd
);
5039 case 0x36: /* V9 stqfa */
5043 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5044 if (gen_trap_ifnofpu(dc
)) {
5047 r_const
= tcg_const_i32(7);
5048 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
5049 tcg_temp_free_i32(r_const
);
5050 gen_stf_asi(cpu_addr
, insn
, 16, QFPREG(rd
));
5053 case 0x37: /* V9 stdfa */
5054 if (gen_trap_ifnofpu(dc
)) {
5057 gen_stf_asi(cpu_addr
, insn
, 8, DFPREG(rd
));
5059 case 0x3c: /* V9 casa */
5060 gen_cas_asi(cpu_val
, cpu_addr
, cpu_src2
, insn
, rd
);
5061 gen_movl_TN_reg(rd
, cpu_val
);
5063 case 0x3e: /* V9 casxa */
5064 gen_casx_asi(cpu_val
, cpu_addr
, cpu_src2
, insn
, rd
);
5065 gen_movl_TN_reg(rd
, cpu_val
);
5068 case 0x34: /* stc */
5069 case 0x35: /* stcsr */
5070 case 0x36: /* stdcq */
5071 case 0x37: /* stdc */
5082 /* default case for non jump instructions */
5083 if (dc
->npc
== DYNAMIC_PC
) {
5084 dc
->pc
= DYNAMIC_PC
;
5086 } else if (dc
->npc
== JUMP_PC
) {
5087 /* we can do a static jump */
5088 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
5092 dc
->npc
= dc
->npc
+ 4;
5101 r_const
= tcg_const_i32(TT_ILL_INSN
);
5102 gen_helper_raise_exception(cpu_env
, r_const
);
5103 tcg_temp_free_i32(r_const
);
5112 r_const
= tcg_const_i32(TT_UNIMP_FLUSH
);
5113 gen_helper_raise_exception(cpu_env
, r_const
);
5114 tcg_temp_free_i32(r_const
);
5118 #if !defined(CONFIG_USER_ONLY)
5124 r_const
= tcg_const_i32(TT_PRIV_INSN
);
5125 gen_helper_raise_exception(cpu_env
, r_const
);
5126 tcg_temp_free_i32(r_const
);
5133 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP
);
5136 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5139 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR
);
5143 #ifndef TARGET_SPARC64
5149 r_const
= tcg_const_i32(TT_NCP_INSN
);
5150 gen_helper_raise_exception(cpu_env
, r_const
);
5151 tcg_temp_free(r_const
);
5157 tcg_temp_free(cpu_tmp1
);
5158 tcg_temp_free(cpu_tmp2
);
5159 if (dc
->n_t32
!= 0) {
5161 for (i
= dc
->n_t32
- 1; i
>= 0; --i
) {
5162 tcg_temp_free_i32(dc
->t32
[i
]);
5168 static inline void gen_intermediate_code_internal(TranslationBlock
* tb
,
5169 int spc
, CPUSPARCState
*env
)
5171 target_ulong pc_start
, last_pc
;
5172 uint16_t *gen_opc_end
;
5173 DisasContext dc1
, *dc
= &dc1
;
5180 memset(dc
, 0, sizeof(DisasContext
));
5185 dc
->npc
= (target_ulong
) tb
->cs_base
;
5186 dc
->cc_op
= CC_OP_DYNAMIC
;
5187 dc
->mem_idx
= cpu_mmu_index(env
);
5189 dc
->fpu_enabled
= tb_fpu_enabled(tb
->flags
);
5190 dc
->address_mask_32bit
= tb_am_enabled(tb
->flags
);
5191 dc
->singlestep
= (env
->singlestep_enabled
|| singlestep
);
5192 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
5194 cpu_tmp0
= tcg_temp_new();
5195 cpu_tmp32
= tcg_temp_new_i32();
5196 cpu_tmp64
= tcg_temp_new_i64();
5198 cpu_dst
= tcg_temp_local_new();
5201 cpu_val
= tcg_temp_local_new();
5202 cpu_addr
= tcg_temp_local_new();
5205 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5207 max_insns
= CF_COUNT_MASK
;
5210 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
5211 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
5212 if (bp
->pc
== dc
->pc
) {
5213 if (dc
->pc
!= pc_start
)
5215 gen_helper_debug(cpu_env
);
5223 qemu_log("Search PC...\n");
5224 j
= gen_opc_ptr
- gen_opc_buf
;
5228 gen_opc_instr_start
[lj
++] = 0;
5229 gen_opc_pc
[lj
] = dc
->pc
;
5230 gen_opc_npc
[lj
] = dc
->npc
;
5231 gen_opc_instr_start
[lj
] = 1;
5232 gen_opc_icount
[lj
] = num_insns
;
5235 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
5238 insn
= cpu_ldl_code(env
, dc
->pc
);
5239 disas_sparc_insn(dc
, insn
);
5244 /* if the next PC is different, we abort now */
5245 if (dc
->pc
!= (last_pc
+ 4))
5247 /* if we reach a page boundary, we stop generation so that the
5248 PC of a TT_TFAULT exception is always in the right page */
5249 if ((dc
->pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
5251 /* if single step mode, we generate only one instruction and
5252 generate an exception */
5253 if (dc
->singlestep
) {
5256 } while ((gen_opc_ptr
< gen_opc_end
) &&
5257 (dc
->pc
- pc_start
) < (TARGET_PAGE_SIZE
- 32) &&
5258 num_insns
< max_insns
);
5261 tcg_temp_free(cpu_addr
);
5262 tcg_temp_free(cpu_val
);
5263 tcg_temp_free(cpu_dst
);
5264 tcg_temp_free_i64(cpu_tmp64
);
5265 tcg_temp_free_i32(cpu_tmp32
);
5266 tcg_temp_free(cpu_tmp0
);
5268 if (tb
->cflags
& CF_LAST_IO
)
5271 if (dc
->pc
!= DYNAMIC_PC
&&
5272 (dc
->npc
!= DYNAMIC_PC
&& dc
->npc
!= JUMP_PC
)) {
5273 /* static PC and NPC: we can use direct chaining */
5274 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
5276 if (dc
->pc
!= DYNAMIC_PC
)
5277 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
5282 gen_icount_end(tb
, num_insns
);
5283 *gen_opc_ptr
= INDEX_op_end
;
5285 j
= gen_opc_ptr
- gen_opc_buf
;
5288 gen_opc_instr_start
[lj
++] = 0;
5292 gen_opc_jump_pc
[0] = dc
->jump_pc
[0];
5293 gen_opc_jump_pc
[1] = dc
->jump_pc
[1];
5295 tb
->size
= last_pc
+ 4 - pc_start
;
5296 tb
->icount
= num_insns
;
5299 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5300 qemu_log("--------------\n");
5301 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5302 log_target_disas(pc_start
, last_pc
+ 4 - pc_start
, 0);
5308 void gen_intermediate_code(CPUSPARCState
* env
, TranslationBlock
* tb
)
5310 gen_intermediate_code_internal(tb
, 0, env
);
5313 void gen_intermediate_code_pc(CPUSPARCState
* env
, TranslationBlock
* tb
)
5315 gen_intermediate_code_internal(tb
, 1, env
);
5318 void gen_intermediate_code_init(CPUSPARCState
*env
)
5322 static const char * const gregnames
[8] = {
5323 NULL
, // g0 not used
5332 static const char * const fregnames
[32] = {
5333 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5334 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5335 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5336 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5339 /* init various static tables */
5343 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
5344 cpu_regwptr
= tcg_global_mem_new_ptr(TCG_AREG0
,
5345 offsetof(CPUSPARCState
, regwptr
),
5347 #ifdef TARGET_SPARC64
5348 cpu_xcc
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, xcc
),
5350 cpu_asi
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, asi
),
5352 cpu_fprs
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, fprs
),
5354 cpu_gsr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, gsr
),
5356 cpu_tick_cmpr
= tcg_global_mem_new(TCG_AREG0
,
5357 offsetof(CPUSPARCState
, tick_cmpr
),
5359 cpu_stick_cmpr
= tcg_global_mem_new(TCG_AREG0
,
5360 offsetof(CPUSPARCState
, stick_cmpr
),
5362 cpu_hstick_cmpr
= tcg_global_mem_new(TCG_AREG0
,
5363 offsetof(CPUSPARCState
, hstick_cmpr
),
5365 cpu_hintp
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, hintp
),
5367 cpu_htba
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, htba
),
5369 cpu_hver
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, hver
),
5371 cpu_ssr
= tcg_global_mem_new(TCG_AREG0
,
5372 offsetof(CPUSPARCState
, ssr
), "ssr");
5373 cpu_ver
= tcg_global_mem_new(TCG_AREG0
,
5374 offsetof(CPUSPARCState
, version
), "ver");
5375 cpu_softint
= tcg_global_mem_new_i32(TCG_AREG0
,
5376 offsetof(CPUSPARCState
, softint
),
5379 cpu_wim
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, wim
),
5382 cpu_cond
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cond
),
5384 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cc_src
),
5386 cpu_cc_src2
= tcg_global_mem_new(TCG_AREG0
,
5387 offsetof(CPUSPARCState
, cc_src2
),
5389 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cc_dst
),
5391 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, cc_op
),
5393 cpu_psr
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, psr
),
5395 cpu_fsr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, fsr
),
5397 cpu_pc
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, pc
),
5399 cpu_npc
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, npc
),
5401 cpu_y
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, y
), "y");
5402 #ifndef CONFIG_USER_ONLY
5403 cpu_tbr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, tbr
),
5406 for (i
= 1; i
< 8; i
++) {
5407 cpu_gregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
5408 offsetof(CPUSPARCState
, gregs
[i
]),
5411 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5412 cpu_fpr
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
5413 offsetof(CPUSPARCState
, fpr
[i
]),
5417 /* register helpers */
5419 #define GEN_HELPER 2
5424 void restore_state_to_opc(CPUSPARCState
*env
, TranslationBlock
*tb
, int pc_pos
)
5427 env
->pc
= gen_opc_pc
[pc_pos
];
5428 npc
= gen_opc_npc
[pc_pos
];
5430 /* dynamic NPC: already stored */
5431 } else if (npc
== 2) {
5432 /* jump PC: use 'cond' and the jump targets of the translation */
5434 env
->npc
= gen_opc_jump_pc
[0];
5436 env
->npc
= gen_opc_jump_pc
[1];
5442 /* flush pending conditional evaluations before exposing cpu state */
5443 if (CC_OP
!= CC_OP_FLAGS
) {
5444 helper_compute_psr(env
);