4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003-2005 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, see <http://www.gnu.org/licenses/>.
37 #define DYNAMIC_PC 1 /* dynamic pc value */
38 #define JUMP_PC 2 /* dynamic pc value which takes only two values
39 according to jump_pc[T2] */
41 /* global register indexes */
42 static TCGv_ptr cpu_env
, cpu_regwptr
;
43 static TCGv cpu_cc_src
, cpu_cc_src2
, cpu_cc_dst
;
44 static TCGv_i32 cpu_cc_op
;
45 static TCGv_i32 cpu_psr
;
46 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
, cpu_gregs
[8];
48 #ifndef CONFIG_USER_ONLY
51 static TCGv cpu_cond
, cpu_dst
;
53 static TCGv_i32 cpu_xcc
, cpu_asi
, cpu_fprs
;
55 static TCGv cpu_tick_cmpr
, cpu_stick_cmpr
, cpu_hstick_cmpr
;
56 static TCGv cpu_hintp
, cpu_htba
, cpu_hver
, cpu_ssr
, cpu_ver
;
57 static TCGv_i32 cpu_softint
;
61 /* local register indexes (only used inside old micro ops) */
63 /* Floating point registers */
64 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
66 static target_ulong gen_opc_npc
[OPC_BUF_SIZE
];
67 static target_ulong gen_opc_jump_pc
[2];
69 #include "gen-icount.h"
71 typedef struct DisasContext
{
72 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
73 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
74 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
78 int address_mask_32bit
;
80 uint32_t cc_op
; /* current CC operation */
81 struct TranslationBlock
*tb
;
96 // This function uses non-native bit order
97 #define GET_FIELD(X, FROM, TO) \
98 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
100 // This function uses the order in the manuals, i.e. bit 0 is 2^0
101 #define GET_FIELD_SP(X, FROM, TO) \
102 GET_FIELD(X, 31 - (TO), 31 - (FROM))
104 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
105 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
107 #ifdef TARGET_SPARC64
108 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
109 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
111 #define DFPREG(r) (r & 0x1e)
112 #define QFPREG(r) (r & 0x1c)
115 #define UA2005_HTRAP_MASK 0xff
116 #define V8_TRAP_MASK 0x7f
118 static int sign_extend(int x
, int len
)
121 return (x
<< len
) >> len
;
124 #define IS_IMM (insn & (1<<13))
126 static inline TCGv_i32
get_temp_i32(DisasContext
*dc
)
129 assert(dc
->n_t32
< ARRAY_SIZE(dc
->t32
));
130 dc
->t32
[dc
->n_t32
++] = t
= tcg_temp_new_i32();
134 static inline TCGv
get_temp_tl(DisasContext
*dc
)
137 assert(dc
->n_ttl
< ARRAY_SIZE(dc
->ttl
));
138 dc
->ttl
[dc
->n_ttl
++] = t
= tcg_temp_new();
/* On SPARC64, mark in FPRS which half of the FP register file RD lives
   in (bit 0 for f0..f31, bit 1 for f32..f63).  No-op elsewhere.  */
static inline void gen_update_fprs_dirty(int rd)
{
#if defined(TARGET_SPARC64)
    tcg_gen_ori_i32(cpu_fprs, cpu_fprs, (rd < 32) ? 1 : 2);
#endif
}
149 /* floating point registers moves */
150 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
152 #if TCG_TARGET_REG_BITS == 32
154 return TCGV_LOW(cpu_fpr
[src
/ 2]);
156 return TCGV_HIGH(cpu_fpr
[src
/ 2]);
160 return MAKE_TCGV_I32(GET_TCGV_I64(cpu_fpr
[src
/ 2]));
162 TCGv_i32 ret
= get_temp_i32(dc
);
163 TCGv_i64 t
= tcg_temp_new_i64();
165 tcg_gen_shri_i64(t
, cpu_fpr
[src
/ 2], 32);
166 tcg_gen_trunc_i64_i32(ret
, t
);
167 tcg_temp_free_i64(t
);
174 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
176 #if TCG_TARGET_REG_BITS == 32
178 tcg_gen_mov_i32(TCGV_LOW(cpu_fpr
[dst
/ 2]), v
);
180 tcg_gen_mov_i32(TCGV_HIGH(cpu_fpr
[dst
/ 2]), v
);
183 TCGv_i64 t
= MAKE_TCGV_I64(GET_TCGV_I32(v
));
184 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
185 (dst
& 1 ? 0 : 32), 32);
187 gen_update_fprs_dirty(dst
);
190 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
192 return get_temp_i32(dc
);
195 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
198 return cpu_fpr
[src
/ 2];
201 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
204 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
205 gen_update_fprs_dirty(dst
);
208 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
210 return cpu_fpr
[DFPREG(dst
) / 2];
213 static void gen_op_load_fpr_QT0(unsigned int src
)
215 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
216 offsetof(CPU_QuadU
, ll
.upper
));
217 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
218 offsetof(CPU_QuadU
, ll
.lower
));
221 static void gen_op_load_fpr_QT1(unsigned int src
)
223 tcg_gen_st_i64(cpu_fpr
[src
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
224 offsetof(CPU_QuadU
, ll
.upper
));
225 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt1
) +
226 offsetof(CPU_QuadU
, ll
.lower
));
229 static void gen_op_store_QT0_fpr(unsigned int dst
)
231 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
232 offsetof(CPU_QuadU
, ll
.upper
));
233 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], cpu_env
, offsetof(CPUSPARCState
, qt0
) +
234 offsetof(CPU_QuadU
, ll
.lower
));
237 #ifdef TARGET_SPARC64
238 static void gen_move_Q(unsigned int rd
, unsigned int rs
)
243 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
244 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
245 gen_update_fprs_dirty(rd
);
250 #ifdef CONFIG_USER_ONLY
251 #define supervisor(dc) 0
252 #ifdef TARGET_SPARC64
253 #define hypervisor(dc) 0
256 #define supervisor(dc) (dc->mem_idx >= MMU_KERNEL_IDX)
257 #ifdef TARGET_SPARC64
258 #define hypervisor(dc) (dc->mem_idx == MMU_HYPV_IDX)
263 #ifdef TARGET_SPARC64
265 #define AM_CHECK(dc) ((dc)->address_mask_32bit)
267 #define AM_CHECK(dc) (1)
271 static inline void gen_address_mask(DisasContext
*dc
, TCGv addr
)
273 #ifdef TARGET_SPARC64
275 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
279 static inline TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
281 if (reg
== 0 || reg
>= 8) {
282 TCGv t
= get_temp_tl(dc
);
284 tcg_gen_movi_tl(t
, 0);
286 tcg_gen_ld_tl(t
, cpu_regwptr
, (reg
- 8) * sizeof(target_ulong
));
290 return cpu_gregs
[reg
];
294 static inline void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
298 tcg_gen_mov_tl(cpu_gregs
[reg
], v
);
300 tcg_gen_st_tl(v
, cpu_regwptr
, (reg
- 8) * sizeof(target_ulong
));
305 static inline TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
307 if (reg
== 0 || reg
>= 8) {
308 return get_temp_tl(dc
);
310 return cpu_gregs
[reg
];
314 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
,
315 target_ulong pc
, target_ulong npc
)
317 TranslationBlock
*tb
;
320 if ((pc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) &&
321 (npc
& TARGET_PAGE_MASK
) == (tb
->pc
& TARGET_PAGE_MASK
) &&
323 /* jump to same page: we can use a direct jump */
324 tcg_gen_goto_tb(tb_num
);
325 tcg_gen_movi_tl(cpu_pc
, pc
);
326 tcg_gen_movi_tl(cpu_npc
, npc
);
327 tcg_gen_exit_tb((tcg_target_long
)tb
+ tb_num
);
329 /* jump to another page: currently not optimized */
330 tcg_gen_movi_tl(cpu_pc
, pc
);
331 tcg_gen_movi_tl(cpu_npc
, npc
);
337 static inline void gen_mov_reg_N(TCGv reg
, TCGv_i32 src
)
339 tcg_gen_extu_i32_tl(reg
, src
);
340 tcg_gen_shri_tl(reg
, reg
, PSR_NEG_SHIFT
);
341 tcg_gen_andi_tl(reg
, reg
, 0x1);
344 static inline void gen_mov_reg_Z(TCGv reg
, TCGv_i32 src
)
346 tcg_gen_extu_i32_tl(reg
, src
);
347 tcg_gen_shri_tl(reg
, reg
, PSR_ZERO_SHIFT
);
348 tcg_gen_andi_tl(reg
, reg
, 0x1);
351 static inline void gen_mov_reg_V(TCGv reg
, TCGv_i32 src
)
353 tcg_gen_extu_i32_tl(reg
, src
);
354 tcg_gen_shri_tl(reg
, reg
, PSR_OVF_SHIFT
);
355 tcg_gen_andi_tl(reg
, reg
, 0x1);
358 static inline void gen_mov_reg_C(TCGv reg
, TCGv_i32 src
)
360 tcg_gen_extu_i32_tl(reg
, src
);
361 tcg_gen_shri_tl(reg
, reg
, PSR_CARRY_SHIFT
);
362 tcg_gen_andi_tl(reg
, reg
, 0x1);
365 static inline void gen_op_addi_cc(TCGv dst
, TCGv src1
, target_long src2
)
367 tcg_gen_mov_tl(cpu_cc_src
, src1
);
368 tcg_gen_movi_tl(cpu_cc_src2
, src2
);
369 tcg_gen_addi_tl(cpu_cc_dst
, cpu_cc_src
, src2
);
370 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
373 static inline void gen_op_add_cc(TCGv dst
, TCGv src1
, TCGv src2
)
375 tcg_gen_mov_tl(cpu_cc_src
, src1
);
376 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
377 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
378 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
381 static TCGv_i32
gen_add32_carry32(void)
383 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
385 /* Carry is computed from a previous add: (dst < src) */
386 #if TARGET_LONG_BITS == 64
387 cc_src1_32
= tcg_temp_new_i32();
388 cc_src2_32
= tcg_temp_new_i32();
389 tcg_gen_trunc_i64_i32(cc_src1_32
, cpu_cc_dst
);
390 tcg_gen_trunc_i64_i32(cc_src2_32
, cpu_cc_src
);
392 cc_src1_32
= cpu_cc_dst
;
393 cc_src2_32
= cpu_cc_src
;
396 carry_32
= tcg_temp_new_i32();
397 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
399 #if TARGET_LONG_BITS == 64
400 tcg_temp_free_i32(cc_src1_32
);
401 tcg_temp_free_i32(cc_src2_32
);
407 static TCGv_i32
gen_sub32_carry32(void)
409 TCGv_i32 carry_32
, cc_src1_32
, cc_src2_32
;
411 /* Carry is computed from a previous borrow: (src1 < src2) */
412 #if TARGET_LONG_BITS == 64
413 cc_src1_32
= tcg_temp_new_i32();
414 cc_src2_32
= tcg_temp_new_i32();
415 tcg_gen_trunc_i64_i32(cc_src1_32
, cpu_cc_src
);
416 tcg_gen_trunc_i64_i32(cc_src2_32
, cpu_cc_src2
);
418 cc_src1_32
= cpu_cc_src
;
419 cc_src2_32
= cpu_cc_src2
;
422 carry_32
= tcg_temp_new_i32();
423 tcg_gen_setcond_i32(TCG_COND_LTU
, carry_32
, cc_src1_32
, cc_src2_32
);
425 #if TARGET_LONG_BITS == 64
426 tcg_temp_free_i32(cc_src1_32
);
427 tcg_temp_free_i32(cc_src2_32
);
433 static void gen_op_addx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
434 TCGv src2
, int update_cc
)
442 /* Carry is known to be zero. Fall back to plain ADD. */
444 gen_op_add_cc(dst
, src1
, src2
);
446 tcg_gen_add_tl(dst
, src1
, src2
);
453 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
455 /* For 32-bit hosts, we can re-use the host's hardware carry
456 generation by using an ADD2 opcode. We discard the low
457 part of the output. Ideally we'd combine this operation
458 with the add that generated the carry in the first place. */
459 TCGv dst_low
= tcg_temp_new();
460 tcg_gen_op6_i32(INDEX_op_add2_i32
, dst_low
, dst
,
461 cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
462 tcg_temp_free(dst_low
);
466 carry_32
= gen_add32_carry32();
472 carry_32
= gen_sub32_carry32();
476 /* We need external help to produce the carry. */
477 carry_32
= tcg_temp_new_i32();
478 gen_helper_compute_C_icc(carry_32
, cpu_env
);
482 #if TARGET_LONG_BITS == 64
483 carry
= tcg_temp_new();
484 tcg_gen_extu_i32_i64(carry
, carry_32
);
489 tcg_gen_add_tl(dst
, src1
, src2
);
490 tcg_gen_add_tl(dst
, dst
, carry
);
492 tcg_temp_free_i32(carry_32
);
493 #if TARGET_LONG_BITS == 64
494 tcg_temp_free(carry
);
497 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
501 tcg_gen_mov_tl(cpu_cc_src
, src1
);
502 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
503 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
504 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADDX
);
505 dc
->cc_op
= CC_OP_ADDX
;
509 static inline void gen_op_subi_cc(TCGv dst
, TCGv src1
, target_long src2
, DisasContext
*dc
)
511 tcg_gen_mov_tl(cpu_cc_src
, src1
);
512 tcg_gen_movi_tl(cpu_cc_src2
, src2
);
514 tcg_gen_mov_tl(cpu_cc_dst
, src1
);
515 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
516 dc
->cc_op
= CC_OP_LOGIC
;
518 tcg_gen_subi_tl(cpu_cc_dst
, cpu_cc_src
, src2
);
519 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
520 dc
->cc_op
= CC_OP_SUB
;
522 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
525 static inline void gen_op_sub_cc(TCGv dst
, TCGv src1
, TCGv src2
)
527 tcg_gen_mov_tl(cpu_cc_src
, src1
);
528 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
529 tcg_gen_sub_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
530 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
533 static void gen_op_subx_int(DisasContext
*dc
, TCGv dst
, TCGv src1
,
534 TCGv src2
, int update_cc
)
542 /* Carry is known to be zero. Fall back to plain SUB. */
544 gen_op_sub_cc(dst
, src1
, src2
);
546 tcg_gen_sub_tl(dst
, src1
, src2
);
553 carry_32
= gen_add32_carry32();
559 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
561 /* For 32-bit hosts, we can re-use the host's hardware carry
562 generation by using a SUB2 opcode. We discard the low
563 part of the output. Ideally we'd combine this operation
564 with the add that generated the carry in the first place. */
565 TCGv dst_low
= tcg_temp_new();
566 tcg_gen_op6_i32(INDEX_op_sub2_i32
, dst_low
, dst
,
567 cpu_cc_src
, src1
, cpu_cc_src2
, src2
);
568 tcg_temp_free(dst_low
);
572 carry_32
= gen_sub32_carry32();
576 /* We need external help to produce the carry. */
577 carry_32
= tcg_temp_new_i32();
578 gen_helper_compute_C_icc(carry_32
, cpu_env
);
582 #if TARGET_LONG_BITS == 64
583 carry
= tcg_temp_new();
584 tcg_gen_extu_i32_i64(carry
, carry_32
);
589 tcg_gen_sub_tl(dst
, src1
, src2
);
590 tcg_gen_sub_tl(dst
, dst
, carry
);
592 tcg_temp_free_i32(carry_32
);
593 #if TARGET_LONG_BITS == 64
594 tcg_temp_free(carry
);
597 #if TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 32
601 tcg_gen_mov_tl(cpu_cc_src
, src1
);
602 tcg_gen_mov_tl(cpu_cc_src2
, src2
);
603 tcg_gen_mov_tl(cpu_cc_dst
, dst
);
604 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUBX
);
605 dc
->cc_op
= CC_OP_SUBX
;
609 static inline void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
613 r_temp
= tcg_temp_new();
619 zero
= tcg_const_tl(0);
620 tcg_gen_andi_tl(cpu_cc_src
, src1
, 0xffffffff);
621 tcg_gen_andi_tl(r_temp
, cpu_y
, 0x1);
622 tcg_gen_andi_tl(cpu_cc_src2
, src2
, 0xffffffff);
623 tcg_gen_movcond_tl(TCG_COND_EQ
, cpu_cc_src2
, r_temp
, zero
,
628 // env->y = (b2 << 31) | (env->y >> 1);
629 tcg_gen_andi_tl(r_temp
, cpu_cc_src
, 0x1);
630 tcg_gen_shli_tl(r_temp
, r_temp
, 31);
631 tcg_gen_shri_tl(cpu_tmp0
, cpu_y
, 1);
632 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0x7fffffff);
633 tcg_gen_or_tl(cpu_tmp0
, cpu_tmp0
, r_temp
);
634 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
637 gen_mov_reg_N(cpu_tmp0
, cpu_psr
);
638 gen_mov_reg_V(r_temp
, cpu_psr
);
639 tcg_gen_xor_tl(cpu_tmp0
, cpu_tmp0
, r_temp
);
640 tcg_temp_free(r_temp
);
642 // T0 = (b1 << 31) | (T0 >> 1);
644 tcg_gen_shli_tl(cpu_tmp0
, cpu_tmp0
, 31);
645 tcg_gen_shri_tl(cpu_cc_src
, cpu_cc_src
, 1);
646 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, cpu_tmp0
);
648 tcg_gen_add_tl(cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
);
650 tcg_gen_mov_tl(dst
, cpu_cc_dst
);
653 static inline void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
655 TCGv_i32 r_src1
, r_src2
;
656 TCGv_i64 r_temp
, r_temp2
;
658 r_src1
= tcg_temp_new_i32();
659 r_src2
= tcg_temp_new_i32();
661 tcg_gen_trunc_tl_i32(r_src1
, src1
);
662 tcg_gen_trunc_tl_i32(r_src2
, src2
);
664 r_temp
= tcg_temp_new_i64();
665 r_temp2
= tcg_temp_new_i64();
668 tcg_gen_ext_i32_i64(r_temp
, r_src2
);
669 tcg_gen_ext_i32_i64(r_temp2
, r_src1
);
671 tcg_gen_extu_i32_i64(r_temp
, r_src2
);
672 tcg_gen_extu_i32_i64(r_temp2
, r_src1
);
675 tcg_gen_mul_i64(r_temp2
, r_temp
, r_temp2
);
677 tcg_gen_shri_i64(r_temp
, r_temp2
, 32);
678 tcg_gen_trunc_i64_tl(cpu_tmp0
, r_temp
);
679 tcg_temp_free_i64(r_temp
);
680 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
682 tcg_gen_trunc_i64_tl(dst
, r_temp2
);
684 tcg_temp_free_i64(r_temp2
);
686 tcg_temp_free_i32(r_src1
);
687 tcg_temp_free_i32(r_src2
);
690 static inline void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
692 /* zero-extend truncated operands before multiplication */
693 gen_op_multiply(dst
, src1
, src2
, 0);
696 static inline void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
698 /* sign-extend truncated operands before multiplication */
699 gen_op_multiply(dst
, src1
, src2
, 1);
703 static inline void gen_op_eval_ba(TCGv dst
)
705 tcg_gen_movi_tl(dst
, 1);
709 static inline void gen_op_eval_be(TCGv dst
, TCGv_i32 src
)
711 gen_mov_reg_Z(dst
, src
);
715 static inline void gen_op_eval_ble(TCGv dst
, TCGv_i32 src
)
717 gen_mov_reg_N(cpu_tmp0
, src
);
718 gen_mov_reg_V(dst
, src
);
719 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
720 gen_mov_reg_Z(cpu_tmp0
, src
);
721 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
725 static inline void gen_op_eval_bl(TCGv dst
, TCGv_i32 src
)
727 gen_mov_reg_V(cpu_tmp0
, src
);
728 gen_mov_reg_N(dst
, src
);
729 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
733 static inline void gen_op_eval_bleu(TCGv dst
, TCGv_i32 src
)
735 gen_mov_reg_Z(cpu_tmp0
, src
);
736 gen_mov_reg_C(dst
, src
);
737 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
741 static inline void gen_op_eval_bcs(TCGv dst
, TCGv_i32 src
)
743 gen_mov_reg_C(dst
, src
);
747 static inline void gen_op_eval_bvs(TCGv dst
, TCGv_i32 src
)
749 gen_mov_reg_V(dst
, src
);
753 static inline void gen_op_eval_bn(TCGv dst
)
755 tcg_gen_movi_tl(dst
, 0);
759 static inline void gen_op_eval_bneg(TCGv dst
, TCGv_i32 src
)
761 gen_mov_reg_N(dst
, src
);
765 static inline void gen_op_eval_bne(TCGv dst
, TCGv_i32 src
)
767 gen_mov_reg_Z(dst
, src
);
768 tcg_gen_xori_tl(dst
, dst
, 0x1);
772 static inline void gen_op_eval_bg(TCGv dst
, TCGv_i32 src
)
774 gen_mov_reg_N(cpu_tmp0
, src
);
775 gen_mov_reg_V(dst
, src
);
776 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
777 gen_mov_reg_Z(cpu_tmp0
, src
);
778 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
779 tcg_gen_xori_tl(dst
, dst
, 0x1);
783 static inline void gen_op_eval_bge(TCGv dst
, TCGv_i32 src
)
785 gen_mov_reg_V(cpu_tmp0
, src
);
786 gen_mov_reg_N(dst
, src
);
787 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
788 tcg_gen_xori_tl(dst
, dst
, 0x1);
792 static inline void gen_op_eval_bgu(TCGv dst
, TCGv_i32 src
)
794 gen_mov_reg_Z(cpu_tmp0
, src
);
795 gen_mov_reg_C(dst
, src
);
796 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
797 tcg_gen_xori_tl(dst
, dst
, 0x1);
801 static inline void gen_op_eval_bcc(TCGv dst
, TCGv_i32 src
)
803 gen_mov_reg_C(dst
, src
);
804 tcg_gen_xori_tl(dst
, dst
, 0x1);
808 static inline void gen_op_eval_bpos(TCGv dst
, TCGv_i32 src
)
810 gen_mov_reg_N(dst
, src
);
811 tcg_gen_xori_tl(dst
, dst
, 0x1);
815 static inline void gen_op_eval_bvc(TCGv dst
, TCGv_i32 src
)
817 gen_mov_reg_V(dst
, src
);
818 tcg_gen_xori_tl(dst
, dst
, 0x1);
822 FPSR bit field FCC1 | FCC0:
828 static inline void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
829 unsigned int fcc_offset
)
831 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
832 tcg_gen_andi_tl(reg
, reg
, 0x1);
835 static inline void gen_mov_reg_FCC1(TCGv reg
, TCGv src
,
836 unsigned int fcc_offset
)
838 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
839 tcg_gen_andi_tl(reg
, reg
, 0x1);
843 static inline void gen_op_eval_fbne(TCGv dst
, TCGv src
,
844 unsigned int fcc_offset
)
846 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
847 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
848 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
851 // 1 or 2: FCC0 ^ FCC1
852 static inline void gen_op_eval_fblg(TCGv dst
, TCGv src
,
853 unsigned int fcc_offset
)
855 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
856 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
857 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
861 static inline void gen_op_eval_fbul(TCGv dst
, TCGv src
,
862 unsigned int fcc_offset
)
864 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
868 static inline void gen_op_eval_fbl(TCGv dst
, TCGv src
,
869 unsigned int fcc_offset
)
871 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
872 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
873 tcg_gen_xori_tl(cpu_tmp0
, cpu_tmp0
, 0x1);
874 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
878 static inline void gen_op_eval_fbug(TCGv dst
, TCGv src
,
879 unsigned int fcc_offset
)
881 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
885 static inline void gen_op_eval_fbg(TCGv dst
, TCGv src
,
886 unsigned int fcc_offset
)
888 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
889 tcg_gen_xori_tl(dst
, dst
, 0x1);
890 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
891 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
895 static inline void gen_op_eval_fbu(TCGv dst
, TCGv src
,
896 unsigned int fcc_offset
)
898 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
899 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
900 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
904 static inline void gen_op_eval_fbe(TCGv dst
, TCGv src
,
905 unsigned int fcc_offset
)
907 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
908 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
909 tcg_gen_or_tl(dst
, dst
, cpu_tmp0
);
910 tcg_gen_xori_tl(dst
, dst
, 0x1);
913 // 0 or 3: !(FCC0 ^ FCC1)
914 static inline void gen_op_eval_fbue(TCGv dst
, TCGv src
,
915 unsigned int fcc_offset
)
917 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
918 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
919 tcg_gen_xor_tl(dst
, dst
, cpu_tmp0
);
920 tcg_gen_xori_tl(dst
, dst
, 0x1);
924 static inline void gen_op_eval_fbge(TCGv dst
, TCGv src
,
925 unsigned int fcc_offset
)
927 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
928 tcg_gen_xori_tl(dst
, dst
, 0x1);
931 // !1: !(FCC0 & !FCC1)
932 static inline void gen_op_eval_fbuge(TCGv dst
, TCGv src
,
933 unsigned int fcc_offset
)
935 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
936 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
937 tcg_gen_xori_tl(cpu_tmp0
, cpu_tmp0
, 0x1);
938 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
939 tcg_gen_xori_tl(dst
, dst
, 0x1);
943 static inline void gen_op_eval_fble(TCGv dst
, TCGv src
,
944 unsigned int fcc_offset
)
946 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
947 tcg_gen_xori_tl(dst
, dst
, 0x1);
950 // !2: !(!FCC0 & FCC1)
951 static inline void gen_op_eval_fbule(TCGv dst
, TCGv src
,
952 unsigned int fcc_offset
)
954 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
955 tcg_gen_xori_tl(dst
, dst
, 0x1);
956 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
957 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
958 tcg_gen_xori_tl(dst
, dst
, 0x1);
961 // !3: !(FCC0 & FCC1)
962 static inline void gen_op_eval_fbo(TCGv dst
, TCGv src
,
963 unsigned int fcc_offset
)
965 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
966 gen_mov_reg_FCC1(cpu_tmp0
, src
, fcc_offset
);
967 tcg_gen_and_tl(dst
, dst
, cpu_tmp0
);
968 tcg_gen_xori_tl(dst
, dst
, 0x1);
971 static inline void gen_branch2(DisasContext
*dc
, target_ulong pc1
,
972 target_ulong pc2
, TCGv r_cond
)
976 l1
= gen_new_label();
978 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
980 gen_goto_tb(dc
, 0, pc1
, pc1
+ 4);
983 gen_goto_tb(dc
, 1, pc2
, pc2
+ 4);
986 static inline void gen_branch_a(DisasContext
*dc
, target_ulong pc1
,
987 target_ulong pc2
, TCGv r_cond
)
991 l1
= gen_new_label();
993 tcg_gen_brcondi_tl(TCG_COND_EQ
, r_cond
, 0, l1
);
995 gen_goto_tb(dc
, 0, pc2
, pc1
);
998 gen_goto_tb(dc
, 1, pc2
+ 4, pc2
+ 8);
1001 static inline void gen_generic_branch(DisasContext
*dc
)
1003 TCGv npc0
= tcg_const_tl(dc
->jump_pc
[0]);
1004 TCGv npc1
= tcg_const_tl(dc
->jump_pc
[1]);
1005 TCGv zero
= tcg_const_tl(0);
1007 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_npc
, cpu_cond
, zero
, npc0
, npc1
);
1009 tcg_temp_free(npc0
);
1010 tcg_temp_free(npc1
);
1011 tcg_temp_free(zero
);
1014 /* call this function before using the condition register as it may
1015 have been set for a jump */
1016 static inline void flush_cond(DisasContext
*dc
)
1018 if (dc
->npc
== JUMP_PC
) {
1019 gen_generic_branch(dc
);
1020 dc
->npc
= DYNAMIC_PC
;
1024 static inline void save_npc(DisasContext
*dc
)
1026 if (dc
->npc
== JUMP_PC
) {
1027 gen_generic_branch(dc
);
1028 dc
->npc
= DYNAMIC_PC
;
1029 } else if (dc
->npc
!= DYNAMIC_PC
) {
1030 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
1034 static inline void update_psr(DisasContext
*dc
)
1036 if (dc
->cc_op
!= CC_OP_FLAGS
) {
1037 dc
->cc_op
= CC_OP_FLAGS
;
1038 gen_helper_compute_psr(cpu_env
);
1042 static inline void save_state(DisasContext
*dc
)
1044 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
1048 static inline void gen_mov_pc_npc(DisasContext
*dc
)
1050 if (dc
->npc
== JUMP_PC
) {
1051 gen_generic_branch(dc
);
1052 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1053 dc
->pc
= DYNAMIC_PC
;
1054 } else if (dc
->npc
== DYNAMIC_PC
) {
1055 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1056 dc
->pc
= DYNAMIC_PC
;
1062 static inline void gen_op_next_insn(void)
1064 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1065 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
1068 static void free_compare(DisasCompare
*cmp
)
1071 tcg_temp_free(cmp
->c1
);
1074 tcg_temp_free(cmp
->c2
);
1078 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1081 static int subcc_cond
[16] = {
1097 -1, /* no overflow */
1100 static int logic_cond
[16] = {
1102 TCG_COND_EQ
, /* eq: Z */
1103 TCG_COND_LE
, /* le: Z | (N ^ V) -> Z | N */
1104 TCG_COND_LT
, /* lt: N ^ V -> N */
1105 TCG_COND_EQ
, /* leu: C | Z -> Z */
1106 TCG_COND_NEVER
, /* ltu: C -> 0 */
1107 TCG_COND_LT
, /* neg: N */
1108 TCG_COND_NEVER
, /* vs: V -> 0 */
1110 TCG_COND_NE
, /* ne: !Z */
1111 TCG_COND_GT
, /* gt: !(Z | (N ^ V)) -> !(Z | N) */
1112 TCG_COND_GE
, /* ge: !(N ^ V) -> !N */
1113 TCG_COND_NE
, /* gtu: !(C | Z) -> !Z */
1114 TCG_COND_ALWAYS
, /* geu: !C -> 1 */
1115 TCG_COND_GE
, /* pos: !N */
1116 TCG_COND_ALWAYS
, /* vc: !V -> 1 */
1122 #ifdef TARGET_SPARC64
1132 switch (dc
->cc_op
) {
1134 cmp
->cond
= logic_cond
[cond
];
1136 cmp
->is_bool
= false;
1138 cmp
->c2
= tcg_const_tl(0);
1139 #ifdef TARGET_SPARC64
1142 cmp
->c1
= tcg_temp_new();
1143 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_dst
);
1148 cmp
->c1
= cpu_cc_dst
;
1155 cmp
->cond
= (cond
== 6 ? TCG_COND_LT
: TCG_COND_GE
);
1156 goto do_compare_dst_0
;
1158 case 7: /* overflow */
1159 case 15: /* !overflow */
1163 cmp
->cond
= subcc_cond
[cond
];
1164 cmp
->is_bool
= false;
1165 #ifdef TARGET_SPARC64
1167 /* Note that sign-extension works for unsigned compares as
1168 long as both operands are sign-extended. */
1169 cmp
->g1
= cmp
->g2
= false;
1170 cmp
->c1
= tcg_temp_new();
1171 cmp
->c2
= tcg_temp_new();
1172 tcg_gen_ext32s_tl(cmp
->c1
, cpu_cc_src
);
1173 tcg_gen_ext32s_tl(cmp
->c2
, cpu_cc_src2
);
1177 cmp
->g1
= cmp
->g2
= true;
1178 cmp
->c1
= cpu_cc_src
;
1179 cmp
->c2
= cpu_cc_src2
;
1186 gen_helper_compute_psr(cpu_env
);
1187 dc
->cc_op
= CC_OP_FLAGS
;
1191 /* We're going to generate a boolean result. */
1192 cmp
->cond
= TCG_COND_NE
;
1193 cmp
->is_bool
= true;
1194 cmp
->g1
= cmp
->g2
= false;
1195 cmp
->c1
= r_dst
= tcg_temp_new();
1196 cmp
->c2
= tcg_const_tl(0);
1200 gen_op_eval_bn(r_dst
);
1203 gen_op_eval_be(r_dst
, r_src
);
1206 gen_op_eval_ble(r_dst
, r_src
);
1209 gen_op_eval_bl(r_dst
, r_src
);
1212 gen_op_eval_bleu(r_dst
, r_src
);
1215 gen_op_eval_bcs(r_dst
, r_src
);
1218 gen_op_eval_bneg(r_dst
, r_src
);
1221 gen_op_eval_bvs(r_dst
, r_src
);
1224 gen_op_eval_ba(r_dst
);
1227 gen_op_eval_bne(r_dst
, r_src
);
1230 gen_op_eval_bg(r_dst
, r_src
);
1233 gen_op_eval_bge(r_dst
, r_src
);
1236 gen_op_eval_bgu(r_dst
, r_src
);
1239 gen_op_eval_bcc(r_dst
, r_src
);
1242 gen_op_eval_bpos(r_dst
, r_src
);
1245 gen_op_eval_bvc(r_dst
, r_src
);
1252 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1254 unsigned int offset
;
1257 /* For now we still generate a straight boolean result. */
1258 cmp
->cond
= TCG_COND_NE
;
1259 cmp
->is_bool
= true;
1260 cmp
->g1
= cmp
->g2
= false;
1261 cmp
->c1
= r_dst
= tcg_temp_new();
1262 cmp
->c2
= tcg_const_tl(0);
1282 gen_op_eval_bn(r_dst
);
1285 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1288 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1291 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1294 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1297 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1300 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1303 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1306 gen_op_eval_ba(r_dst
);
1309 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1312 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1315 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1318 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1321 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1324 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1327 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1332 static void gen_cond(TCGv r_dst
, unsigned int cc
, unsigned int cond
,
1336 gen_compare(&cmp
, cc
, cond
, dc
);
1338 /* The interface is to return a boolean in r_dst. */
1340 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1342 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1348 static void gen_fcond(TCGv r_dst
, unsigned int cc
, unsigned int cond
)
1351 gen_fcompare(&cmp
, cc
, cond
);
1353 /* The interface is to return a boolean in r_dst. */
1355 tcg_gen_mov_tl(r_dst
, cmp
.c1
);
1357 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1363 #ifdef TARGET_SPARC64
1365 static const int gen_tcg_cond_reg
[8] = {
1376 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1378 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1379 cmp
->is_bool
= false;
1383 cmp
->c2
= tcg_const_tl(0);
1386 static inline void gen_cond_reg(TCGv r_dst
, int cond
, TCGv r_src
)
1389 gen_compare_reg(&cmp
, cond
, r_src
);
1391 /* The interface is to return a boolean in r_dst. */
1392 tcg_gen_setcond_tl(cmp
.cond
, r_dst
, cmp
.c1
, cmp
.c2
);
1398 static void do_branch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1400 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1401 target_ulong target
= dc
->pc
+ offset
;
1403 #ifdef TARGET_SPARC64
1404 if (unlikely(AM_CHECK(dc
))) {
1405 target
&= 0xffffffffULL
;
1409 /* unconditional not taken */
1411 dc
->pc
= dc
->npc
+ 4;
1412 dc
->npc
= dc
->pc
+ 4;
1415 dc
->npc
= dc
->pc
+ 4;
1417 } else if (cond
== 0x8) {
1418 /* unconditional taken */
1421 dc
->npc
= dc
->pc
+ 4;
1425 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1429 gen_cond(cpu_cond
, cc
, cond
, dc
);
1431 gen_branch_a(dc
, target
, dc
->npc
, cpu_cond
);
1435 dc
->jump_pc
[0] = target
;
1436 if (unlikely(dc
->npc
== DYNAMIC_PC
)) {
1437 dc
->jump_pc
[1] = DYNAMIC_PC
;
1438 tcg_gen_addi_tl(cpu_pc
, cpu_npc
, 4);
1440 dc
->jump_pc
[1] = dc
->npc
+ 4;
1447 static void do_fbranch(DisasContext
*dc
, int32_t offset
, uint32_t insn
, int cc
)
1449 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
1450 target_ulong target
= dc
->pc
+ offset
;
1452 #ifdef TARGET_SPARC64
1453 if (unlikely(AM_CHECK(dc
))) {
1454 target
&= 0xffffffffULL
;
1458 /* unconditional not taken */
1460 dc
->pc
= dc
->npc
+ 4;
1461 dc
->npc
= dc
->pc
+ 4;
1464 dc
->npc
= dc
->pc
+ 4;
1466 } else if (cond
== 0x8) {
1467 /* unconditional taken */
1470 dc
->npc
= dc
->pc
+ 4;
1474 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1478 gen_fcond(cpu_cond
, cc
, cond
);
1480 gen_branch_a(dc
, target
, dc
->npc
, cpu_cond
);
1484 dc
->jump_pc
[0] = target
;
1485 if (unlikely(dc
->npc
== DYNAMIC_PC
)) {
1486 dc
->jump_pc
[1] = DYNAMIC_PC
;
1487 tcg_gen_addi_tl(cpu_pc
, cpu_npc
, 4);
1489 dc
->jump_pc
[1] = dc
->npc
+ 4;
1496 #ifdef TARGET_SPARC64
1497 static void do_branch_reg(DisasContext
*dc
, int32_t offset
, uint32_t insn
,
1500 unsigned int cond
= GET_FIELD_SP(insn
, 25, 27), a
= (insn
& (1 << 29));
1501 target_ulong target
= dc
->pc
+ offset
;
1503 if (unlikely(AM_CHECK(dc
))) {
1504 target
&= 0xffffffffULL
;
1507 gen_cond_reg(cpu_cond
, cond
, r_reg
);
1509 gen_branch_a(dc
, target
, dc
->npc
, cpu_cond
);
1513 dc
->jump_pc
[0] = target
;
1514 if (unlikely(dc
->npc
== DYNAMIC_PC
)) {
1515 dc
->jump_pc
[1] = DYNAMIC_PC
;
1516 tcg_gen_addi_tl(cpu_pc
, cpu_npc
, 4);
1518 dc
->jump_pc
[1] = dc
->npc
+ 4;
1524 static inline void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1528 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1531 gen_helper_fcmps_fcc1(cpu_env
, r_rs1
, r_rs2
);
1534 gen_helper_fcmps_fcc2(cpu_env
, r_rs1
, r_rs2
);
1537 gen_helper_fcmps_fcc3(cpu_env
, r_rs1
, r_rs2
);
1542 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1546 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1549 gen_helper_fcmpd_fcc1(cpu_env
, r_rs1
, r_rs2
);
1552 gen_helper_fcmpd_fcc2(cpu_env
, r_rs1
, r_rs2
);
1555 gen_helper_fcmpd_fcc3(cpu_env
, r_rs1
, r_rs2
);
1560 static inline void gen_op_fcmpq(int fccno
)
1564 gen_helper_fcmpq(cpu_env
);
1567 gen_helper_fcmpq_fcc1(cpu_env
);
1570 gen_helper_fcmpq_fcc2(cpu_env
);
1573 gen_helper_fcmpq_fcc3(cpu_env
);
1578 static inline void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1582 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1585 gen_helper_fcmpes_fcc1(cpu_env
, r_rs1
, r_rs2
);
1588 gen_helper_fcmpes_fcc2(cpu_env
, r_rs1
, r_rs2
);
1591 gen_helper_fcmpes_fcc3(cpu_env
, r_rs1
, r_rs2
);
1596 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1600 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1603 gen_helper_fcmped_fcc1(cpu_env
, r_rs1
, r_rs2
);
1606 gen_helper_fcmped_fcc2(cpu_env
, r_rs1
, r_rs2
);
1609 gen_helper_fcmped_fcc3(cpu_env
, r_rs1
, r_rs2
);
1614 static inline void gen_op_fcmpeq(int fccno
)
1618 gen_helper_fcmpeq(cpu_env
);
1621 gen_helper_fcmpeq_fcc1(cpu_env
);
1624 gen_helper_fcmpeq_fcc2(cpu_env
);
1627 gen_helper_fcmpeq_fcc3(cpu_env
);
1634 static inline void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1636 gen_helper_fcmps(cpu_env
, r_rs1
, r_rs2
);
1639 static inline void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1641 gen_helper_fcmpd(cpu_env
, r_rs1
, r_rs2
);
1644 static inline void gen_op_fcmpq(int fccno
)
1646 gen_helper_fcmpq(cpu_env
);
1649 static inline void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1651 gen_helper_fcmpes(cpu_env
, r_rs1
, r_rs2
);
1654 static inline void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1656 gen_helper_fcmped(cpu_env
, r_rs1
, r_rs2
);
1659 static inline void gen_op_fcmpeq(int fccno
)
1661 gen_helper_fcmpeq(cpu_env
);
1665 static inline void gen_op_fpexception_im(int fsr_flags
)
1669 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1670 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1671 r_const
= tcg_const_i32(TT_FP_EXCP
);
1672 gen_helper_raise_exception(cpu_env
, r_const
);
1673 tcg_temp_free_i32(r_const
);
1676 static int gen_trap_ifnofpu(DisasContext
*dc
)
1678 #if !defined(CONFIG_USER_ONLY)
1679 if (!dc
->fpu_enabled
) {
1683 r_const
= tcg_const_i32(TT_NFPU_INSN
);
1684 gen_helper_raise_exception(cpu_env
, r_const
);
1685 tcg_temp_free_i32(r_const
);
1693 static inline void gen_op_clear_ieee_excp_and_FTT(void)
1695 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1698 static inline void gen_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1699 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
))
1703 src
= gen_load_fpr_F(dc
, rs
);
1704 dst
= gen_dest_fpr_F(dc
);
1706 gen(dst
, cpu_env
, src
);
1708 gen_store_fpr_F(dc
, rd
, dst
);
1711 static inline void gen_ne_fop_FF(DisasContext
*dc
, int rd
, int rs
,
1712 void (*gen
)(TCGv_i32
, TCGv_i32
))
1716 src
= gen_load_fpr_F(dc
, rs
);
1717 dst
= gen_dest_fpr_F(dc
);
1721 gen_store_fpr_F(dc
, rd
, dst
);
1724 static inline void gen_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1725 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1727 TCGv_i32 dst
, src1
, src2
;
1729 src1
= gen_load_fpr_F(dc
, rs1
);
1730 src2
= gen_load_fpr_F(dc
, rs2
);
1731 dst
= gen_dest_fpr_F(dc
);
1733 gen(dst
, cpu_env
, src1
, src2
);
1735 gen_store_fpr_F(dc
, rd
, dst
);
1738 #ifdef TARGET_SPARC64
1739 static inline void gen_ne_fop_FFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1740 void (*gen
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
1742 TCGv_i32 dst
, src1
, src2
;
1744 src1
= gen_load_fpr_F(dc
, rs1
);
1745 src2
= gen_load_fpr_F(dc
, rs2
);
1746 dst
= gen_dest_fpr_F(dc
);
1748 gen(dst
, src1
, src2
);
1750 gen_store_fpr_F(dc
, rd
, dst
);
1754 static inline void gen_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1755 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
))
1759 src
= gen_load_fpr_D(dc
, rs
);
1760 dst
= gen_dest_fpr_D(dc
, rd
);
1762 gen(dst
, cpu_env
, src
);
1764 gen_store_fpr_D(dc
, rd
, dst
);
1767 #ifdef TARGET_SPARC64
1768 static inline void gen_ne_fop_DD(DisasContext
*dc
, int rd
, int rs
,
1769 void (*gen
)(TCGv_i64
, TCGv_i64
))
1773 src
= gen_load_fpr_D(dc
, rs
);
1774 dst
= gen_dest_fpr_D(dc
, rd
);
1778 gen_store_fpr_D(dc
, rd
, dst
);
1782 static inline void gen_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1783 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1785 TCGv_i64 dst
, src1
, src2
;
1787 src1
= gen_load_fpr_D(dc
, rs1
);
1788 src2
= gen_load_fpr_D(dc
, rs2
);
1789 dst
= gen_dest_fpr_D(dc
, rd
);
1791 gen(dst
, cpu_env
, src1
, src2
);
1793 gen_store_fpr_D(dc
, rd
, dst
);
1796 #ifdef TARGET_SPARC64
1797 static inline void gen_ne_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1798 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1800 TCGv_i64 dst
, src1
, src2
;
1802 src1
= gen_load_fpr_D(dc
, rs1
);
1803 src2
= gen_load_fpr_D(dc
, rs2
);
1804 dst
= gen_dest_fpr_D(dc
, rd
);
1806 gen(dst
, src1
, src2
);
1808 gen_store_fpr_D(dc
, rd
, dst
);
1811 static inline void gen_gsr_fop_DDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1812 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1814 TCGv_i64 dst
, src1
, src2
;
1816 src1
= gen_load_fpr_D(dc
, rs1
);
1817 src2
= gen_load_fpr_D(dc
, rs2
);
1818 dst
= gen_dest_fpr_D(dc
, rd
);
1820 gen(dst
, cpu_gsr
, src1
, src2
);
1822 gen_store_fpr_D(dc
, rd
, dst
);
1825 static inline void gen_ne_fop_DDDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1826 void (*gen
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
1828 TCGv_i64 dst
, src0
, src1
, src2
;
1830 src1
= gen_load_fpr_D(dc
, rs1
);
1831 src2
= gen_load_fpr_D(dc
, rs2
);
1832 src0
= gen_load_fpr_D(dc
, rd
);
1833 dst
= gen_dest_fpr_D(dc
, rd
);
1835 gen(dst
, src0
, src1
, src2
);
1837 gen_store_fpr_D(dc
, rd
, dst
);
1841 static inline void gen_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1842 void (*gen
)(TCGv_ptr
))
1844 gen_op_load_fpr_QT1(QFPREG(rs
));
1848 gen_op_store_QT0_fpr(QFPREG(rd
));
1849 gen_update_fprs_dirty(QFPREG(rd
));
1852 #ifdef TARGET_SPARC64
1853 static inline void gen_ne_fop_QQ(DisasContext
*dc
, int rd
, int rs
,
1854 void (*gen
)(TCGv_ptr
))
1856 gen_op_load_fpr_QT1(QFPREG(rs
));
1860 gen_op_store_QT0_fpr(QFPREG(rd
));
1861 gen_update_fprs_dirty(QFPREG(rd
));
1865 static inline void gen_fop_QQQ(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1866 void (*gen
)(TCGv_ptr
))
1868 gen_op_load_fpr_QT0(QFPREG(rs1
));
1869 gen_op_load_fpr_QT1(QFPREG(rs2
));
1873 gen_op_store_QT0_fpr(QFPREG(rd
));
1874 gen_update_fprs_dirty(QFPREG(rd
));
1877 static inline void gen_fop_DFF(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1878 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
, TCGv_i32
))
1881 TCGv_i32 src1
, src2
;
1883 src1
= gen_load_fpr_F(dc
, rs1
);
1884 src2
= gen_load_fpr_F(dc
, rs2
);
1885 dst
= gen_dest_fpr_D(dc
, rd
);
1887 gen(dst
, cpu_env
, src1
, src2
);
1889 gen_store_fpr_D(dc
, rd
, dst
);
1892 static inline void gen_fop_QDD(DisasContext
*dc
, int rd
, int rs1
, int rs2
,
1893 void (*gen
)(TCGv_ptr
, TCGv_i64
, TCGv_i64
))
1895 TCGv_i64 src1
, src2
;
1897 src1
= gen_load_fpr_D(dc
, rs1
);
1898 src2
= gen_load_fpr_D(dc
, rs2
);
1900 gen(cpu_env
, src1
, src2
);
1902 gen_op_store_QT0_fpr(QFPREG(rd
));
1903 gen_update_fprs_dirty(QFPREG(rd
));
1906 #ifdef TARGET_SPARC64
1907 static inline void gen_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1908 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1913 src
= gen_load_fpr_F(dc
, rs
);
1914 dst
= gen_dest_fpr_D(dc
, rd
);
1916 gen(dst
, cpu_env
, src
);
1918 gen_store_fpr_D(dc
, rd
, dst
);
1922 static inline void gen_ne_fop_DF(DisasContext
*dc
, int rd
, int rs
,
1923 void (*gen
)(TCGv_i64
, TCGv_ptr
, TCGv_i32
))
1928 src
= gen_load_fpr_F(dc
, rs
);
1929 dst
= gen_dest_fpr_D(dc
, rd
);
1931 gen(dst
, cpu_env
, src
);
1933 gen_store_fpr_D(dc
, rd
, dst
);
1936 static inline void gen_fop_FD(DisasContext
*dc
, int rd
, int rs
,
1937 void (*gen
)(TCGv_i32
, TCGv_ptr
, TCGv_i64
))
1942 src
= gen_load_fpr_D(dc
, rs
);
1943 dst
= gen_dest_fpr_F(dc
);
1945 gen(dst
, cpu_env
, src
);
1947 gen_store_fpr_F(dc
, rd
, dst
);
1950 static inline void gen_fop_FQ(DisasContext
*dc
, int rd
, int rs
,
1951 void (*gen
)(TCGv_i32
, TCGv_ptr
))
1955 gen_op_load_fpr_QT1(QFPREG(rs
));
1956 dst
= gen_dest_fpr_F(dc
);
1960 gen_store_fpr_F(dc
, rd
, dst
);
1963 static inline void gen_fop_DQ(DisasContext
*dc
, int rd
, int rs
,
1964 void (*gen
)(TCGv_i64
, TCGv_ptr
))
1968 gen_op_load_fpr_QT1(QFPREG(rs
));
1969 dst
= gen_dest_fpr_D(dc
, rd
);
1973 gen_store_fpr_D(dc
, rd
, dst
);
1976 static inline void gen_ne_fop_QF(DisasContext
*dc
, int rd
, int rs
,
1977 void (*gen
)(TCGv_ptr
, TCGv_i32
))
1981 src
= gen_load_fpr_F(dc
, rs
);
1985 gen_op_store_QT0_fpr(QFPREG(rd
));
1986 gen_update_fprs_dirty(QFPREG(rd
));
1989 static inline void gen_ne_fop_QD(DisasContext
*dc
, int rd
, int rs
,
1990 void (*gen
)(TCGv_ptr
, TCGv_i64
))
1994 src
= gen_load_fpr_D(dc
, rs
);
1998 gen_op_store_QT0_fpr(QFPREG(rd
));
1999 gen_update_fprs_dirty(QFPREG(rd
));
2003 #ifdef TARGET_SPARC64
2004 static inline TCGv_i32
gen_get_asi(int insn
, TCGv r_addr
)
2010 r_asi
= tcg_temp_new_i32();
2011 tcg_gen_mov_i32(r_asi
, cpu_asi
);
2013 asi
= GET_FIELD(insn
, 19, 26);
2014 r_asi
= tcg_const_i32(asi
);
2019 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
2022 TCGv_i32 r_asi
, r_size
, r_sign
;
2024 r_asi
= gen_get_asi(insn
, addr
);
2025 r_size
= tcg_const_i32(size
);
2026 r_sign
= tcg_const_i32(sign
);
2027 gen_helper_ld_asi(dst
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2028 tcg_temp_free_i32(r_sign
);
2029 tcg_temp_free_i32(r_size
);
2030 tcg_temp_free_i32(r_asi
);
2033 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
2035 TCGv_i32 r_asi
, r_size
;
2037 r_asi
= gen_get_asi(insn
, addr
);
2038 r_size
= tcg_const_i32(size
);
2039 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
2040 tcg_temp_free_i32(r_size
);
2041 tcg_temp_free_i32(r_asi
);
2044 static inline void gen_ldf_asi(TCGv addr
, int insn
, int size
, int rd
)
2046 TCGv_i32 r_asi
, r_size
, r_rd
;
2048 r_asi
= gen_get_asi(insn
, addr
);
2049 r_size
= tcg_const_i32(size
);
2050 r_rd
= tcg_const_i32(rd
);
2051 gen_helper_ldf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2052 tcg_temp_free_i32(r_rd
);
2053 tcg_temp_free_i32(r_size
);
2054 tcg_temp_free_i32(r_asi
);
2057 static inline void gen_stf_asi(TCGv addr
, int insn
, int size
, int rd
)
2059 TCGv_i32 r_asi
, r_size
, r_rd
;
2061 r_asi
= gen_get_asi(insn
, addr
);
2062 r_size
= tcg_const_i32(size
);
2063 r_rd
= tcg_const_i32(rd
);
2064 gen_helper_stf_asi(cpu_env
, addr
, r_asi
, r_size
, r_rd
);
2065 tcg_temp_free_i32(r_rd
);
2066 tcg_temp_free_i32(r_size
);
2067 tcg_temp_free_i32(r_asi
);
2070 static inline void gen_swap_asi(TCGv dst
, TCGv src
, TCGv addr
, int insn
)
2072 TCGv_i32 r_asi
, r_size
, r_sign
;
2073 TCGv_i64 t64
= tcg_temp_new_i64();
2075 r_asi
= gen_get_asi(insn
, addr
);
2076 r_size
= tcg_const_i32(4);
2077 r_sign
= tcg_const_i32(0);
2078 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2079 tcg_temp_free_i32(r_sign
);
2080 gen_helper_st_asi(cpu_env
, addr
, src
, r_asi
, r_size
);
2081 tcg_temp_free_i32(r_size
);
2082 tcg_temp_free_i32(r_asi
);
2083 tcg_gen_trunc_i64_tl(dst
, t64
);
2084 tcg_temp_free_i64(t64
);
2087 static inline void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2090 TCGv_i32 r_asi
, r_rd
;
2092 r_asi
= gen_get_asi(insn
, addr
);
2093 r_rd
= tcg_const_i32(rd
);
2094 gen_helper_ldda_asi(cpu_env
, addr
, r_asi
, r_rd
);
2095 tcg_temp_free_i32(r_rd
);
2096 tcg_temp_free_i32(r_asi
);
2099 static inline void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2102 TCGv_i32 r_asi
, r_size
;
2103 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2104 TCGv_i64 t64
= tcg_temp_new_i64();
2106 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2107 r_asi
= gen_get_asi(insn
, addr
);
2108 r_size
= tcg_const_i32(8);
2109 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2110 tcg_temp_free_i32(r_size
);
2111 tcg_temp_free_i32(r_asi
);
2112 tcg_temp_free_i64(t64
);
2115 static inline void gen_cas_asi(DisasContext
*dc
, TCGv addr
,
2116 TCGv val2
, int insn
, int rd
)
2118 TCGv val1
= gen_load_gpr(dc
, rd
);
2119 TCGv dst
= gen_dest_gpr(dc
, rd
);
2120 TCGv_i32 r_asi
= gen_get_asi(insn
, addr
);
2122 gen_helper_cas_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2123 tcg_temp_free_i32(r_asi
);
2124 gen_store_gpr(dc
, rd
, dst
);
2127 static inline void gen_casx_asi(DisasContext
*dc
, TCGv addr
,
2128 TCGv val2
, int insn
, int rd
)
2130 TCGv val1
= gen_load_gpr(dc
, rd
);
2131 TCGv dst
= gen_dest_gpr(dc
, rd
);
2132 TCGv_i32 r_asi
= gen_get_asi(insn
, addr
);
2134 gen_helper_casx_asi(dst
, cpu_env
, addr
, val1
, val2
, r_asi
);
2135 tcg_temp_free_i32(r_asi
);
2136 gen_store_gpr(dc
, rd
, dst
);
2139 #elif !defined(CONFIG_USER_ONLY)
2141 static inline void gen_ld_asi(TCGv dst
, TCGv addr
, int insn
, int size
,
2144 TCGv_i32 r_asi
, r_size
, r_sign
;
2145 TCGv_i64 t64
= tcg_temp_new_i64();
2147 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2148 r_size
= tcg_const_i32(size
);
2149 r_sign
= tcg_const_i32(sign
);
2150 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2151 tcg_temp_free_i32(r_sign
);
2152 tcg_temp_free_i32(r_size
);
2153 tcg_temp_free_i32(r_asi
);
2154 tcg_gen_trunc_i64_tl(dst
, t64
);
2155 tcg_temp_free_i64(t64
);
2158 static inline void gen_st_asi(TCGv src
, TCGv addr
, int insn
, int size
)
2160 TCGv_i32 r_asi
, r_size
;
2161 TCGv_i64 t64
= tcg_temp_new_i64();
2163 tcg_gen_extu_tl_i64(t64
, src
);
2164 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2165 r_size
= tcg_const_i32(size
);
2166 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2167 tcg_temp_free_i32(r_size
);
2168 tcg_temp_free_i32(r_asi
);
2169 tcg_temp_free_i64(t64
);
2172 static inline void gen_swap_asi(TCGv dst
, TCGv src
, TCGv addr
, int insn
)
2174 TCGv_i32 r_asi
, r_size
, r_sign
;
2175 TCGv_i64 r_val
, t64
;
2177 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2178 r_size
= tcg_const_i32(4);
2179 r_sign
= tcg_const_i32(0);
2180 t64
= tcg_temp_new_i64();
2181 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2182 tcg_temp_free(r_sign
);
2183 r_val
= tcg_temp_new_i64();
2184 tcg_gen_extu_tl_i64(r_val
, src
);
2185 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2186 tcg_temp_free_i64(r_val
);
2187 tcg_temp_free_i32(r_size
);
2188 tcg_temp_free_i32(r_asi
);
2189 tcg_gen_trunc_i64_tl(dst
, t64
);
2190 tcg_temp_free_i64(t64
);
2193 static inline void gen_ldda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2196 TCGv_i32 r_asi
, r_size
, r_sign
;
2200 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2201 r_size
= tcg_const_i32(8);
2202 r_sign
= tcg_const_i32(0);
2203 t64
= tcg_temp_new_i64();
2204 gen_helper_ld_asi(t64
, cpu_env
, addr
, r_asi
, r_size
, r_sign
);
2205 tcg_temp_free_i32(r_sign
);
2206 tcg_temp_free_i32(r_size
);
2207 tcg_temp_free_i32(r_asi
);
2209 t
= gen_dest_gpr(dc
, rd
+ 1);
2210 tcg_gen_trunc_i64_tl(t
, t64
);
2211 gen_store_gpr(dc
, rd
+ 1, t
);
2213 tcg_gen_shri_i64(t64
, t64
, 32);
2214 tcg_gen_trunc_i64_tl(hi
, t64
);
2215 tcg_temp_free_i64(t64
);
2216 gen_store_gpr(dc
, rd
, hi
);
2219 static inline void gen_stda_asi(DisasContext
*dc
, TCGv hi
, TCGv addr
,
2222 TCGv_i32 r_asi
, r_size
;
2223 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2224 TCGv_i64 t64
= tcg_temp_new_i64();
2226 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2227 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2228 r_size
= tcg_const_i32(8);
2229 gen_helper_st_asi(cpu_env
, addr
, t64
, r_asi
, r_size
);
2230 tcg_temp_free_i32(r_size
);
2231 tcg_temp_free_i32(r_asi
);
2232 tcg_temp_free_i64(t64
);
2236 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
2237 static inline void gen_ldstub_asi(TCGv dst
, TCGv addr
, int insn
)
2240 TCGv_i32 r_asi
, r_size
;
2242 gen_ld_asi(dst
, addr
, insn
, 1, 0);
2244 r_val
= tcg_const_i64(0xffULL
);
2245 r_asi
= tcg_const_i32(GET_FIELD(insn
, 19, 26));
2246 r_size
= tcg_const_i32(1);
2247 gen_helper_st_asi(cpu_env
, addr
, r_val
, r_asi
, r_size
);
2248 tcg_temp_free_i32(r_size
);
2249 tcg_temp_free_i32(r_asi
);
2250 tcg_temp_free_i64(r_val
);
2254 static TCGv
get_src1(DisasContext
*dc
, unsigned int insn
)
2256 unsigned int rs1
= GET_FIELD(insn
, 13, 17);
2257 return gen_load_gpr(dc
, rs1
);
2260 static TCGv
get_src2(DisasContext
*dc
, unsigned int insn
)
2262 if (IS_IMM
) { /* immediate */
2263 target_long simm
= GET_FIELDs(insn
, 19, 31);
2264 TCGv t
= get_temp_tl(dc
);
2265 tcg_gen_movi_tl(t
, simm
);
2267 } else { /* register */
2268 unsigned int rs2
= GET_FIELD(insn
, 27, 31);
2269 return gen_load_gpr(dc
, rs2
);
2273 #ifdef TARGET_SPARC64
2274 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2276 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2278 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2279 or fold the comparison down to 32 bits and use movcond_i32. Choose
2281 c32
= tcg_temp_new_i32();
2283 tcg_gen_trunc_i64_i32(c32
, cmp
->c1
);
2285 TCGv_i64 c64
= tcg_temp_new_i64();
2286 tcg_gen_setcond_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2287 tcg_gen_trunc_i64_i32(c32
, c64
);
2288 tcg_temp_free_i64(c64
);
2291 s1
= gen_load_fpr_F(dc
, rs
);
2292 s2
= gen_load_fpr_F(dc
, rd
);
2293 dst
= gen_dest_fpr_F(dc
);
2294 zero
= tcg_const_i32(0);
2296 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2298 tcg_temp_free_i32(c32
);
2299 tcg_temp_free_i32(zero
);
2300 gen_store_fpr_F(dc
, rd
, dst
);
2303 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2305 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2306 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, cmp
->c2
,
2307 gen_load_fpr_D(dc
, rs
),
2308 gen_load_fpr_D(dc
, rd
));
2309 gen_store_fpr_D(dc
, rd
, dst
);
2312 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2314 int qd
= QFPREG(rd
);
2315 int qs
= QFPREG(rs
);
2317 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, cmp
->c2
,
2318 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2319 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, cmp
->c2
,
2320 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2322 gen_update_fprs_dirty(qd
);
2325 static inline void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
, TCGv_ptr cpu_env
)
2327 TCGv_i32 r_tl
= tcg_temp_new_i32();
2329 /* load env->tl into r_tl */
2330 tcg_gen_ld_i32(r_tl
, cpu_env
, offsetof(CPUSPARCState
, tl
));
2332 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2333 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2335 /* calculate offset to current trap state from env->ts, reuse r_tl */
2336 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2337 tcg_gen_addi_ptr(r_tsptr
, cpu_env
, offsetof(CPUSPARCState
, ts
));
2339 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2341 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2342 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2343 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2344 tcg_temp_free_ptr(r_tl_tmp
);
2347 tcg_temp_free_i32(r_tl
);
2350 static void gen_edge(DisasContext
*dc
, TCGv dst
, TCGv s1
, TCGv s2
,
2351 int width
, bool cc
, bool left
)
2353 TCGv lo1
, lo2
, t1
, t2
;
2354 uint64_t amask
, tabl
, tabr
;
2355 int shift
, imask
, omask
;
2358 tcg_gen_mov_tl(cpu_cc_src
, s1
);
2359 tcg_gen_mov_tl(cpu_cc_src2
, s2
);
2360 tcg_gen_sub_tl(cpu_cc_dst
, s1
, s2
);
2361 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
2362 dc
->cc_op
= CC_OP_SUB
;
2365 /* Theory of operation: there are two tables, left and right (not to
2366 be confused with the left and right versions of the opcode). These
2367 are indexed by the low 3 bits of the inputs. To make things "easy",
2368 these tables are loaded into two constants, TABL and TABR below.
2369 The operation index = (input & imask) << shift calculates the index
2370 into the constant, while val = (table >> index) & omask calculates
2371 the value we're looking for. */
2378 tabl
= 0x80c0e0f0f8fcfeffULL
;
2379 tabr
= 0xff7f3f1f0f070301ULL
;
2381 tabl
= 0x0103070f1f3f7fffULL
;
2382 tabr
= 0xfffefcf8f0e0c080ULL
;
2402 tabl
= (2 << 2) | 3;
2403 tabr
= (3 << 2) | 1;
2405 tabl
= (1 << 2) | 3;
2406 tabr
= (3 << 2) | 2;
2413 lo1
= tcg_temp_new();
2414 lo2
= tcg_temp_new();
2415 tcg_gen_andi_tl(lo1
, s1
, imask
);
2416 tcg_gen_andi_tl(lo2
, s2
, imask
);
2417 tcg_gen_shli_tl(lo1
, lo1
, shift
);
2418 tcg_gen_shli_tl(lo2
, lo2
, shift
);
2420 t1
= tcg_const_tl(tabl
);
2421 t2
= tcg_const_tl(tabr
);
2422 tcg_gen_shr_tl(lo1
, t1
, lo1
);
2423 tcg_gen_shr_tl(lo2
, t2
, lo2
);
2424 tcg_gen_andi_tl(dst
, lo1
, omask
);
2425 tcg_gen_andi_tl(lo2
, lo2
, omask
);
2429 amask
&= 0xffffffffULL
;
2431 tcg_gen_andi_tl(s1
, s1
, amask
);
2432 tcg_gen_andi_tl(s2
, s2
, amask
);
2434 /* We want to compute
2435 dst = (s1 == s2 ? lo1 : lo1 & lo2).
2436 We've already done dst = lo1, so this reduces to
2437 dst &= (s1 == s2 ? -1 : lo2)
2442 tcg_gen_setcond_tl(TCG_COND_EQ
, t1
, s1
, s2
);
2443 tcg_gen_neg_tl(t1
, t1
);
2444 tcg_gen_or_tl(lo2
, lo2
, t1
);
2445 tcg_gen_and_tl(dst
, dst
, lo2
);
2453 static void gen_alignaddr(TCGv dst
, TCGv s1
, TCGv s2
, bool left
)
2455 TCGv tmp
= tcg_temp_new();
2457 tcg_gen_add_tl(tmp
, s1
, s2
);
2458 tcg_gen_andi_tl(dst
, tmp
, -8);
2460 tcg_gen_neg_tl(tmp
, tmp
);
2462 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, tmp
, 0, 3);
2467 static void gen_faligndata(TCGv dst
, TCGv gsr
, TCGv s1
, TCGv s2
)
2471 t1
= tcg_temp_new();
2472 t2
= tcg_temp_new();
2473 shift
= tcg_temp_new();
2475 tcg_gen_andi_tl(shift
, gsr
, 7);
2476 tcg_gen_shli_tl(shift
, shift
, 3);
2477 tcg_gen_shl_tl(t1
, s1
, shift
);
2479 /* A shift of 64 does not produce 0 in TCG. Divide this into a
2480 shift of (up to 63) followed by a constant shift of 1. */
2481 tcg_gen_xori_tl(shift
, shift
, 63);
2482 tcg_gen_shr_tl(t2
, s2
, shift
);
2483 tcg_gen_shri_tl(t2
, t2
, 1);
2485 tcg_gen_or_tl(dst
, t1
, t2
);
2489 tcg_temp_free(shift
);
2493 #define CHECK_IU_FEATURE(dc, FEATURE) \
2494 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2496 #define CHECK_FPU_FEATURE(dc, FEATURE) \
2497 if (!((dc)->def->features & CPU_FEATURE_ ## FEATURE)) \
2500 /* before an instruction, dc->pc must be static */
2501 static void disas_sparc_insn(DisasContext
* dc
, unsigned int insn
)
2503 unsigned int opc
, rs1
, rs2
, rd
;
2504 TCGv cpu_src1
, cpu_src2
;
2505 TCGv_i32 cpu_src1_32
, cpu_src2_32
, cpu_dst_32
;
2506 TCGv_i64 cpu_src1_64
, cpu_src2_64
, cpu_dst_64
;
2509 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP
| CPU_LOG_TB_OP_OPT
))) {
2510 tcg_gen_debug_insn_start(dc
->pc
);
2513 opc
= GET_FIELD(insn
, 0, 1);
2515 rd
= GET_FIELD(insn
, 2, 6);
2518 case 0: /* branches/sethi */
2520 unsigned int xop
= GET_FIELD(insn
, 7, 9);
2523 #ifdef TARGET_SPARC64
2524 case 0x1: /* V9 BPcc */
2528 target
= GET_FIELD_SP(insn
, 0, 18);
2529 target
= sign_extend(target
, 19);
2531 cc
= GET_FIELD_SP(insn
, 20, 21);
2533 do_branch(dc
, target
, insn
, 0);
2535 do_branch(dc
, target
, insn
, 1);
2540 case 0x3: /* V9 BPr */
2542 target
= GET_FIELD_SP(insn
, 0, 13) |
2543 (GET_FIELD_SP(insn
, 20, 21) << 14);
2544 target
= sign_extend(target
, 16);
2546 cpu_src1
= get_src1(dc
, insn
);
2547 do_branch_reg(dc
, target
, insn
, cpu_src1
);
2550 case 0x5: /* V9 FBPcc */
2552 int cc
= GET_FIELD_SP(insn
, 20, 21);
2553 if (gen_trap_ifnofpu(dc
)) {
2556 target
= GET_FIELD_SP(insn
, 0, 18);
2557 target
= sign_extend(target
, 19);
2559 do_fbranch(dc
, target
, insn
, cc
);
2563 case 0x7: /* CBN+x */
2568 case 0x2: /* BN+x */
2570 target
= GET_FIELD(insn
, 10, 31);
2571 target
= sign_extend(target
, 22);
2573 do_branch(dc
, target
, insn
, 0);
2576 case 0x6: /* FBN+x */
2578 if (gen_trap_ifnofpu(dc
)) {
2581 target
= GET_FIELD(insn
, 10, 31);
2582 target
= sign_extend(target
, 22);
2584 do_fbranch(dc
, target
, insn
, 0);
2587 case 0x4: /* SETHI */
2588 /* Special-case %g0 because that's the canonical nop. */
2590 uint32_t value
= GET_FIELD(insn
, 10, 31);
2591 TCGv t
= gen_dest_gpr(dc
, rd
);
2592 tcg_gen_movi_tl(t
, value
<< 10);
2593 gen_store_gpr(dc
, rd
, t
);
2596 case 0x0: /* UNIMPL */
2605 target_long target
= GET_FIELDs(insn
, 2, 31) << 2;
2606 TCGv o7
= gen_dest_gpr(dc
, 15);
2608 tcg_gen_movi_tl(o7
, dc
->pc
);
2609 gen_store_gpr(dc
, 15, o7
);
2612 #ifdef TARGET_SPARC64
2613 if (unlikely(AM_CHECK(dc
))) {
2614 target
&= 0xffffffffULL
;
2620 case 2: /* FPU & Logical Operations */
2622 unsigned int xop
= GET_FIELD(insn
, 7, 12);
2623 if (xop
== 0x3a) { /* generate trap */
2624 int cond
= GET_FIELD(insn
, 3, 6);
2636 /* Conditional trap. */
2638 #ifdef TARGET_SPARC64
2640 int cc
= GET_FIELD_SP(insn
, 11, 12);
2642 gen_compare(&cmp
, 0, cond
, dc
);
2643 } else if (cc
== 2) {
2644 gen_compare(&cmp
, 1, cond
, dc
);
2649 gen_compare(&cmp
, 0, cond
, dc
);
2651 l1
= gen_new_label();
2652 tcg_gen_brcond_tl(tcg_invert_cond(cmp
.cond
),
2653 cmp
.c1
, cmp
.c2
, l1
);
2657 mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2658 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2660 /* Don't use the normal temporaries, as they may well have
2661 gone out of scope with the branch above. While we're
2662 doing that we might as well pre-truncate to 32-bit. */
2663 trap
= tcg_temp_new_i32();
2665 rs1
= GET_FIELD_SP(insn
, 14, 18);
2667 rs2
= GET_FIELD_SP(insn
, 0, 6);
2669 tcg_gen_movi_i32(trap
, (rs2
& mask
) + TT_TRAP
);
2670 /* Signal that the trap value is fully constant. */
2673 TCGv t1
= gen_load_gpr(dc
, rs1
);
2674 tcg_gen_trunc_tl_i32(trap
, t1
);
2675 tcg_gen_addi_i32(trap
, trap
, rs2
);
2679 rs2
= GET_FIELD_SP(insn
, 0, 4);
2680 t1
= gen_load_gpr(dc
, rs1
);
2681 t2
= gen_load_gpr(dc
, rs2
);
2682 tcg_gen_add_tl(t1
, t1
, t2
);
2683 tcg_gen_trunc_tl_i32(trap
, t1
);
2686 tcg_gen_andi_i32(trap
, trap
, mask
);
2687 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2690 gen_helper_raise_exception(cpu_env
, trap
);
2691 tcg_temp_free_i32(trap
);
2694 /* An unconditional trap ends the TB. */
2698 /* A conditional trap falls through to the next insn. */
2702 } else if (xop
== 0x28) {
2703 rs1
= GET_FIELD(insn
, 13, 17);
2706 #ifndef TARGET_SPARC64
2707 case 0x01 ... 0x0e: /* undefined in the SPARCv8
2708 manual, rdy on the microSPARC
2710 case 0x0f: /* stbar in the SPARCv8 manual,
2711 rdy on the microSPARC II */
2712 case 0x10 ... 0x1f: /* implementation-dependent in the
2713 SPARCv8 manual, rdy on the
2716 if (rs1
== 0x11 && dc
->def
->features
& CPU_FEATURE_ASR17
) {
2717 TCGv t
= gen_dest_gpr(dc
, rd
);
2718 /* Read Asr17 for a Leon3 monoprocessor */
2719 tcg_gen_movi_tl(t
, (1 << 8) | (dc
->def
->nwindows
- 1));
2720 gen_store_gpr(dc
, rd
, t
);
2724 gen_store_gpr(dc
, rd
, cpu_y
);
2726 #ifdef TARGET_SPARC64
2727 case 0x2: /* V9 rdccr */
2729 gen_helper_rdccr(cpu_dst
, cpu_env
);
2730 gen_store_gpr(dc
, rd
, cpu_dst
);
2732 case 0x3: /* V9 rdasi */
2733 tcg_gen_ext_i32_tl(cpu_dst
, cpu_asi
);
2734 gen_store_gpr(dc
, rd
, cpu_dst
);
2736 case 0x4: /* V9 rdtick */
2740 r_tickptr
= tcg_temp_new_ptr();
2741 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2742 offsetof(CPUSPARCState
, tick
));
2743 gen_helper_tick_get_count(cpu_dst
, r_tickptr
);
2744 tcg_temp_free_ptr(r_tickptr
);
2745 gen_store_gpr(dc
, rd
, cpu_dst
);
2748 case 0x5: /* V9 rdpc */
2750 TCGv t
= gen_dest_gpr(dc
, rd
);
2751 if (unlikely(AM_CHECK(dc
))) {
2752 tcg_gen_movi_tl(t
, dc
->pc
& 0xffffffffULL
);
2754 tcg_gen_movi_tl(t
, dc
->pc
);
2756 gen_store_gpr(dc
, rd
, t
);
2759 case 0x6: /* V9 rdfprs */
2760 tcg_gen_ext_i32_tl(cpu_dst
, cpu_fprs
);
2761 gen_store_gpr(dc
, rd
, cpu_dst
);
2763 case 0xf: /* V9 membar */
2764 break; /* no effect */
2765 case 0x13: /* Graphics Status */
2766 if (gen_trap_ifnofpu(dc
)) {
2769 gen_store_gpr(dc
, rd
, cpu_gsr
);
2771 case 0x16: /* Softint */
2772 tcg_gen_ext_i32_tl(cpu_dst
, cpu_softint
);
2773 gen_store_gpr(dc
, rd
, cpu_dst
);
2775 case 0x17: /* Tick compare */
2776 gen_store_gpr(dc
, rd
, cpu_tick_cmpr
);
2778 case 0x18: /* System tick */
2782 r_tickptr
= tcg_temp_new_ptr();
2783 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2784 offsetof(CPUSPARCState
, stick
));
2785 gen_helper_tick_get_count(cpu_dst
, r_tickptr
);
2786 tcg_temp_free_ptr(r_tickptr
);
2787 gen_store_gpr(dc
, rd
, cpu_dst
);
2790 case 0x19: /* System tick compare */
2791 gen_store_gpr(dc
, rd
, cpu_stick_cmpr
);
2793 case 0x10: /* Performance Control */
2794 case 0x11: /* Performance Instrumentation Counter */
2795 case 0x12: /* Dispatch Control */
2796 case 0x14: /* Softint set, WO */
2797 case 0x15: /* Softint clear, WO */
2802 #if !defined(CONFIG_USER_ONLY)
2803 } else if (xop
== 0x29) { /* rdpsr / UA2005 rdhpr */
2804 #ifndef TARGET_SPARC64
2805 if (!supervisor(dc
)) {
2809 gen_helper_rdpsr(cpu_dst
, cpu_env
);
2811 CHECK_IU_FEATURE(dc
, HYPV
);
2812 if (!hypervisor(dc
))
2814 rs1
= GET_FIELD(insn
, 13, 17);
2817 // gen_op_rdhpstate();
2820 // gen_op_rdhtstate();
2823 tcg_gen_mov_tl(cpu_dst
, cpu_hintp
);
2826 tcg_gen_mov_tl(cpu_dst
, cpu_htba
);
2829 tcg_gen_mov_tl(cpu_dst
, cpu_hver
);
2831 case 31: // hstick_cmpr
2832 tcg_gen_mov_tl(cpu_dst
, cpu_hstick_cmpr
);
2838 gen_store_gpr(dc
, rd
, cpu_dst
);
2840 } else if (xop
== 0x2a) { /* rdwim / V9 rdpr */
2841 if (!supervisor(dc
))
2843 #ifdef TARGET_SPARC64
2844 rs1
= GET_FIELD(insn
, 13, 17);
2850 r_tsptr
= tcg_temp_new_ptr();
2851 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2852 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2853 offsetof(trap_state
, tpc
));
2854 tcg_temp_free_ptr(r_tsptr
);
2861 r_tsptr
= tcg_temp_new_ptr();
2862 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2863 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2864 offsetof(trap_state
, tnpc
));
2865 tcg_temp_free_ptr(r_tsptr
);
2872 r_tsptr
= tcg_temp_new_ptr();
2873 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2874 tcg_gen_ld_tl(cpu_tmp0
, r_tsptr
,
2875 offsetof(trap_state
, tstate
));
2876 tcg_temp_free_ptr(r_tsptr
);
2881 TCGv_ptr r_tsptr
= tcg_temp_new_ptr();
2883 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
2884 tcg_gen_ld32s_tl(cpu_tmp0
, r_tsptr
,
2885 offsetof(trap_state
, tt
));
2886 tcg_temp_free_ptr(r_tsptr
);
2893 r_tickptr
= tcg_temp_new_ptr();
2894 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
2895 offsetof(CPUSPARCState
, tick
));
2896 gen_helper_tick_get_count(cpu_tmp0
, r_tickptr
);
2897 tcg_temp_free_ptr(r_tickptr
);
2901 tcg_gen_mov_tl(cpu_tmp0
, cpu_tbr
);
2904 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2905 offsetof(CPUSPARCState
, pstate
));
2908 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2909 offsetof(CPUSPARCState
, tl
));
2912 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2913 offsetof(CPUSPARCState
, psrpil
));
2916 gen_helper_rdcwp(cpu_tmp0
, cpu_env
);
2919 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2920 offsetof(CPUSPARCState
, cansave
));
2922 case 11: // canrestore
2923 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2924 offsetof(CPUSPARCState
, canrestore
));
2926 case 12: // cleanwin
2927 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2928 offsetof(CPUSPARCState
, cleanwin
));
2930 case 13: // otherwin
2931 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2932 offsetof(CPUSPARCState
, otherwin
));
2935 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2936 offsetof(CPUSPARCState
, wstate
));
2938 case 16: // UA2005 gl
2939 CHECK_IU_FEATURE(dc
, GL
);
2940 tcg_gen_ld32s_tl(cpu_tmp0
, cpu_env
,
2941 offsetof(CPUSPARCState
, gl
));
2943 case 26: // UA2005 strand status
2944 CHECK_IU_FEATURE(dc
, HYPV
);
2945 if (!hypervisor(dc
))
2947 tcg_gen_mov_tl(cpu_tmp0
, cpu_ssr
);
2950 tcg_gen_mov_tl(cpu_tmp0
, cpu_ver
);
2957 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_wim
);
2959 gen_store_gpr(dc
, rd
, cpu_tmp0
);
2961 } else if (xop
== 0x2b) { /* rdtbr / V9 flushw */
2962 #ifdef TARGET_SPARC64
2964 gen_helper_flushw(cpu_env
);
2966 if (!supervisor(dc
))
2968 gen_store_gpr(dc
, rd
, cpu_tbr
);
2972 } else if (xop
== 0x34) { /* FPU Operations */
2973 if (gen_trap_ifnofpu(dc
)) {
2976 gen_op_clear_ieee_excp_and_FTT();
2977 rs1
= GET_FIELD(insn
, 13, 17);
2978 rs2
= GET_FIELD(insn
, 27, 31);
2979 xop
= GET_FIELD(insn
, 18, 26);
2982 case 0x1: /* fmovs */
2983 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
2984 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
2986 case 0x5: /* fnegs */
2987 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fnegs
);
2989 case 0x9: /* fabss */
2990 gen_ne_fop_FF(dc
, rd
, rs2
, gen_helper_fabss
);
2992 case 0x29: /* fsqrts */
2993 CHECK_FPU_FEATURE(dc
, FSQRT
);
2994 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fsqrts
);
2996 case 0x2a: /* fsqrtd */
2997 CHECK_FPU_FEATURE(dc
, FSQRT
);
2998 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fsqrtd
);
3000 case 0x2b: /* fsqrtq */
3001 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3002 gen_fop_QQ(dc
, rd
, rs2
, gen_helper_fsqrtq
);
3004 case 0x41: /* fadds */
3005 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fadds
);
3007 case 0x42: /* faddd */
3008 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_faddd
);
3010 case 0x43: /* faddq */
3011 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3012 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_faddq
);
3014 case 0x45: /* fsubs */
3015 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fsubs
);
3017 case 0x46: /* fsubd */
3018 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fsubd
);
3020 case 0x47: /* fsubq */
3021 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3022 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fsubq
);
3024 case 0x49: /* fmuls */
3025 CHECK_FPU_FEATURE(dc
, FMUL
);
3026 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fmuls
);
3028 case 0x4a: /* fmuld */
3029 CHECK_FPU_FEATURE(dc
, FMUL
);
3030 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld
);
3032 case 0x4b: /* fmulq */
3033 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3034 CHECK_FPU_FEATURE(dc
, FMUL
);
3035 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fmulq
);
3037 case 0x4d: /* fdivs */
3038 gen_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fdivs
);
3040 case 0x4e: /* fdivd */
3041 gen_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fdivd
);
3043 case 0x4f: /* fdivq */
3044 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3045 gen_fop_QQQ(dc
, rd
, rs1
, rs2
, gen_helper_fdivq
);
3047 case 0x69: /* fsmuld */
3048 CHECK_FPU_FEATURE(dc
, FSMULD
);
3049 gen_fop_DFF(dc
, rd
, rs1
, rs2
, gen_helper_fsmuld
);
3051 case 0x6e: /* fdmulq */
3052 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3053 gen_fop_QDD(dc
, rd
, rs1
, rs2
, gen_helper_fdmulq
);
3055 case 0xc4: /* fitos */
3056 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fitos
);
3058 case 0xc6: /* fdtos */
3059 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtos
);
3061 case 0xc7: /* fqtos */
3062 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3063 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtos
);
3065 case 0xc8: /* fitod */
3066 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fitod
);
3068 case 0xc9: /* fstod */
3069 gen_ne_fop_DF(dc
, rd
, rs2
, gen_helper_fstod
);
3071 case 0xcb: /* fqtod */
3072 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3073 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtod
);
3075 case 0xcc: /* fitoq */
3076 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3077 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fitoq
);
3079 case 0xcd: /* fstoq */
3080 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3081 gen_ne_fop_QF(dc
, rd
, rs2
, gen_helper_fstoq
);
3083 case 0xce: /* fdtoq */
3084 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3085 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fdtoq
);
3087 case 0xd1: /* fstoi */
3088 gen_fop_FF(dc
, rd
, rs2
, gen_helper_fstoi
);
3090 case 0xd2: /* fdtoi */
3091 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fdtoi
);
3093 case 0xd3: /* fqtoi */
3094 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3095 gen_fop_FQ(dc
, rd
, rs2
, gen_helper_fqtoi
);
3097 #ifdef TARGET_SPARC64
3098 case 0x2: /* V9 fmovd */
3099 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
3100 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
3102 case 0x3: /* V9 fmovq */
3103 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3104 gen_move_Q(rd
, rs2
);
3106 case 0x6: /* V9 fnegd */
3107 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fnegd
);
3109 case 0x7: /* V9 fnegq */
3110 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3111 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fnegq
);
3113 case 0xa: /* V9 fabsd */
3114 gen_ne_fop_DD(dc
, rd
, rs2
, gen_helper_fabsd
);
3116 case 0xb: /* V9 fabsq */
3117 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3118 gen_ne_fop_QQ(dc
, rd
, rs2
, gen_helper_fabsq
);
3120 case 0x81: /* V9 fstox */
3121 gen_fop_DF(dc
, rd
, rs2
, gen_helper_fstox
);
3123 case 0x82: /* V9 fdtox */
3124 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fdtox
);
3126 case 0x83: /* V9 fqtox */
3127 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3128 gen_fop_DQ(dc
, rd
, rs2
, gen_helper_fqtox
);
3130 case 0x84: /* V9 fxtos */
3131 gen_fop_FD(dc
, rd
, rs2
, gen_helper_fxtos
);
3133 case 0x88: /* V9 fxtod */
3134 gen_fop_DD(dc
, rd
, rs2
, gen_helper_fxtod
);
3136 case 0x8c: /* V9 fxtoq */
3137 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3138 gen_ne_fop_QD(dc
, rd
, rs2
, gen_helper_fxtoq
);
3144 } else if (xop
== 0x35) { /* FPU Operations */
3145 #ifdef TARGET_SPARC64
3148 if (gen_trap_ifnofpu(dc
)) {
3151 gen_op_clear_ieee_excp_and_FTT();
3152 rs1
= GET_FIELD(insn
, 13, 17);
3153 rs2
= GET_FIELD(insn
, 27, 31);
3154 xop
= GET_FIELD(insn
, 18, 26);
3157 #ifdef TARGET_SPARC64
3161 cond = GET_FIELD_SP(insn, 14, 17); \
3162 cpu_src1 = get_src1(dc, insn); \
3163 gen_compare_reg(&cmp, cond, cpu_src1); \
3164 gen_fmov##sz(dc, &cmp, rd, rs2); \
3165 free_compare(&cmp); \
3168 if ((xop
& 0x11f) == 0x005) { /* V9 fmovsr */
3171 } else if ((xop
& 0x11f) == 0x006) { // V9 fmovdr
3174 } else if ((xop
& 0x11f) == 0x007) { // V9 fmovqr
3175 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3182 #ifdef TARGET_SPARC64
3183 #define FMOVCC(fcc, sz) \
3186 cond = GET_FIELD_SP(insn, 14, 17); \
3187 gen_fcompare(&cmp, fcc, cond); \
3188 gen_fmov##sz(dc, &cmp, rd, rs2); \
3189 free_compare(&cmp); \
3192 case 0x001: /* V9 fmovscc %fcc0 */
3195 case 0x002: /* V9 fmovdcc %fcc0 */
3198 case 0x003: /* V9 fmovqcc %fcc0 */
3199 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3202 case 0x041: /* V9 fmovscc %fcc1 */
3205 case 0x042: /* V9 fmovdcc %fcc1 */
3208 case 0x043: /* V9 fmovqcc %fcc1 */
3209 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3212 case 0x081: /* V9 fmovscc %fcc2 */
3215 case 0x082: /* V9 fmovdcc %fcc2 */
3218 case 0x083: /* V9 fmovqcc %fcc2 */
3219 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3222 case 0x0c1: /* V9 fmovscc %fcc3 */
3225 case 0x0c2: /* V9 fmovdcc %fcc3 */
3228 case 0x0c3: /* V9 fmovqcc %fcc3 */
3229 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3233 #define FMOVCC(xcc, sz) \
3236 cond = GET_FIELD_SP(insn, 14, 17); \
3237 gen_compare(&cmp, xcc, cond, dc); \
3238 gen_fmov##sz(dc, &cmp, rd, rs2); \
3239 free_compare(&cmp); \
3242 case 0x101: /* V9 fmovscc %icc */
3245 case 0x102: /* V9 fmovdcc %icc */
3248 case 0x103: /* V9 fmovqcc %icc */
3249 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3252 case 0x181: /* V9 fmovscc %xcc */
3255 case 0x182: /* V9 fmovdcc %xcc */
3258 case 0x183: /* V9 fmovqcc %xcc */
3259 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3264 case 0x51: /* fcmps, V9 %fcc */
3265 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3266 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3267 gen_op_fcmps(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3269 case 0x52: /* fcmpd, V9 %fcc */
3270 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3271 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3272 gen_op_fcmpd(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3274 case 0x53: /* fcmpq, V9 %fcc */
3275 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3276 gen_op_load_fpr_QT0(QFPREG(rs1
));
3277 gen_op_load_fpr_QT1(QFPREG(rs2
));
3278 gen_op_fcmpq(rd
& 3);
3280 case 0x55: /* fcmpes, V9 %fcc */
3281 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
3282 cpu_src2_32
= gen_load_fpr_F(dc
, rs2
);
3283 gen_op_fcmpes(rd
& 3, cpu_src1_32
, cpu_src2_32
);
3285 case 0x56: /* fcmped, V9 %fcc */
3286 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
3287 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
3288 gen_op_fcmped(rd
& 3, cpu_src1_64
, cpu_src2_64
);
3290 case 0x57: /* fcmpeq, V9 %fcc */
3291 CHECK_FPU_FEATURE(dc
, FLOAT128
);
3292 gen_op_load_fpr_QT0(QFPREG(rs1
));
3293 gen_op_load_fpr_QT1(QFPREG(rs2
));
3294 gen_op_fcmpeq(rd
& 3);
3299 } else if (xop
== 0x2) {
3300 TCGv dst
= gen_dest_gpr(dc
, rd
);
3301 rs1
= GET_FIELD(insn
, 13, 17);
3303 /* clr/mov shortcut : or %g0, x, y -> mov x, y */
3304 if (IS_IMM
) { /* immediate */
3305 simm
= GET_FIELDs(insn
, 19, 31);
3306 tcg_gen_movi_tl(dst
, simm
);
3307 gen_store_gpr(dc
, rd
, dst
);
3308 } else { /* register */
3309 rs2
= GET_FIELD(insn
, 27, 31);
3311 tcg_gen_movi_tl(dst
, 0);
3312 gen_store_gpr(dc
, rd
, dst
);
3314 cpu_src2
= gen_load_gpr(dc
, rs2
);
3315 gen_store_gpr(dc
, rd
, cpu_src2
);
3319 cpu_src1
= get_src1(dc
, insn
);
3320 if (IS_IMM
) { /* immediate */
3321 simm
= GET_FIELDs(insn
, 19, 31);
3322 tcg_gen_ori_tl(dst
, cpu_src1
, simm
);
3323 gen_store_gpr(dc
, rd
, dst
);
3324 } else { /* register */
3325 rs2
= GET_FIELD(insn
, 27, 31);
3327 /* mov shortcut: or x, %g0, y -> mov x, y */
3328 gen_store_gpr(dc
, rd
, cpu_src1
);
3330 cpu_src2
= gen_load_gpr(dc
, rs2
);
3331 tcg_gen_or_tl(dst
, cpu_src1
, cpu_src2
);
3332 gen_store_gpr(dc
, rd
, dst
);
3336 #ifdef TARGET_SPARC64
3337 } else if (xop
== 0x25) { /* sll, V9 sllx */
3338 cpu_src1
= get_src1(dc
, insn
);
3339 if (IS_IMM
) { /* immediate */
3340 simm
= GET_FIELDs(insn
, 20, 31);
3341 if (insn
& (1 << 12)) {
3342 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3344 tcg_gen_shli_i64(cpu_dst
, cpu_src1
, simm
& 0x1f);
3346 } else { /* register */
3347 rs2
= GET_FIELD(insn
, 27, 31);
3348 cpu_src2
= gen_load_gpr(dc
, rs2
);
3349 if (insn
& (1 << 12)) {
3350 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3352 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3354 tcg_gen_shl_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3356 gen_store_gpr(dc
, rd
, cpu_dst
);
3357 } else if (xop
== 0x26) { /* srl, V9 srlx */
3358 cpu_src1
= get_src1(dc
, insn
);
3359 if (IS_IMM
) { /* immediate */
3360 simm
= GET_FIELDs(insn
, 20, 31);
3361 if (insn
& (1 << 12)) {
3362 tcg_gen_shri_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3364 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3365 tcg_gen_shri_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3367 } else { /* register */
3368 rs2
= GET_FIELD(insn
, 27, 31);
3369 cpu_src2
= gen_load_gpr(dc
, rs2
);
3370 if (insn
& (1 << 12)) {
3371 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3372 tcg_gen_shr_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3374 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3375 tcg_gen_andi_i64(cpu_dst
, cpu_src1
, 0xffffffffULL
);
3376 tcg_gen_shr_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3379 gen_store_gpr(dc
, rd
, cpu_dst
);
3380 } else if (xop
== 0x27) { /* sra, V9 srax */
3381 cpu_src1
= get_src1(dc
, insn
);
3382 if (IS_IMM
) { /* immediate */
3383 simm
= GET_FIELDs(insn
, 20, 31);
3384 if (insn
& (1 << 12)) {
3385 tcg_gen_sari_i64(cpu_dst
, cpu_src1
, simm
& 0x3f);
3387 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3388 tcg_gen_sari_i64(cpu_dst
, cpu_dst
, simm
& 0x1f);
3390 } else { /* register */
3391 rs2
= GET_FIELD(insn
, 27, 31);
3392 cpu_src2
= gen_load_gpr(dc
, rs2
);
3393 if (insn
& (1 << 12)) {
3394 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x3f);
3395 tcg_gen_sar_i64(cpu_dst
, cpu_src1
, cpu_tmp0
);
3397 tcg_gen_andi_i64(cpu_tmp0
, cpu_src2
, 0x1f);
3398 tcg_gen_ext32s_i64(cpu_dst
, cpu_src1
);
3399 tcg_gen_sar_i64(cpu_dst
, cpu_dst
, cpu_tmp0
);
3402 gen_store_gpr(dc
, rd
, cpu_dst
);
3404 } else if (xop
< 0x36) {
3406 cpu_src1
= get_src1(dc
, insn
);
3407 cpu_src2
= get_src2(dc
, insn
);
3408 switch (xop
& ~0x10) {
3411 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3412 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3413 dc
->cc_op
= CC_OP_ADD
;
3415 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3419 tcg_gen_and_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3421 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3422 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3423 dc
->cc_op
= CC_OP_LOGIC
;
3427 tcg_gen_or_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3429 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3430 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3431 dc
->cc_op
= CC_OP_LOGIC
;
3435 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3437 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3438 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3439 dc
->cc_op
= CC_OP_LOGIC
;
3444 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3445 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_SUB
);
3446 dc
->cc_op
= CC_OP_SUB
;
3448 tcg_gen_sub_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3451 case 0x5: /* andn */
3452 tcg_gen_andc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3454 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3455 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3456 dc
->cc_op
= CC_OP_LOGIC
;
3460 tcg_gen_orc_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3462 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3463 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3464 dc
->cc_op
= CC_OP_LOGIC
;
3467 case 0x7: /* xorn */
3468 tcg_gen_eqv_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3470 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3471 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3472 dc
->cc_op
= CC_OP_LOGIC
;
3475 case 0x8: /* addx, V9 addc */
3476 gen_op_addx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3479 #ifdef TARGET_SPARC64
3480 case 0x9: /* V9 mulx */
3481 tcg_gen_mul_i64(cpu_dst
, cpu_src1
, cpu_src2
);
3484 case 0xa: /* umul */
3485 CHECK_IU_FEATURE(dc
, MUL
);
3486 gen_op_umul(cpu_dst
, cpu_src1
, cpu_src2
);
3488 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3489 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3490 dc
->cc_op
= CC_OP_LOGIC
;
3493 case 0xb: /* smul */
3494 CHECK_IU_FEATURE(dc
, MUL
);
3495 gen_op_smul(cpu_dst
, cpu_src1
, cpu_src2
);
3497 tcg_gen_mov_tl(cpu_cc_dst
, cpu_dst
);
3498 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_LOGIC
);
3499 dc
->cc_op
= CC_OP_LOGIC
;
3502 case 0xc: /* subx, V9 subc */
3503 gen_op_subx_int(dc
, cpu_dst
, cpu_src1
, cpu_src2
,
3506 #ifdef TARGET_SPARC64
3507 case 0xd: /* V9 udivx */
3508 gen_helper_udivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
3511 case 0xe: /* udiv */
3512 CHECK_IU_FEATURE(dc
, DIV
);
3514 gen_helper_udiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
3516 dc
->cc_op
= CC_OP_DIV
;
3518 gen_helper_udiv(cpu_dst
, cpu_env
, cpu_src1
,
3522 case 0xf: /* sdiv */
3523 CHECK_IU_FEATURE(dc
, DIV
);
3525 gen_helper_sdiv_cc(cpu_dst
, cpu_env
, cpu_src1
,
3527 dc
->cc_op
= CC_OP_DIV
;
3529 gen_helper_sdiv(cpu_dst
, cpu_env
, cpu_src1
,
3536 gen_store_gpr(dc
, rd
, cpu_dst
);
3538 cpu_src1
= get_src1(dc
, insn
);
3539 cpu_src2
= get_src2(dc
, insn
);
3541 case 0x20: /* taddcc */
3542 gen_op_add_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3543 gen_store_gpr(dc
, rd
, cpu_dst
);
3544 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TADD
);
3545 dc
->cc_op
= CC_OP_TADD
;
3547 case 0x21: /* tsubcc */
3548 gen_op_sub_cc(cpu_dst
, cpu_src1
, cpu_src2
);
3549 gen_store_gpr(dc
, rd
, cpu_dst
);
3550 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_TSUB
);
3551 dc
->cc_op
= CC_OP_TSUB
;
3553 case 0x22: /* taddcctv */
3554 gen_helper_taddcctv(cpu_dst
, cpu_env
,
3555 cpu_src1
, cpu_src2
);
3556 gen_store_gpr(dc
, rd
, cpu_dst
);
3557 dc
->cc_op
= CC_OP_TADDTV
;
3559 case 0x23: /* tsubcctv */
3560 gen_helper_tsubcctv(cpu_dst
, cpu_env
,
3561 cpu_src1
, cpu_src2
);
3562 gen_store_gpr(dc
, rd
, cpu_dst
);
3563 dc
->cc_op
= CC_OP_TSUBTV
;
3565 case 0x24: /* mulscc */
3567 gen_op_mulscc(cpu_dst
, cpu_src1
, cpu_src2
);
3568 gen_store_gpr(dc
, rd
, cpu_dst
);
3569 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_ADD
);
3570 dc
->cc_op
= CC_OP_ADD
;
3572 #ifndef TARGET_SPARC64
3573 case 0x25: /* sll */
3574 if (IS_IMM
) { /* immediate */
3575 simm
= GET_FIELDs(insn
, 20, 31);
3576 tcg_gen_shli_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3577 } else { /* register */
3578 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3579 tcg_gen_shl_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3581 gen_store_gpr(dc
, rd
, cpu_dst
);
3583 case 0x26: /* srl */
3584 if (IS_IMM
) { /* immediate */
3585 simm
= GET_FIELDs(insn
, 20, 31);
3586 tcg_gen_shri_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3587 } else { /* register */
3588 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3589 tcg_gen_shr_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3591 gen_store_gpr(dc
, rd
, cpu_dst
);
3593 case 0x27: /* sra */
3594 if (IS_IMM
) { /* immediate */
3595 simm
= GET_FIELDs(insn
, 20, 31);
3596 tcg_gen_sari_tl(cpu_dst
, cpu_src1
, simm
& 0x1f);
3597 } else { /* register */
3598 tcg_gen_andi_tl(cpu_tmp0
, cpu_src2
, 0x1f);
3599 tcg_gen_sar_tl(cpu_dst
, cpu_src1
, cpu_tmp0
);
3601 gen_store_gpr(dc
, rd
, cpu_dst
);
3608 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3609 tcg_gen_andi_tl(cpu_y
, cpu_tmp0
, 0xffffffff);
3611 #ifndef TARGET_SPARC64
3612 case 0x01 ... 0x0f: /* undefined in the
3616 case 0x10 ... 0x1f: /* implementation-dependent
3622 case 0x2: /* V9 wrccr */
3623 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3624 gen_helper_wrccr(cpu_env
, cpu_dst
);
3625 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3626 dc
->cc_op
= CC_OP_FLAGS
;
3628 case 0x3: /* V9 wrasi */
3629 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3630 tcg_gen_andi_tl(cpu_dst
, cpu_dst
, 0xff);
3631 tcg_gen_trunc_tl_i32(cpu_asi
, cpu_dst
);
3633 case 0x6: /* V9 wrfprs */
3634 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3635 tcg_gen_trunc_tl_i32(cpu_fprs
, cpu_dst
);
3641 case 0xf: /* V9 sir, nop if user */
3642 #if !defined(CONFIG_USER_ONLY)
3643 if (supervisor(dc
)) {
3648 case 0x13: /* Graphics Status */
3649 if (gen_trap_ifnofpu(dc
)) {
3652 tcg_gen_xor_tl(cpu_gsr
, cpu_src1
, cpu_src2
);
3654 case 0x14: /* Softint set */
3655 if (!supervisor(dc
))
3657 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3658 gen_helper_set_softint(cpu_env
, cpu_tmp0
);
3660 case 0x15: /* Softint clear */
3661 if (!supervisor(dc
))
3663 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3664 gen_helper_clear_softint(cpu_env
, cpu_tmp0
);
3666 case 0x16: /* Softint write */
3667 if (!supervisor(dc
))
3669 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3670 gen_helper_write_softint(cpu_env
, cpu_tmp0
);
3672 case 0x17: /* Tick compare */
3673 #if !defined(CONFIG_USER_ONLY)
3674 if (!supervisor(dc
))
3680 tcg_gen_xor_tl(cpu_tick_cmpr
, cpu_src1
,
3682 r_tickptr
= tcg_temp_new_ptr();
3683 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3684 offsetof(CPUSPARCState
, tick
));
3685 gen_helper_tick_set_limit(r_tickptr
,
3687 tcg_temp_free_ptr(r_tickptr
);
3690 case 0x18: /* System tick */
3691 #if !defined(CONFIG_USER_ONLY)
3692 if (!supervisor(dc
))
3698 tcg_gen_xor_tl(cpu_dst
, cpu_src1
,
3700 r_tickptr
= tcg_temp_new_ptr();
3701 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3702 offsetof(CPUSPARCState
, stick
));
3703 gen_helper_tick_set_count(r_tickptr
,
3705 tcg_temp_free_ptr(r_tickptr
);
3708 case 0x19: /* System tick compare */
3709 #if !defined(CONFIG_USER_ONLY)
3710 if (!supervisor(dc
))
3716 tcg_gen_xor_tl(cpu_stick_cmpr
, cpu_src1
,
3718 r_tickptr
= tcg_temp_new_ptr();
3719 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3720 offsetof(CPUSPARCState
, stick
));
3721 gen_helper_tick_set_limit(r_tickptr
,
3723 tcg_temp_free_ptr(r_tickptr
);
3727 case 0x10: /* Performance Control */
3728 case 0x11: /* Performance Instrumentation
3730 case 0x12: /* Dispatch Control */
3737 #if !defined(CONFIG_USER_ONLY)
3738 case 0x31: /* wrpsr, V9 saved, restored */
3740 if (!supervisor(dc
))
3742 #ifdef TARGET_SPARC64
3745 gen_helper_saved(cpu_env
);
3748 gen_helper_restored(cpu_env
);
3750 case 2: /* UA2005 allclean */
3751 case 3: /* UA2005 otherw */
3752 case 4: /* UA2005 normalw */
3753 case 5: /* UA2005 invalw */
3759 tcg_gen_xor_tl(cpu_dst
, cpu_src1
, cpu_src2
);
3760 gen_helper_wrpsr(cpu_env
, cpu_dst
);
3761 tcg_gen_movi_i32(cpu_cc_op
, CC_OP_FLAGS
);
3762 dc
->cc_op
= CC_OP_FLAGS
;
3770 case 0x32: /* wrwim, V9 wrpr */
3772 if (!supervisor(dc
))
3774 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3775 #ifdef TARGET_SPARC64
3781 r_tsptr
= tcg_temp_new_ptr();
3782 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3783 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3784 offsetof(trap_state
, tpc
));
3785 tcg_temp_free_ptr(r_tsptr
);
3792 r_tsptr
= tcg_temp_new_ptr();
3793 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3794 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3795 offsetof(trap_state
, tnpc
));
3796 tcg_temp_free_ptr(r_tsptr
);
3803 r_tsptr
= tcg_temp_new_ptr();
3804 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3805 tcg_gen_st_tl(cpu_tmp0
, r_tsptr
,
3806 offsetof(trap_state
,
3808 tcg_temp_free_ptr(r_tsptr
);
3815 r_tsptr
= tcg_temp_new_ptr();
3816 gen_load_trap_state_at_tl(r_tsptr
, cpu_env
);
3817 tcg_gen_st32_tl(cpu_tmp0
, r_tsptr
,
3818 offsetof(trap_state
, tt
));
3819 tcg_temp_free_ptr(r_tsptr
);
3826 r_tickptr
= tcg_temp_new_ptr();
3827 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3828 offsetof(CPUSPARCState
, tick
));
3829 gen_helper_tick_set_count(r_tickptr
,
3831 tcg_temp_free_ptr(r_tickptr
);
3835 tcg_gen_mov_tl(cpu_tbr
, cpu_tmp0
);
3839 gen_helper_wrpstate(cpu_env
, cpu_tmp0
);
3840 dc
->npc
= DYNAMIC_PC
;
3844 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3845 offsetof(CPUSPARCState
, tl
));
3846 dc
->npc
= DYNAMIC_PC
;
3849 gen_helper_wrpil(cpu_env
, cpu_tmp0
);
3852 gen_helper_wrcwp(cpu_env
, cpu_tmp0
);
3855 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3856 offsetof(CPUSPARCState
,
3859 case 11: // canrestore
3860 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3861 offsetof(CPUSPARCState
,
3864 case 12: // cleanwin
3865 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3866 offsetof(CPUSPARCState
,
3869 case 13: // otherwin
3870 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3871 offsetof(CPUSPARCState
,
3875 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3876 offsetof(CPUSPARCState
,
3879 case 16: // UA2005 gl
3880 CHECK_IU_FEATURE(dc
, GL
);
3881 tcg_gen_st32_tl(cpu_tmp0
, cpu_env
,
3882 offsetof(CPUSPARCState
, gl
));
3884 case 26: // UA2005 strand status
3885 CHECK_IU_FEATURE(dc
, HYPV
);
3886 if (!hypervisor(dc
))
3888 tcg_gen_mov_tl(cpu_ssr
, cpu_tmp0
);
3894 tcg_gen_trunc_tl_i32(cpu_wim
, cpu_tmp0
);
3895 if (dc
->def
->nwindows
!= 32) {
3896 tcg_gen_andi_tl(cpu_wim
, cpu_wim
,
3897 (1 << dc
->def
->nwindows
) - 1);
3902 case 0x33: /* wrtbr, UA2005 wrhpr */
3904 #ifndef TARGET_SPARC64
3905 if (!supervisor(dc
))
3907 tcg_gen_xor_tl(cpu_tbr
, cpu_src1
, cpu_src2
);
3909 CHECK_IU_FEATURE(dc
, HYPV
);
3910 if (!hypervisor(dc
))
3912 tcg_gen_xor_tl(cpu_tmp0
, cpu_src1
, cpu_src2
);
3915 // XXX gen_op_wrhpstate();
3922 // XXX gen_op_wrhtstate();
3925 tcg_gen_mov_tl(cpu_hintp
, cpu_tmp0
);
3928 tcg_gen_mov_tl(cpu_htba
, cpu_tmp0
);
3930 case 31: // hstick_cmpr
3934 tcg_gen_mov_tl(cpu_hstick_cmpr
, cpu_tmp0
);
3935 r_tickptr
= tcg_temp_new_ptr();
3936 tcg_gen_ld_ptr(r_tickptr
, cpu_env
,
3937 offsetof(CPUSPARCState
, hstick
));
3938 gen_helper_tick_set_limit(r_tickptr
,
3940 tcg_temp_free_ptr(r_tickptr
);
3943 case 6: // hver readonly
3951 #ifdef TARGET_SPARC64
3952 case 0x2c: /* V9 movcc */
3954 int cc
= GET_FIELD_SP(insn
, 11, 12);
3955 int cond
= GET_FIELD_SP(insn
, 14, 17);
3959 if (insn
& (1 << 18)) {
3961 gen_compare(&cmp
, 0, cond
, dc
);
3962 } else if (cc
== 2) {
3963 gen_compare(&cmp
, 1, cond
, dc
);
3968 gen_fcompare(&cmp
, cc
, cond
);
3971 /* The get_src2 above loaded the normal 13-bit
3972 immediate field, not the 11-bit field we have
3973 in movcc. But it did handle the reg case. */
3975 simm
= GET_FIELD_SPs(insn
, 0, 10);
3976 tcg_gen_movi_tl(cpu_src2
, simm
);
3979 dst
= gen_load_gpr(dc
, rd
);
3980 tcg_gen_movcond_tl(cmp
.cond
, dst
,
3984 gen_store_gpr(dc
, rd
, dst
);
3987 case 0x2d: /* V9 sdivx */
3988 gen_helper_sdivx(cpu_dst
, cpu_env
, cpu_src1
, cpu_src2
);
3989 gen_store_gpr(dc
, rd
, cpu_dst
);
3991 case 0x2e: /* V9 popc */
3992 gen_helper_popc(cpu_dst
, cpu_src2
);
3993 gen_store_gpr(dc
, rd
, cpu_dst
);
3995 case 0x2f: /* V9 movr */
3997 int cond
= GET_FIELD_SP(insn
, 10, 12);
4001 gen_compare_reg(&cmp
, cond
, cpu_src1
);
4003 /* The get_src2 above loaded the normal 13-bit
4004 immediate field, not the 10-bit field we have
4005 in movr. But it did handle the reg case. */
4007 simm
= GET_FIELD_SPs(insn
, 0, 9);
4008 tcg_gen_movi_tl(cpu_src2
, simm
);
4011 dst
= gen_load_gpr(dc
, rd
);
4012 tcg_gen_movcond_tl(cmp
.cond
, dst
,
4016 gen_store_gpr(dc
, rd
, dst
);
4024 } else if (xop
== 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
4025 #ifdef TARGET_SPARC64
4026 int opf
= GET_FIELD_SP(insn
, 5, 13);
4027 rs1
= GET_FIELD(insn
, 13, 17);
4028 rs2
= GET_FIELD(insn
, 27, 31);
4029 if (gen_trap_ifnofpu(dc
)) {
4034 case 0x000: /* VIS I edge8cc */
4035 CHECK_FPU_FEATURE(dc
, VIS1
);
4036 cpu_src1
= gen_load_gpr(dc
, rs1
);
4037 cpu_src2
= gen_load_gpr(dc
, rs2
);
4038 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 0);
4039 gen_store_gpr(dc
, rd
, cpu_dst
);
4041 case 0x001: /* VIS II edge8n */
4042 CHECK_FPU_FEATURE(dc
, VIS2
);
4043 cpu_src1
= gen_load_gpr(dc
, rs1
);
4044 cpu_src2
= gen_load_gpr(dc
, rs2
);
4045 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 0);
4046 gen_store_gpr(dc
, rd
, cpu_dst
);
4048 case 0x002: /* VIS I edge8lcc */
4049 CHECK_FPU_FEATURE(dc
, VIS1
);
4050 cpu_src1
= gen_load_gpr(dc
, rs1
);
4051 cpu_src2
= gen_load_gpr(dc
, rs2
);
4052 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 1, 1);
4053 gen_store_gpr(dc
, rd
, cpu_dst
);
4055 case 0x003: /* VIS II edge8ln */
4056 CHECK_FPU_FEATURE(dc
, VIS2
);
4057 cpu_src1
= gen_load_gpr(dc
, rs1
);
4058 cpu_src2
= gen_load_gpr(dc
, rs2
);
4059 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 8, 0, 1);
4060 gen_store_gpr(dc
, rd
, cpu_dst
);
4062 case 0x004: /* VIS I edge16cc */
4063 CHECK_FPU_FEATURE(dc
, VIS1
);
4064 cpu_src1
= gen_load_gpr(dc
, rs1
);
4065 cpu_src2
= gen_load_gpr(dc
, rs2
);
4066 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 0);
4067 gen_store_gpr(dc
, rd
, cpu_dst
);
4069 case 0x005: /* VIS II edge16n */
4070 CHECK_FPU_FEATURE(dc
, VIS2
);
4071 cpu_src1
= gen_load_gpr(dc
, rs1
);
4072 cpu_src2
= gen_load_gpr(dc
, rs2
);
4073 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 0);
4074 gen_store_gpr(dc
, rd
, cpu_dst
);
4076 case 0x006: /* VIS I edge16lcc */
4077 CHECK_FPU_FEATURE(dc
, VIS1
);
4078 cpu_src1
= gen_load_gpr(dc
, rs1
);
4079 cpu_src2
= gen_load_gpr(dc
, rs2
);
4080 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 1, 1);
4081 gen_store_gpr(dc
, rd
, cpu_dst
);
4083 case 0x007: /* VIS II edge16ln */
4084 CHECK_FPU_FEATURE(dc
, VIS2
);
4085 cpu_src1
= gen_load_gpr(dc
, rs1
);
4086 cpu_src2
= gen_load_gpr(dc
, rs2
);
4087 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 16, 0, 1);
4088 gen_store_gpr(dc
, rd
, cpu_dst
);
4090 case 0x008: /* VIS I edge32cc */
4091 CHECK_FPU_FEATURE(dc
, VIS1
);
4092 cpu_src1
= gen_load_gpr(dc
, rs1
);
4093 cpu_src2
= gen_load_gpr(dc
, rs2
);
4094 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 0);
4095 gen_store_gpr(dc
, rd
, cpu_dst
);
4097 case 0x009: /* VIS II edge32n */
4098 CHECK_FPU_FEATURE(dc
, VIS2
);
4099 cpu_src1
= gen_load_gpr(dc
, rs1
);
4100 cpu_src2
= gen_load_gpr(dc
, rs2
);
4101 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 0);
4102 gen_store_gpr(dc
, rd
, cpu_dst
);
4104 case 0x00a: /* VIS I edge32lcc */
4105 CHECK_FPU_FEATURE(dc
, VIS1
);
4106 cpu_src1
= gen_load_gpr(dc
, rs1
);
4107 cpu_src2
= gen_load_gpr(dc
, rs2
);
4108 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 1, 1);
4109 gen_store_gpr(dc
, rd
, cpu_dst
);
4111 case 0x00b: /* VIS II edge32ln */
4112 CHECK_FPU_FEATURE(dc
, VIS2
);
4113 cpu_src1
= gen_load_gpr(dc
, rs1
);
4114 cpu_src2
= gen_load_gpr(dc
, rs2
);
4115 gen_edge(dc
, cpu_dst
, cpu_src1
, cpu_src2
, 32, 0, 1);
4116 gen_store_gpr(dc
, rd
, cpu_dst
);
4118 case 0x010: /* VIS I array8 */
4119 CHECK_FPU_FEATURE(dc
, VIS1
);
4120 cpu_src1
= gen_load_gpr(dc
, rs1
);
4121 cpu_src2
= gen_load_gpr(dc
, rs2
);
4122 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4123 gen_store_gpr(dc
, rd
, cpu_dst
);
4125 case 0x012: /* VIS I array16 */
4126 CHECK_FPU_FEATURE(dc
, VIS1
);
4127 cpu_src1
= gen_load_gpr(dc
, rs1
);
4128 cpu_src2
= gen_load_gpr(dc
, rs2
);
4129 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4130 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 1);
4131 gen_store_gpr(dc
, rd
, cpu_dst
);
4133 case 0x014: /* VIS I array32 */
4134 CHECK_FPU_FEATURE(dc
, VIS1
);
4135 cpu_src1
= gen_load_gpr(dc
, rs1
);
4136 cpu_src2
= gen_load_gpr(dc
, rs2
);
4137 gen_helper_array8(cpu_dst
, cpu_src1
, cpu_src2
);
4138 tcg_gen_shli_i64(cpu_dst
, cpu_dst
, 2);
4139 gen_store_gpr(dc
, rd
, cpu_dst
);
4141 case 0x018: /* VIS I alignaddr */
4142 CHECK_FPU_FEATURE(dc
, VIS1
);
4143 cpu_src1
= gen_load_gpr(dc
, rs1
);
4144 cpu_src2
= gen_load_gpr(dc
, rs2
);
4145 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 0);
4146 gen_store_gpr(dc
, rd
, cpu_dst
);
4148 case 0x01a: /* VIS I alignaddrl */
4149 CHECK_FPU_FEATURE(dc
, VIS1
);
4150 cpu_src1
= gen_load_gpr(dc
, rs1
);
4151 cpu_src2
= gen_load_gpr(dc
, rs2
);
4152 gen_alignaddr(cpu_dst
, cpu_src1
, cpu_src2
, 1);
4153 gen_store_gpr(dc
, rd
, cpu_dst
);
4155 case 0x019: /* VIS II bmask */
4156 CHECK_FPU_FEATURE(dc
, VIS2
);
4157 cpu_src1
= gen_load_gpr(dc
, rs1
);
4158 cpu_src2
= gen_load_gpr(dc
, rs2
);
4159 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4160 tcg_gen_deposit_tl(cpu_gsr
, cpu_gsr
, cpu_dst
, 32, 32);
4161 gen_store_gpr(dc
, rd
, cpu_dst
);
4163 case 0x020: /* VIS I fcmple16 */
4164 CHECK_FPU_FEATURE(dc
, VIS1
);
4165 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4166 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4167 gen_helper_fcmple16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4168 gen_store_gpr(dc
, rd
, cpu_dst
);
4170 case 0x022: /* VIS I fcmpne16 */
4171 CHECK_FPU_FEATURE(dc
, VIS1
);
4172 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4173 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4174 gen_helper_fcmpne16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4175 gen_store_gpr(dc
, rd
, cpu_dst
);
4177 case 0x024: /* VIS I fcmple32 */
4178 CHECK_FPU_FEATURE(dc
, VIS1
);
4179 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4180 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4181 gen_helper_fcmple32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4182 gen_store_gpr(dc
, rd
, cpu_dst
);
4184 case 0x026: /* VIS I fcmpne32 */
4185 CHECK_FPU_FEATURE(dc
, VIS1
);
4186 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4187 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4188 gen_helper_fcmpne32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4189 gen_store_gpr(dc
, rd
, cpu_dst
);
4191 case 0x028: /* VIS I fcmpgt16 */
4192 CHECK_FPU_FEATURE(dc
, VIS1
);
4193 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4194 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4195 gen_helper_fcmpgt16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4196 gen_store_gpr(dc
, rd
, cpu_dst
);
4198 case 0x02a: /* VIS I fcmpeq16 */
4199 CHECK_FPU_FEATURE(dc
, VIS1
);
4200 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4201 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4202 gen_helper_fcmpeq16(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4203 gen_store_gpr(dc
, rd
, cpu_dst
);
4205 case 0x02c: /* VIS I fcmpgt32 */
4206 CHECK_FPU_FEATURE(dc
, VIS1
);
4207 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4208 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4209 gen_helper_fcmpgt32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4210 gen_store_gpr(dc
, rd
, cpu_dst
);
4212 case 0x02e: /* VIS I fcmpeq32 */
4213 CHECK_FPU_FEATURE(dc
, VIS1
);
4214 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4215 cpu_src2_64
= gen_load_fpr_D(dc
, rs2
);
4216 gen_helper_fcmpeq32(cpu_dst
, cpu_src1_64
, cpu_src2_64
);
4217 gen_store_gpr(dc
, rd
, cpu_dst
);
4219 case 0x031: /* VIS I fmul8x16 */
4220 CHECK_FPU_FEATURE(dc
, VIS1
);
4221 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16
);
4223 case 0x033: /* VIS I fmul8x16au */
4224 CHECK_FPU_FEATURE(dc
, VIS1
);
4225 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16au
);
4227 case 0x035: /* VIS I fmul8x16al */
4228 CHECK_FPU_FEATURE(dc
, VIS1
);
4229 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8x16al
);
4231 case 0x036: /* VIS I fmul8sux16 */
4232 CHECK_FPU_FEATURE(dc
, VIS1
);
4233 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8sux16
);
4235 case 0x037: /* VIS I fmul8ulx16 */
4236 CHECK_FPU_FEATURE(dc
, VIS1
);
4237 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmul8ulx16
);
4239 case 0x038: /* VIS I fmuld8sux16 */
4240 CHECK_FPU_FEATURE(dc
, VIS1
);
4241 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8sux16
);
4243 case 0x039: /* VIS I fmuld8ulx16 */
4244 CHECK_FPU_FEATURE(dc
, VIS1
);
4245 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fmuld8ulx16
);
4247 case 0x03a: /* VIS I fpack32 */
4248 CHECK_FPU_FEATURE(dc
, VIS1
);
4249 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpack32
);
4251 case 0x03b: /* VIS I fpack16 */
4252 CHECK_FPU_FEATURE(dc
, VIS1
);
4253 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4254 cpu_dst_32
= gen_dest_fpr_F(dc
);
4255 gen_helper_fpack16(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4256 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4258 case 0x03d: /* VIS I fpackfix */
4259 CHECK_FPU_FEATURE(dc
, VIS1
);
4260 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4261 cpu_dst_32
= gen_dest_fpr_F(dc
);
4262 gen_helper_fpackfix(cpu_dst_32
, cpu_gsr
, cpu_src1_64
);
4263 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4265 case 0x03e: /* VIS I pdist */
4266 CHECK_FPU_FEATURE(dc
, VIS1
);
4267 gen_ne_fop_DDDD(dc
, rd
, rs1
, rs2
, gen_helper_pdist
);
4269 case 0x048: /* VIS I faligndata */
4270 CHECK_FPU_FEATURE(dc
, VIS1
);
4271 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_faligndata
);
4273 case 0x04b: /* VIS I fpmerge */
4274 CHECK_FPU_FEATURE(dc
, VIS1
);
4275 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpmerge
);
4277 case 0x04c: /* VIS II bshuffle */
4278 CHECK_FPU_FEATURE(dc
, VIS2
);
4279 gen_gsr_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_bshuffle
);
4281 case 0x04d: /* VIS I fexpand */
4282 CHECK_FPU_FEATURE(dc
, VIS1
);
4283 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fexpand
);
4285 case 0x050: /* VIS I fpadd16 */
4286 CHECK_FPU_FEATURE(dc
, VIS1
);
4287 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16
);
4289 case 0x051: /* VIS I fpadd16s */
4290 CHECK_FPU_FEATURE(dc
, VIS1
);
4291 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpadd16s
);
4293 case 0x052: /* VIS I fpadd32 */
4294 CHECK_FPU_FEATURE(dc
, VIS1
);
4295 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpadd32
);
4297 case 0x053: /* VIS I fpadd32s */
4298 CHECK_FPU_FEATURE(dc
, VIS1
);
4299 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_add_i32
);
4301 case 0x054: /* VIS I fpsub16 */
4302 CHECK_FPU_FEATURE(dc
, VIS1
);
4303 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16
);
4305 case 0x055: /* VIS I fpsub16s */
4306 CHECK_FPU_FEATURE(dc
, VIS1
);
4307 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, gen_helper_fpsub16s
);
4309 case 0x056: /* VIS I fpsub32 */
4310 CHECK_FPU_FEATURE(dc
, VIS1
);
4311 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, gen_helper_fpsub32
);
4313 case 0x057: /* VIS I fpsub32s */
4314 CHECK_FPU_FEATURE(dc
, VIS1
);
4315 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_sub_i32
);
4317 case 0x060: /* VIS I fzero */
4318 CHECK_FPU_FEATURE(dc
, VIS1
);
4319 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4320 tcg_gen_movi_i64(cpu_dst_64
, 0);
4321 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4323 case 0x061: /* VIS I fzeros */
4324 CHECK_FPU_FEATURE(dc
, VIS1
);
4325 cpu_dst_32
= gen_dest_fpr_F(dc
);
4326 tcg_gen_movi_i32(cpu_dst_32
, 0);
4327 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4329 case 0x062: /* VIS I fnor */
4330 CHECK_FPU_FEATURE(dc
, VIS1
);
4331 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i64
);
4333 case 0x063: /* VIS I fnors */
4334 CHECK_FPU_FEATURE(dc
, VIS1
);
4335 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nor_i32
);
4337 case 0x064: /* VIS I fandnot2 */
4338 CHECK_FPU_FEATURE(dc
, VIS1
);
4339 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i64
);
4341 case 0x065: /* VIS I fandnot2s */
4342 CHECK_FPU_FEATURE(dc
, VIS1
);
4343 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_andc_i32
);
4345 case 0x066: /* VIS I fnot2 */
4346 CHECK_FPU_FEATURE(dc
, VIS1
);
4347 gen_ne_fop_DD(dc
, rd
, rs2
, tcg_gen_not_i64
);
4349 case 0x067: /* VIS I fnot2s */
4350 CHECK_FPU_FEATURE(dc
, VIS1
);
4351 gen_ne_fop_FF(dc
, rd
, rs2
, tcg_gen_not_i32
);
4353 case 0x068: /* VIS I fandnot1 */
4354 CHECK_FPU_FEATURE(dc
, VIS1
);
4355 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i64
);
4357 case 0x069: /* VIS I fandnot1s */
4358 CHECK_FPU_FEATURE(dc
, VIS1
);
4359 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_andc_i32
);
4361 case 0x06a: /* VIS I fnot1 */
4362 CHECK_FPU_FEATURE(dc
, VIS1
);
4363 gen_ne_fop_DD(dc
, rd
, rs1
, tcg_gen_not_i64
);
4365 case 0x06b: /* VIS I fnot1s */
4366 CHECK_FPU_FEATURE(dc
, VIS1
);
4367 gen_ne_fop_FF(dc
, rd
, rs1
, tcg_gen_not_i32
);
4369 case 0x06c: /* VIS I fxor */
4370 CHECK_FPU_FEATURE(dc
, VIS1
);
4371 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i64
);
4373 case 0x06d: /* VIS I fxors */
4374 CHECK_FPU_FEATURE(dc
, VIS1
);
4375 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_xor_i32
);
4377 case 0x06e: /* VIS I fnand */
4378 CHECK_FPU_FEATURE(dc
, VIS1
);
4379 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i64
);
4381 case 0x06f: /* VIS I fnands */
4382 CHECK_FPU_FEATURE(dc
, VIS1
);
4383 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_nand_i32
);
4385 case 0x070: /* VIS I fand */
4386 CHECK_FPU_FEATURE(dc
, VIS1
);
4387 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_and_i64
);
4389 case 0x071: /* VIS I fands */
4390 CHECK_FPU_FEATURE(dc
, VIS1
);
4391 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_and_i32
);
4393 case 0x072: /* VIS I fxnor */
4394 CHECK_FPU_FEATURE(dc
, VIS1
);
4395 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i64
);
4397 case 0x073: /* VIS I fxnors */
4398 CHECK_FPU_FEATURE(dc
, VIS1
);
4399 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_eqv_i32
);
4401 case 0x074: /* VIS I fsrc1 */
4402 CHECK_FPU_FEATURE(dc
, VIS1
);
4403 cpu_src1_64
= gen_load_fpr_D(dc
, rs1
);
4404 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4406 case 0x075: /* VIS I fsrc1s */
4407 CHECK_FPU_FEATURE(dc
, VIS1
);
4408 cpu_src1_32
= gen_load_fpr_F(dc
, rs1
);
4409 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4411 case 0x076: /* VIS I fornot2 */
4412 CHECK_FPU_FEATURE(dc
, VIS1
);
4413 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i64
);
4415 case 0x077: /* VIS I fornot2s */
4416 CHECK_FPU_FEATURE(dc
, VIS1
);
4417 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_orc_i32
);
4419 case 0x078: /* VIS I fsrc2 */
4420 CHECK_FPU_FEATURE(dc
, VIS1
);
4421 cpu_src1_64
= gen_load_fpr_D(dc
, rs2
);
4422 gen_store_fpr_D(dc
, rd
, cpu_src1_64
);
4424 case 0x079: /* VIS I fsrc2s */
4425 CHECK_FPU_FEATURE(dc
, VIS1
);
4426 cpu_src1_32
= gen_load_fpr_F(dc
, rs2
);
4427 gen_store_fpr_F(dc
, rd
, cpu_src1_32
);
4429 case 0x07a: /* VIS I fornot1 */
4430 CHECK_FPU_FEATURE(dc
, VIS1
);
4431 gen_ne_fop_DDD(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i64
);
4433 case 0x07b: /* VIS I fornot1s */
4434 CHECK_FPU_FEATURE(dc
, VIS1
);
4435 gen_ne_fop_FFF(dc
, rd
, rs2
, rs1
, tcg_gen_orc_i32
);
4437 case 0x07c: /* VIS I for */
4438 CHECK_FPU_FEATURE(dc
, VIS1
);
4439 gen_ne_fop_DDD(dc
, rd
, rs1
, rs2
, tcg_gen_or_i64
);
4441 case 0x07d: /* VIS I fors */
4442 CHECK_FPU_FEATURE(dc
, VIS1
);
4443 gen_ne_fop_FFF(dc
, rd
, rs1
, rs2
, tcg_gen_or_i32
);
4445 case 0x07e: /* VIS I fone */
4446 CHECK_FPU_FEATURE(dc
, VIS1
);
4447 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4448 tcg_gen_movi_i64(cpu_dst_64
, -1);
4449 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4451 case 0x07f: /* VIS I fones */
4452 CHECK_FPU_FEATURE(dc
, VIS1
);
4453 cpu_dst_32
= gen_dest_fpr_F(dc
);
4454 tcg_gen_movi_i32(cpu_dst_32
, -1);
4455 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4457 case 0x080: /* VIS I shutdown */
4458 case 0x081: /* VIS II siam */
4467 } else if (xop
== 0x37) { /* V8 CPop2, V9 impdep2 */
4468 #ifdef TARGET_SPARC64
4473 #ifdef TARGET_SPARC64
4474 } else if (xop
== 0x39) { /* V9 return */
4478 cpu_src1
= get_src1(dc
, insn
);
4479 if (IS_IMM
) { /* immediate */
4480 simm
= GET_FIELDs(insn
, 19, 31);
4481 tcg_gen_addi_tl(cpu_dst
, cpu_src1
, simm
);
4482 } else { /* register */
4483 rs2
= GET_FIELD(insn
, 27, 31);
4485 cpu_src2
= gen_load_gpr(dc
, rs2
);
4486 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4488 tcg_gen_mov_tl(cpu_dst
, cpu_src1
);
4491 gen_helper_restore(cpu_env
);
4493 r_const
= tcg_const_i32(3);
4494 gen_helper_check_align(cpu_env
, cpu_dst
, r_const
);
4495 tcg_temp_free_i32(r_const
);
4496 tcg_gen_mov_tl(cpu_npc
, cpu_dst
);
4497 dc
->npc
= DYNAMIC_PC
;
4501 cpu_src1
= get_src1(dc
, insn
);
4502 if (IS_IMM
) { /* immediate */
4503 simm
= GET_FIELDs(insn
, 19, 31);
4504 tcg_gen_addi_tl(cpu_dst
, cpu_src1
, simm
);
4505 } else { /* register */
4506 rs2
= GET_FIELD(insn
, 27, 31);
4508 cpu_src2
= gen_load_gpr(dc
, rs2
);
4509 tcg_gen_add_tl(cpu_dst
, cpu_src1
, cpu_src2
);
4511 tcg_gen_mov_tl(cpu_dst
, cpu_src1
);
4515 case 0x38: /* jmpl */
4520 t
= gen_dest_gpr(dc
, rd
);
4521 tcg_gen_movi_tl(t
, dc
->pc
);
4522 gen_store_gpr(dc
, rd
, t
);
4524 r_const
= tcg_const_i32(3);
4525 gen_helper_check_align(cpu_env
, cpu_dst
, r_const
);
4526 tcg_temp_free_i32(r_const
);
4527 gen_address_mask(dc
, cpu_dst
);
4528 tcg_gen_mov_tl(cpu_npc
, cpu_dst
);
4529 dc
->npc
= DYNAMIC_PC
;
4532 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
4533 case 0x39: /* rett, V9 return */
4537 if (!supervisor(dc
))
4540 r_const
= tcg_const_i32(3);
4541 gen_helper_check_align(cpu_env
, cpu_dst
, r_const
);
4542 tcg_temp_free_i32(r_const
);
4543 tcg_gen_mov_tl(cpu_npc
, cpu_dst
);
4544 dc
->npc
= DYNAMIC_PC
;
4545 gen_helper_rett(cpu_env
);
4549 case 0x3b: /* flush */
4550 if (!((dc
)->def
->features
& CPU_FEATURE_FLUSH
))
4554 case 0x3c: /* save */
4556 gen_helper_save(cpu_env
);
4557 gen_store_gpr(dc
, rd
, cpu_dst
);
4559 case 0x3d: /* restore */
4561 gen_helper_restore(cpu_env
);
4562 gen_store_gpr(dc
, rd
, cpu_dst
);
4564 #if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
4565 case 0x3e: /* V9 done/retry */
4569 if (!supervisor(dc
))
4571 dc
->npc
= DYNAMIC_PC
;
4572 dc
->pc
= DYNAMIC_PC
;
4573 gen_helper_done(cpu_env
);
4576 if (!supervisor(dc
))
4578 dc
->npc
= DYNAMIC_PC
;
4579 dc
->pc
= DYNAMIC_PC
;
4580 gen_helper_retry(cpu_env
);
4595 case 3: /* load/store instructions */
4597 unsigned int xop
= GET_FIELD(insn
, 7, 12);
4598 /* ??? gen_address_mask prevents us from using a source
4599 register directly. Always generate a temporary. */
4600 TCGv cpu_addr
= get_temp_tl(dc
);
4602 tcg_gen_mov_tl(cpu_addr
, get_src1(dc
, insn
));
4603 if (xop
== 0x3c || xop
== 0x3e) {
4604 /* V9 casa/casxa : no offset */
4605 } else if (IS_IMM
) { /* immediate */
4606 simm
= GET_FIELDs(insn
, 19, 31);
4608 tcg_gen_addi_tl(cpu_addr
, cpu_addr
, simm
);
4610 } else { /* register */
4611 rs2
= GET_FIELD(insn
, 27, 31);
4613 tcg_gen_add_tl(cpu_addr
, cpu_addr
, gen_load_gpr(dc
, rs2
));
4616 if (xop
< 4 || (xop
> 7 && xop
< 0x14 && xop
!= 0x0e) ||
4617 (xop
> 0x17 && xop
<= 0x1d ) ||
4618 (xop
> 0x2c && xop
<= 0x33) || xop
== 0x1f || xop
== 0x3d) {
4619 TCGv cpu_val
= gen_dest_gpr(dc
, rd
);
4622 case 0x0: /* ld, V9 lduw, load unsigned word */
4623 gen_address_mask(dc
, cpu_addr
);
4624 tcg_gen_qemu_ld32u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4626 case 0x1: /* ldub, load unsigned byte */
4627 gen_address_mask(dc
, cpu_addr
);
4628 tcg_gen_qemu_ld8u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4630 case 0x2: /* lduh, load unsigned halfword */
4631 gen_address_mask(dc
, cpu_addr
);
4632 tcg_gen_qemu_ld16u(cpu_val
, cpu_addr
, dc
->mem_idx
);
4634 case 0x3: /* ldd, load double word */
4642 r_const
= tcg_const_i32(7);
4643 /* XXX remove alignment check */
4644 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4645 tcg_temp_free_i32(r_const
);
4646 gen_address_mask(dc
, cpu_addr
);
4647 t64
= tcg_temp_new_i64();
4648 tcg_gen_qemu_ld64(t64
, cpu_addr
, dc
->mem_idx
);
4649 tcg_gen_trunc_i64_tl(cpu_tmp0
, t64
);
4650 tcg_gen_andi_tl(cpu_tmp0
, cpu_tmp0
, 0xffffffffULL
);
4651 gen_store_gpr(dc
, rd
+ 1, cpu_tmp0
);
4652 tcg_gen_shri_i64(t64
, t64
, 32);
4653 tcg_gen_trunc_i64_tl(cpu_val
, t64
);
4654 tcg_temp_free_i64(t64
);
4655 tcg_gen_andi_tl(cpu_val
, cpu_val
, 0xffffffffULL
);
4658 case 0x9: /* ldsb, load signed byte */
4659 gen_address_mask(dc
, cpu_addr
);
4660 tcg_gen_qemu_ld8s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4662 case 0xa: /* ldsh, load signed halfword */
4663 gen_address_mask(dc
, cpu_addr
);
4664 tcg_gen_qemu_ld16s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4666 case 0xd: /* ldstub -- XXX: should be atomically */
4670 gen_address_mask(dc
, cpu_addr
);
4671 tcg_gen_qemu_ld8s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4672 r_const
= tcg_const_tl(0xff);
4673 tcg_gen_qemu_st8(r_const
, cpu_addr
, dc
->mem_idx
);
4674 tcg_temp_free(r_const
);
4677 case 0x0f: /* swap, swap register with memory. Also
4679 CHECK_IU_FEATURE(dc
, SWAP
);
4680 cpu_src1
= gen_load_gpr(dc
, rd
);
4681 gen_address_mask(dc
, cpu_addr
);
4682 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_addr
, dc
->mem_idx
);
4683 tcg_gen_qemu_st32(cpu_src1
, cpu_addr
, dc
->mem_idx
);
4684 tcg_gen_mov_tl(cpu_val
, cpu_tmp0
);
4686 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4687 case 0x10: /* lda, V9 lduwa, load word alternate */
4688 #ifndef TARGET_SPARC64
4691 if (!supervisor(dc
))
4695 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 4, 0);
4697 case 0x11: /* lduba, load unsigned byte alternate */
4698 #ifndef TARGET_SPARC64
4701 if (!supervisor(dc
))
4705 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 1, 0);
4707 case 0x12: /* lduha, load unsigned halfword alternate */
4708 #ifndef TARGET_SPARC64
4711 if (!supervisor(dc
))
4715 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 2, 0);
4717 case 0x13: /* ldda, load double word alternate */
4718 #ifndef TARGET_SPARC64
4721 if (!supervisor(dc
))
4727 gen_ldda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
4729 case 0x19: /* ldsba, load signed byte alternate */
4730 #ifndef TARGET_SPARC64
4733 if (!supervisor(dc
))
4737 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 1, 1);
4739 case 0x1a: /* ldsha, load signed halfword alternate */
4740 #ifndef TARGET_SPARC64
4743 if (!supervisor(dc
))
4747 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 2, 1);
4749 case 0x1d: /* ldstuba -- XXX: should be atomically */
4750 #ifndef TARGET_SPARC64
4753 if (!supervisor(dc
))
4757 gen_ldstub_asi(cpu_val
, cpu_addr
, insn
);
4759 case 0x1f: /* swapa, swap reg with alt. memory. Also
4761 CHECK_IU_FEATURE(dc
, SWAP
);
4762 #ifndef TARGET_SPARC64
4765 if (!supervisor(dc
))
4769 cpu_src1
= gen_load_gpr(dc
, rd
);
4770 gen_swap_asi(cpu_val
, cpu_src1
, cpu_addr
, insn
);
4773 #ifndef TARGET_SPARC64
4774 case 0x30: /* ldc */
4775 case 0x31: /* ldcsr */
4776 case 0x33: /* lddc */
4780 #ifdef TARGET_SPARC64
4781 case 0x08: /* V9 ldsw */
4782 gen_address_mask(dc
, cpu_addr
);
4783 tcg_gen_qemu_ld32s(cpu_val
, cpu_addr
, dc
->mem_idx
);
4785 case 0x0b: /* V9 ldx */
4786 gen_address_mask(dc
, cpu_addr
);
4787 tcg_gen_qemu_ld64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4789 case 0x18: /* V9 ldswa */
4791 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 4, 1);
4793 case 0x1b: /* V9 ldxa */
4795 gen_ld_asi(cpu_val
, cpu_addr
, insn
, 8, 0);
4797 case 0x2d: /* V9 prefetch, no effect */
4799 case 0x30: /* V9 ldfa */
4800 if (gen_trap_ifnofpu(dc
)) {
4804 gen_ldf_asi(cpu_addr
, insn
, 4, rd
);
4805 gen_update_fprs_dirty(rd
);
4807 case 0x33: /* V9 lddfa */
4808 if (gen_trap_ifnofpu(dc
)) {
4812 gen_ldf_asi(cpu_addr
, insn
, 8, DFPREG(rd
));
4813 gen_update_fprs_dirty(DFPREG(rd
));
4815 case 0x3d: /* V9 prefetcha, no effect */
4817 case 0x32: /* V9 ldqfa */
4818 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4819 if (gen_trap_ifnofpu(dc
)) {
4823 gen_ldf_asi(cpu_addr
, insn
, 16, QFPREG(rd
));
4824 gen_update_fprs_dirty(QFPREG(rd
));
4830 gen_store_gpr(dc
, rd
, cpu_val
);
4831 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4834 } else if (xop
>= 0x20 && xop
< 0x24) {
4835 if (gen_trap_ifnofpu(dc
)) {
4840 case 0x20: /* ldf, load fpreg */
4841 gen_address_mask(dc
, cpu_addr
);
4842 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_addr
, dc
->mem_idx
);
4843 cpu_dst_32
= gen_dest_fpr_F(dc
);
4844 tcg_gen_trunc_tl_i32(cpu_dst_32
, cpu_tmp0
);
4845 gen_store_fpr_F(dc
, rd
, cpu_dst_32
);
4847 case 0x21: /* ldfsr, V9 ldxfsr */
4848 #ifdef TARGET_SPARC64
4849 gen_address_mask(dc
, cpu_addr
);
4851 TCGv_i64 t64
= tcg_temp_new_i64();
4852 tcg_gen_qemu_ld64(t64
, cpu_addr
, dc
->mem_idx
);
4853 gen_helper_ldxfsr(cpu_env
, t64
);
4854 tcg_temp_free_i64(t64
);
4859 TCGv_i32 t32
= get_temp_i32(dc
);
4860 tcg_gen_qemu_ld32u(cpu_tmp0
, cpu_addr
, dc
->mem_idx
);
4861 tcg_gen_trunc_tl_i32(t32
, cpu_tmp0
);
4862 gen_helper_ldfsr(cpu_env
, t32
);
4865 case 0x22: /* ldqf, load quad fpreg */
4869 CHECK_FPU_FEATURE(dc
, FLOAT128
);
4870 r_const
= tcg_const_i32(dc
->mem_idx
);
4871 gen_address_mask(dc
, cpu_addr
);
4872 gen_helper_ldqf(cpu_env
, cpu_addr
, r_const
);
4873 tcg_temp_free_i32(r_const
);
4874 gen_op_store_QT0_fpr(QFPREG(rd
));
4875 gen_update_fprs_dirty(QFPREG(rd
));
4878 case 0x23: /* lddf, load double fpreg */
4879 gen_address_mask(dc
, cpu_addr
);
4880 cpu_dst_64
= gen_dest_fpr_D(dc
, rd
);
4881 tcg_gen_qemu_ld64(cpu_dst_64
, cpu_addr
, dc
->mem_idx
);
4882 gen_store_fpr_D(dc
, rd
, cpu_dst_64
);
4887 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18) ||
4888 xop
== 0xe || xop
== 0x1e) {
4889 TCGv cpu_val
= gen_load_gpr(dc
, rd
);
4892 case 0x4: /* st, store word */
4893 gen_address_mask(dc
, cpu_addr
);
4894 tcg_gen_qemu_st32(cpu_val
, cpu_addr
, dc
->mem_idx
);
4896 case 0x5: /* stb, store byte */
4897 gen_address_mask(dc
, cpu_addr
);
4898 tcg_gen_qemu_st8(cpu_val
, cpu_addr
, dc
->mem_idx
);
4900 case 0x6: /* sth, store halfword */
4901 gen_address_mask(dc
, cpu_addr
);
4902 tcg_gen_qemu_st16(cpu_val
, cpu_addr
, dc
->mem_idx
);
4904 case 0x7: /* std, store double word */
4913 gen_address_mask(dc
, cpu_addr
);
4914 r_const
= tcg_const_i32(7);
4915 /* XXX remove alignment check */
4916 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
4917 tcg_temp_free_i32(r_const
);
4918 lo
= gen_load_gpr(dc
, rd
+ 1);
4920 t64
= tcg_temp_new_i64();
4921 tcg_gen_concat_tl_i64(t64
, lo
, cpu_val
);
4922 tcg_gen_qemu_st64(t64
, cpu_addr
, dc
->mem_idx
);
4923 tcg_temp_free_i64(t64
);
4926 #if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
4927 case 0x14: /* sta, V9 stwa, store word alternate */
4928 #ifndef TARGET_SPARC64
4931 if (!supervisor(dc
))
4935 gen_st_asi(cpu_val
, cpu_addr
, insn
, 4);
4936 dc
->npc
= DYNAMIC_PC
;
4938 case 0x15: /* stba, store byte alternate */
4939 #ifndef TARGET_SPARC64
4942 if (!supervisor(dc
))
4946 gen_st_asi(cpu_val
, cpu_addr
, insn
, 1);
4947 dc
->npc
= DYNAMIC_PC
;
4949 case 0x16: /* stha, store halfword alternate */
4950 #ifndef TARGET_SPARC64
4953 if (!supervisor(dc
))
4957 gen_st_asi(cpu_val
, cpu_addr
, insn
, 2);
4958 dc
->npc
= DYNAMIC_PC
;
4960 case 0x17: /* stda, store double word alternate */
4961 #ifndef TARGET_SPARC64
4964 if (!supervisor(dc
))
4971 gen_stda_asi(dc
, cpu_val
, cpu_addr
, insn
, rd
);
4975 #ifdef TARGET_SPARC64
4976 case 0x0e: /* V9 stx */
4977 gen_address_mask(dc
, cpu_addr
);
4978 tcg_gen_qemu_st64(cpu_val
, cpu_addr
, dc
->mem_idx
);
4980 case 0x1e: /* V9 stxa */
4982 gen_st_asi(cpu_val
, cpu_addr
, insn
, 8);
4983 dc
->npc
= DYNAMIC_PC
;
4989 } else if (xop
> 0x23 && xop
< 0x28) {
4990 if (gen_trap_ifnofpu(dc
)) {
4995 case 0x24: /* stf, store fpreg */
4996 gen_address_mask(dc
, cpu_addr
);
4997 cpu_src1_32
= gen_load_fpr_F(dc
, rd
);
4998 tcg_gen_ext_i32_tl(cpu_tmp0
, cpu_src1_32
);
4999 tcg_gen_qemu_st32(cpu_tmp0
, cpu_addr
, dc
->mem_idx
);
5001 case 0x25: /* stfsr, V9 stxfsr */
5003 TCGv t
= get_temp_tl(dc
);
5005 tcg_gen_ld_tl(t
, cpu_env
, offsetof(CPUSPARCState
, fsr
));
5006 #ifdef TARGET_SPARC64
5007 gen_address_mask(dc
, cpu_addr
);
5009 tcg_gen_qemu_st64(t
, cpu_addr
, dc
->mem_idx
);
5013 tcg_gen_qemu_st32(t
, cpu_addr
, dc
->mem_idx
);
5017 #ifdef TARGET_SPARC64
5018 /* V9 stqf, store quad fpreg */
5022 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5023 gen_op_load_fpr_QT0(QFPREG(rd
));
5024 r_const
= tcg_const_i32(dc
->mem_idx
);
5025 gen_address_mask(dc
, cpu_addr
);
5026 gen_helper_stqf(cpu_env
, cpu_addr
, r_const
);
5027 tcg_temp_free_i32(r_const
);
5030 #else /* !TARGET_SPARC64 */
5031 /* stdfq, store floating point queue */
5032 #if defined(CONFIG_USER_ONLY)
5035 if (!supervisor(dc
))
5037 if (gen_trap_ifnofpu(dc
)) {
5043 case 0x27: /* stdf, store double fpreg */
5044 gen_address_mask(dc
, cpu_addr
);
5045 cpu_src1_64
= gen_load_fpr_D(dc
, rd
);
5046 tcg_gen_qemu_st64(cpu_src1_64
, cpu_addr
, dc
->mem_idx
);
5051 } else if (xop
> 0x33 && xop
< 0x3f) {
5054 #ifdef TARGET_SPARC64
5055 case 0x34: /* V9 stfa */
5056 if (gen_trap_ifnofpu(dc
)) {
5059 gen_stf_asi(cpu_addr
, insn
, 4, rd
);
5061 case 0x36: /* V9 stqfa */
5065 CHECK_FPU_FEATURE(dc
, FLOAT128
);
5066 if (gen_trap_ifnofpu(dc
)) {
5069 r_const
= tcg_const_i32(7);
5070 gen_helper_check_align(cpu_env
, cpu_addr
, r_const
);
5071 tcg_temp_free_i32(r_const
);
5072 gen_stf_asi(cpu_addr
, insn
, 16, QFPREG(rd
));
5075 case 0x37: /* V9 stdfa */
5076 if (gen_trap_ifnofpu(dc
)) {
5079 gen_stf_asi(cpu_addr
, insn
, 8, DFPREG(rd
));
5081 case 0x3c: /* V9 casa */
5082 rs2
= GET_FIELD(insn
, 27, 31);
5083 cpu_src2
= gen_load_gpr(dc
, rs2
);
5084 gen_cas_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5086 case 0x3e: /* V9 casxa */
5087 rs2
= GET_FIELD(insn
, 27, 31);
5088 cpu_src2
= gen_load_gpr(dc
, rs2
);
5089 gen_casx_asi(dc
, cpu_addr
, cpu_src2
, insn
, rd
);
5092 case 0x34: /* stc */
5093 case 0x35: /* stcsr */
5094 case 0x36: /* stdcq */
5095 case 0x37: /* stdc */
5107 /* default case for non jump instructions */
5108 if (dc
->npc
== DYNAMIC_PC
) {
5109 dc
->pc
= DYNAMIC_PC
;
5111 } else if (dc
->npc
== JUMP_PC
) {
5112 /* we can do a static jump */
5113 gen_branch2(dc
, dc
->jump_pc
[0], dc
->jump_pc
[1], cpu_cond
);
5117 dc
->npc
= dc
->npc
+ 4;
5126 r_const
= tcg_const_i32(TT_ILL_INSN
);
5127 gen_helper_raise_exception(cpu_env
, r_const
);
5128 tcg_temp_free_i32(r_const
);
5137 r_const
= tcg_const_i32(TT_UNIMP_FLUSH
);
5138 gen_helper_raise_exception(cpu_env
, r_const
);
5139 tcg_temp_free_i32(r_const
);
5143 #if !defined(CONFIG_USER_ONLY)
5149 r_const
= tcg_const_i32(TT_PRIV_INSN
);
5150 gen_helper_raise_exception(cpu_env
, r_const
);
5151 tcg_temp_free_i32(r_const
);
5158 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP
);
5161 #if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
5164 gen_op_fpexception_im(FSR_FTT_SEQ_ERROR
);
5168 #ifndef TARGET_SPARC64
5174 r_const
= tcg_const_i32(TT_NCP_INSN
);
5175 gen_helper_raise_exception(cpu_env
, r_const
);
5176 tcg_temp_free(r_const
);
5182 if (dc
->n_t32
!= 0) {
5184 for (i
= dc
->n_t32
- 1; i
>= 0; --i
) {
5185 tcg_temp_free_i32(dc
->t32
[i
]);
5189 if (dc
->n_ttl
!= 0) {
5191 for (i
= dc
->n_ttl
- 1; i
>= 0; --i
) {
5192 tcg_temp_free(dc
->ttl
[i
]);
/*
 * NOTE(review): this region is a garbled extraction of a QEMU SPARC
 * TCG translator.  Each original source line is split across several
 * physical lines and carries its original line number as a prefix
 * (e.g. "5198"); many lines (braces, declarations, break statements)
 * are missing entirely, so the text below does not compile as-is.
 * Comments only annotate the visible fragments — re-extract from the
 * original file before editing the code itself.
 */
/* Translate one TranslationBlock into TCG ops.  'spc' appears to
   select search-PC mode (the visible callers pass 0 or 1) — TODO
   confirm against gen_intermediate_code / gen_intermediate_code_pc. */
5198 static inline void gen_intermediate_code_internal(TranslationBlock
* tb
,
5199 int spc
, CPUSPARCState
*env
)
5201 target_ulong pc_start
, last_pc
;
5202 uint16_t *gen_opc_end
;
5203 DisasContext dc1
, *dc
= &dc1
;
/* Zero the DisasContext, then seed npc/cc_op/mem_idx and feature
   flags from the TB flags and CPU state. */
5210 memset(dc
, 0, sizeof(DisasContext
));
5215 dc
->npc
= (target_ulong
) tb
->cs_base
;
5216 dc
->cc_op
= CC_OP_DYNAMIC
;
5217 dc
->mem_idx
= cpu_mmu_index(env
);
5219 dc
->fpu_enabled
= tb_fpu_enabled(tb
->flags
);
5220 dc
->address_mask_32bit
= tb_am_enabled(tb
->flags
);
5221 dc
->singlestep
= (env
->singlestep_enabled
|| singlestep
);
5222 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
/* Cap the number of instructions per TB from tb->cflags; a zero
   count falls back to CF_COUNT_MASK. */
5225 max_insns
= tb
->cflags
& CF_COUNT_MASK
;
5227 max_insns
= CF_COUNT_MASK
;
/* If a breakpoint sits on the current pc, emit a debug-helper call. */
5230 if (unlikely(!QTAILQ_EMPTY(&env
->breakpoints
))) {
5231 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
5232 if (bp
->pc
== dc
->pc
) {
5233 if (dc
->pc
!= pc_start
)
5235 gen_helper_debug(cpu_env
);
/* Search-PC mode: record per-insn pc/npc/icount in the gen_opc_*
   side tables so restore_state_to_opc() can map an op index back to
   guest state. */
5243 qemu_log("Search PC...\n");
5244 j
= gen_opc_ptr
- gen_opc_buf
;
5248 gen_opc_instr_start
[lj
++] = 0;
5249 gen_opc_pc
[lj
] = dc
->pc
;
5250 gen_opc_npc
[lj
] = dc
->npc
;
5251 gen_opc_instr_start
[lj
] = 1;
5252 gen_opc_icount
[lj
] = num_insns
;
5255 if (num_insns
+ 1 == max_insns
&& (tb
->cflags
& CF_LAST_IO
))
/* Fetch and translate one guest instruction; cpu_tmp0 and cpu_dst
   are fresh per-insn TCG temporaries, freed right after. */
5258 insn
= cpu_ldl_code(env
, dc
->pc
);
5260 cpu_tmp0
= tcg_temp_new();
5261 cpu_dst
= tcg_temp_new();
5263 disas_sparc_insn(dc
, insn
);
5266 tcg_temp_free(cpu_dst
);
5267 tcg_temp_free(cpu_tmp0
);
5271 /* if the next PC is different, we abort now */
5272 if (dc
->pc
!= (last_pc
+ 4))
5274 /* if we reach a page boundary, we stop generation so that the
5275 PC of a TT_TFAULT exception is always in the right page */
5276 if ((dc
->pc
& (TARGET_PAGE_SIZE
- 1)) == 0)
5278 /* if single step mode, we generate only one instruction and
5279 generate an exception */
5280 if (dc
->singlestep
) {
/* Loop termination: opcode-buffer space, page-size budget, and the
   max_insns cap. */
5283 } while ((gen_opc_ptr
< gen_opc_end
) &&
5284 (dc
->pc
- pc_start
) < (TARGET_PAGE_SIZE
- 32) &&
5285 num_insns
< max_insns
);
5288 if (tb
->cflags
& CF_LAST_IO
) {
/* Epilogue: when both pc and npc are statically known we can chain
   directly to the next TB; otherwise store the known pc and fall
   back to dynamic dispatch. */
5292 if (dc
->pc
!= DYNAMIC_PC
&&
5293 (dc
->npc
!= DYNAMIC_PC
&& dc
->npc
!= JUMP_PC
)) {
5294 /* static PC and NPC: we can use direct chaining */
5295 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
5297 if (dc
->pc
!= DYNAMIC_PC
) {
5298 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
/* Finalize the opcode stream, publish the conditional-jump targets
   (consumed by restore_state_to_opc) and the TB's size/icount. */
5304 gen_icount_end(tb
, num_insns
);
5305 *gen_opc_ptr
= INDEX_op_end
;
5307 j
= gen_opc_ptr
- gen_opc_buf
;
5310 gen_opc_instr_start
[lj
++] = 0;
5314 gen_opc_jump_pc
[0] = dc
->jump_pc
[0];
5315 gen_opc_jump_pc
[1] = dc
->jump_pc
[1];
5317 tb
->size
= last_pc
+ 4 - pc_start
;
5318 tb
->icount
= num_insns
;
/* Optional logging of the guest code that was just translated. */
5321 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM
)) {
5322 qemu_log("--------------\n");
5323 qemu_log("IN: %s\n", lookup_symbol(pc_start
));
5324 log_target_disas(pc_start
, last_pc
+ 4 - pc_start
, 0);
/* Public translation entry point (normal mode): forwards to the
   internal worker with spc == 0.  NOTE(review): fragmented
   extraction — the function's braces are missing from this chunk. */
5330 void gen_intermediate_code(CPUSPARCState
* env
, TranslationBlock
* tb
)
5332 gen_intermediate_code_internal(tb
, 0, env
);
/* Public translation entry point with spc == 1 — presumably the
   search-PC variant used when restoring state from a faulting TB;
   confirm against the internal worker.  NOTE(review): fragmented
   extraction — the function's braces are missing from this chunk. */
5335 void gen_intermediate_code_pc(CPUSPARCState
* env
, TranslationBlock
* tb
)
5337 gen_intermediate_code_internal(tb
, 1, env
);
/* One-time TCG initialisation: registers each cpu_* global handle
   against its CPUSPARCState field (via TCG_AREG0 + offsetof) so the
   translator can reference guest registers directly.
   NOTE(review): fragmented extraction — register-name arguments,
   #else/#endif directives and braces are missing from this chunk;
   do not edit the code here, re-extract from the original file. */
5340 void gen_intermediate_code_init(CPUSPARCState
*env
)
/* Static name tables used when registering the globals below. */
5344 static const char * const gregnames
[8] = {
5345 NULL
, // g0 not used
5354 static const char * const fregnames
[32] = {
5355 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5356 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5357 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5358 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5361 /* init various static tables */
/* Base pointers: the CPU env register and the register-window
   pointer. */
5365 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
5366 cpu_regwptr
= tcg_global_mem_new_ptr(TCG_AREG0
,
5367 offsetof(CPUSPARCState
, regwptr
),
/* SPARC64-only state: extended condition codes, ASI, FPRS, GSR,
   timer comparators, hypervisor registers and softint. */
5369 #ifdef TARGET_SPARC64
5370 cpu_xcc
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, xcc
),
5372 cpu_asi
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, asi
),
5374 cpu_fprs
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, fprs
),
5376 cpu_gsr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, gsr
),
5378 cpu_tick_cmpr
= tcg_global_mem_new(TCG_AREG0
,
5379 offsetof(CPUSPARCState
, tick_cmpr
),
5381 cpu_stick_cmpr
= tcg_global_mem_new(TCG_AREG0
,
5382 offsetof(CPUSPARCState
, stick_cmpr
),
5384 cpu_hstick_cmpr
= tcg_global_mem_new(TCG_AREG0
,
5385 offsetof(CPUSPARCState
, hstick_cmpr
),
5387 cpu_hintp
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, hintp
),
5389 cpu_htba
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, htba
),
5391 cpu_hver
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, hver
),
5393 cpu_ssr
= tcg_global_mem_new(TCG_AREG0
,
5394 offsetof(CPUSPARCState
, ssr
), "ssr");
5395 cpu_ver
= tcg_global_mem_new(TCG_AREG0
,
5396 offsetof(CPUSPARCState
, version
), "ver");
5397 cpu_softint
= tcg_global_mem_new_i32(TCG_AREG0
,
5398 offsetof(CPUSPARCState
, softint
),
/* cpu_wim — presumably the non-SPARC64 branch; the matching #else
   is missing from this chunk, so verify against the original. */
5401 cpu_wim
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, wim
),
/* Common state: condition, CC sources/dest/op, PSR, FSR, pc/npc, Y. */
5404 cpu_cond
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cond
),
5406 cpu_cc_src
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cc_src
),
5408 cpu_cc_src2
= tcg_global_mem_new(TCG_AREG0
,
5409 offsetof(CPUSPARCState
, cc_src2
),
5411 cpu_cc_dst
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, cc_dst
),
5413 cpu_cc_op
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, cc_op
),
5415 cpu_psr
= tcg_global_mem_new_i32(TCG_AREG0
, offsetof(CPUSPARCState
, psr
),
5417 cpu_fsr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, fsr
),
5419 cpu_pc
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, pc
),
5421 cpu_npc
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, npc
),
5423 cpu_y
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, y
), "y");
/* System-emulation-only trap base register. */
5424 #ifndef CONFIG_USER_ONLY
5425 cpu_tbr
= tcg_global_mem_new(TCG_AREG0
, offsetof(CPUSPARCState
, tbr
),
/* Global registers g1..g7 (g0 is hard-wired to zero and skipped —
   consistent with gregnames[0] == NULL above). */
5428 for (i
= 1; i
< 8; i
++) {
5429 cpu_gregs
[i
] = tcg_global_mem_new(TCG_AREG0
,
5430 offsetof(CPUSPARCState
, gregs
[i
]),
/* Double-precision FP register file (TARGET_DPREGS entries). */
5433 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5434 cpu_fpr
[i
] = tcg_global_mem_new_i64(TCG_AREG0
,
5435 offsetof(CPUSPARCState
, fpr
[i
]),
5439 /* register helpers */
5441 #define GEN_HELPER 2
5446 void restore_state_to_opc(CPUSPARCState
*env
, TranslationBlock
*tb
, int pc_pos
)
5449 env
->pc
= gen_opc_pc
[pc_pos
];
5450 npc
= gen_opc_npc
[pc_pos
];
5452 /* dynamic NPC: already stored */
5453 } else if (npc
== 2) {
5454 /* jump PC: use 'cond' and the jump targets of the translation */
5456 env
->npc
= gen_opc_jump_pc
[0];
5458 env
->npc
= gen_opc_jump_pc
[1];