/*
 * Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
 * Copyright (C) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
24 #include "disas/disas.h"
25 #include "exec/helper-proto.h"
26 #include "exec/exec-all.h"
27 #include "tcg/tcg-op.h"
28 #include "tcg/tcg-op-gvec.h"
29 #include "exec/helper-gen.h"
30 #include "exec/translator.h"
34 #define HELPER_H "helper.h"
35 #include "exec/helper-info.c.inc"
39 # define gen_helper_rdpsr(D, E) qemu_build_not_reached()
40 # define gen_helper_rett(E) qemu_build_not_reached()
41 # define gen_helper_power_down(E) qemu_build_not_reached()
42 # define gen_helper_wrpsr(E, S) qemu_build_not_reached()
44 # define gen_helper_clear_softint(E, S) qemu_build_not_reached()
45 # define gen_helper_done(E) qemu_build_not_reached()
46 # define gen_helper_fabsd(D, S) qemu_build_not_reached()
47 # define gen_helper_flushw(E) qemu_build_not_reached()
48 # define gen_helper_fnegd(D, S) qemu_build_not_reached()
49 # define gen_helper_rdccr(D, E) qemu_build_not_reached()
50 # define gen_helper_rdcwp(D, E) qemu_build_not_reached()
51 # define gen_helper_restored(E) qemu_build_not_reached()
52 # define gen_helper_retry(E) qemu_build_not_reached()
53 # define gen_helper_saved(E) qemu_build_not_reached()
54 # define gen_helper_sdivx(D, E, A, B) qemu_build_not_reached()
55 # define gen_helper_set_softint(E, S) qemu_build_not_reached()
56 # define gen_helper_tick_get_count(D, E, T, C) qemu_build_not_reached()
57 # define gen_helper_tick_set_count(P, S) qemu_build_not_reached()
58 # define gen_helper_tick_set_limit(P, S) qemu_build_not_reached()
59 # define gen_helper_udivx(D, E, A, B) qemu_build_not_reached()
60 # define gen_helper_wrccr(E, S) qemu_build_not_reached()
61 # define gen_helper_wrcwp(E, S) qemu_build_not_reached()
62 # define gen_helper_wrgl(E, S) qemu_build_not_reached()
63 # define gen_helper_write_softint(E, S) qemu_build_not_reached()
64 # define gen_helper_wrpil(E, S) qemu_build_not_reached()
65 # define gen_helper_wrpstate(E, S) qemu_build_not_reached()
66 # define gen_helper_fabsq ({ qemu_build_not_reached(); NULL; })
67 # define gen_helper_fcmpeq16 ({ qemu_build_not_reached(); NULL; })
68 # define gen_helper_fcmpeq32 ({ qemu_build_not_reached(); NULL; })
69 # define gen_helper_fcmpgt16 ({ qemu_build_not_reached(); NULL; })
70 # define gen_helper_fcmpgt32 ({ qemu_build_not_reached(); NULL; })
71 # define gen_helper_fcmple16 ({ qemu_build_not_reached(); NULL; })
72 # define gen_helper_fcmple32 ({ qemu_build_not_reached(); NULL; })
73 # define gen_helper_fcmpne16 ({ qemu_build_not_reached(); NULL; })
74 # define gen_helper_fcmpne32 ({ qemu_build_not_reached(); NULL; })
75 # define gen_helper_fdtox ({ qemu_build_not_reached(); NULL; })
76 # define gen_helper_fexpand ({ qemu_build_not_reached(); NULL; })
77 # define gen_helper_fmul8sux16 ({ qemu_build_not_reached(); NULL; })
78 # define gen_helper_fmul8ulx16 ({ qemu_build_not_reached(); NULL; })
79 # define gen_helper_fmul8x16al ({ qemu_build_not_reached(); NULL; })
80 # define gen_helper_fmul8x16au ({ qemu_build_not_reached(); NULL; })
81 # define gen_helper_fmul8x16 ({ qemu_build_not_reached(); NULL; })
82 # define gen_helper_fmuld8sux16 ({ qemu_build_not_reached(); NULL; })
83 # define gen_helper_fmuld8ulx16 ({ qemu_build_not_reached(); NULL; })
84 # define gen_helper_fnegq ({ qemu_build_not_reached(); NULL; })
85 # define gen_helper_fpmerge ({ qemu_build_not_reached(); NULL; })
86 # define gen_helper_fqtox ({ qemu_build_not_reached(); NULL; })
87 # define gen_helper_fstox ({ qemu_build_not_reached(); NULL; })
88 # define gen_helper_fxtod ({ qemu_build_not_reached(); NULL; })
89 # define gen_helper_fxtoq ({ qemu_build_not_reached(); NULL; })
90 # define gen_helper_fxtos ({ qemu_build_not_reached(); NULL; })
91 # define gen_helper_pdist ({ qemu_build_not_reached(); NULL; })
92 # define FSR_LDXFSR_MASK 0
93 # define FSR_LDXFSR_OLDMASK 0
97 /* Dynamic PC, must exit to main loop. */
99 /* Dynamic PC, one of two values according to jump_pc[T2]. */
101 /* Dynamic PC, may lookup next TB. */
102 #define DYNAMIC_PC_LOOKUP 3
104 #define DISAS_EXIT DISAS_TARGET_0
106 /* global register indexes */
107 static TCGv_ptr cpu_regwptr
;
108 static TCGv cpu_fsr
, cpu_pc
, cpu_npc
;
109 static TCGv cpu_regs
[32];
112 static TCGv cpu_cond
;
113 static TCGv cpu_cc_N
;
114 static TCGv cpu_cc_V
;
115 static TCGv cpu_icc_Z
;
116 static TCGv cpu_icc_C
;
117 #ifdef TARGET_SPARC64
118 static TCGv cpu_xcc_Z
;
119 static TCGv cpu_xcc_C
;
120 static TCGv_i32 cpu_fprs
;
123 # define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
124 # define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
127 #ifdef TARGET_SPARC64
128 #define cpu_cc_Z cpu_xcc_Z
129 #define cpu_cc_C cpu_xcc_C
131 #define cpu_cc_Z cpu_icc_Z
132 #define cpu_cc_C cpu_icc_C
133 #define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
134 #define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
137 /* Floating point registers */
138 static TCGv_i64 cpu_fpr
[TARGET_DPREGS
];
140 #define env_field_offsetof(X) offsetof(CPUSPARCState, X)
141 #ifdef TARGET_SPARC64
142 # define env32_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
143 # define env64_field_offsetof(X) env_field_offsetof(X)
145 # define env32_field_offsetof(X) env_field_offsetof(X)
146 # define env64_field_offsetof(X) ({ qemu_build_not_reached(); 0; })
149 typedef struct DisasCompare
{
155 typedef struct DisasDelayException
{
156 struct DisasDelayException
*next
;
159 /* Saved state at parent insn. */
162 } DisasDelayException
;
164 typedef struct DisasContext
{
165 DisasContextBase base
;
166 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
167 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
169 /* Used when JUMP_PC value is used. */
171 target_ulong jump_pc
[2];
176 bool address_mask_32bit
;
177 #ifndef CONFIG_USER_ONLY
179 #ifdef TARGET_SPARC64
185 #ifdef TARGET_SPARC64
189 DisasDelayException
*delay_excp_list
;
192 // This function uses non-native bit order
193 #define GET_FIELD(X, FROM, TO) \
194 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
196 // This function uses the order in the manuals, i.e. bit 0 is 2^0
197 #define GET_FIELD_SP(X, FROM, TO) \
198 GET_FIELD(X, 31 - (TO), 31 - (FROM))
200 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
201 #define GET_FIELD_SPs(x,a,b) sign_extend (GET_FIELD_SP(x,a,b), ((b) - (a) + 1))
203 #ifdef TARGET_SPARC64
204 #define DFPREG(r) (((r & 1) << 5) | (r & 0x1e))
205 #define QFPREG(r) (((r & 1) << 5) | (r & 0x1c))
207 #define DFPREG(r) (r & 0x1e)
208 #define QFPREG(r) (r & 0x1c)
211 #define UA2005_HTRAP_MASK 0xff
212 #define V8_TRAP_MASK 0x7f
214 #define IS_IMM (insn & (1<<13))
216 static void gen_update_fprs_dirty(DisasContext
*dc
, int rd
)
218 #if defined(TARGET_SPARC64)
219 int bit
= (rd
< 32) ? 1 : 2;
220 /* If we know we've already set this bit within the TB,
221 we can avoid setting it again. */
222 if (!(dc
->fprs_dirty
& bit
)) {
223 dc
->fprs_dirty
|= bit
;
224 tcg_gen_ori_i32(cpu_fprs
, cpu_fprs
, bit
);
229 /* floating point registers moves */
230 static TCGv_i32
gen_load_fpr_F(DisasContext
*dc
, unsigned int src
)
232 TCGv_i32 ret
= tcg_temp_new_i32();
234 tcg_gen_extrl_i64_i32(ret
, cpu_fpr
[src
/ 2]);
236 tcg_gen_extrh_i64_i32(ret
, cpu_fpr
[src
/ 2]);
241 static void gen_store_fpr_F(DisasContext
*dc
, unsigned int dst
, TCGv_i32 v
)
243 TCGv_i64 t
= tcg_temp_new_i64();
245 tcg_gen_extu_i32_i64(t
, v
);
246 tcg_gen_deposit_i64(cpu_fpr
[dst
/ 2], cpu_fpr
[dst
/ 2], t
,
247 (dst
& 1 ? 0 : 32), 32);
248 gen_update_fprs_dirty(dc
, dst
);
251 static TCGv_i32
gen_dest_fpr_F(DisasContext
*dc
)
253 return tcg_temp_new_i32();
256 static TCGv_i64
gen_load_fpr_D(DisasContext
*dc
, unsigned int src
)
259 return cpu_fpr
[src
/ 2];
262 static void gen_store_fpr_D(DisasContext
*dc
, unsigned int dst
, TCGv_i64 v
)
265 tcg_gen_mov_i64(cpu_fpr
[dst
/ 2], v
);
266 gen_update_fprs_dirty(dc
, dst
);
269 static TCGv_i64
gen_dest_fpr_D(DisasContext
*dc
, unsigned int dst
)
271 return cpu_fpr
[DFPREG(dst
) / 2];
274 static void gen_op_load_fpr_QT0(unsigned int src
)
276 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
277 offsetof(CPU_QuadU
, ll
.upper
));
278 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
279 offsetof(CPU_QuadU
, ll
.lower
));
282 static void gen_op_load_fpr_QT1(unsigned int src
)
284 tcg_gen_st_i64(cpu_fpr
[src
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
285 offsetof(CPU_QuadU
, ll
.upper
));
286 tcg_gen_st_i64(cpu_fpr
[src
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt1
) +
287 offsetof(CPU_QuadU
, ll
.lower
));
290 static void gen_op_store_QT0_fpr(unsigned int dst
)
292 tcg_gen_ld_i64(cpu_fpr
[dst
/ 2], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
293 offsetof(CPU_QuadU
, ll
.upper
));
294 tcg_gen_ld_i64(cpu_fpr
[dst
/2 + 1], tcg_env
, offsetof(CPUSPARCState
, qt0
) +
295 offsetof(CPU_QuadU
, ll
.lower
));
299 #ifdef CONFIG_USER_ONLY
300 #define supervisor(dc) 0
301 #define hypervisor(dc) 0
303 #ifdef TARGET_SPARC64
304 #define hypervisor(dc) (dc->hypervisor)
305 #define supervisor(dc) (dc->supervisor | dc->hypervisor)
307 #define supervisor(dc) (dc->supervisor)
308 #define hypervisor(dc) 0
312 #if !defined(TARGET_SPARC64)
313 # define AM_CHECK(dc) false
314 #elif defined(TARGET_ABI32)
315 # define AM_CHECK(dc) true
316 #elif defined(CONFIG_USER_ONLY)
317 # define AM_CHECK(dc) false
319 # define AM_CHECK(dc) ((dc)->address_mask_32bit)
322 static void gen_address_mask(DisasContext
*dc
, TCGv addr
)
325 tcg_gen_andi_tl(addr
, addr
, 0xffffffffULL
);
329 static target_ulong
address_mask_i(DisasContext
*dc
, target_ulong addr
)
331 return AM_CHECK(dc
) ? (uint32_t)addr
: addr
;
334 static TCGv
gen_load_gpr(DisasContext
*dc
, int reg
)
338 return cpu_regs
[reg
];
340 TCGv t
= tcg_temp_new();
341 tcg_gen_movi_tl(t
, 0);
346 static void gen_store_gpr(DisasContext
*dc
, int reg
, TCGv v
)
350 tcg_gen_mov_tl(cpu_regs
[reg
], v
);
354 static TCGv
gen_dest_gpr(DisasContext
*dc
, int reg
)
358 return cpu_regs
[reg
];
360 return tcg_temp_new();
364 static bool use_goto_tb(DisasContext
*s
, target_ulong pc
, target_ulong npc
)
366 return translator_use_goto_tb(&s
->base
, pc
) &&
367 translator_use_goto_tb(&s
->base
, npc
);
370 static void gen_goto_tb(DisasContext
*s
, int tb_num
,
371 target_ulong pc
, target_ulong npc
)
373 if (use_goto_tb(s
, pc
, npc
)) {
374 /* jump to same page: we can use a direct jump */
375 tcg_gen_goto_tb(tb_num
);
376 tcg_gen_movi_tl(cpu_pc
, pc
);
377 tcg_gen_movi_tl(cpu_npc
, npc
);
378 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
380 /* jump to another page: we can use an indirect jump */
381 tcg_gen_movi_tl(cpu_pc
, pc
);
382 tcg_gen_movi_tl(cpu_npc
, npc
);
383 tcg_gen_lookup_and_goto_ptr();
387 static TCGv
gen_carry32(void)
389 if (TARGET_LONG_BITS
== 64) {
390 TCGv t
= tcg_temp_new();
391 tcg_gen_extract_tl(t
, cpu_icc_C
, 32, 1);
397 static void gen_op_addcc_int(TCGv dst
, TCGv src1
, TCGv src2
, TCGv cin
)
399 TCGv z
= tcg_constant_tl(0);
402 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, cin
, z
);
403 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, cpu_cc_N
, cpu_cc_C
, src2
, z
);
405 tcg_gen_add2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, src2
, z
);
407 tcg_gen_xor_tl(cpu_cc_Z
, src1
, src2
);
408 tcg_gen_xor_tl(cpu_cc_V
, cpu_cc_N
, src2
);
409 tcg_gen_andc_tl(cpu_cc_V
, cpu_cc_V
, cpu_cc_Z
);
410 if (TARGET_LONG_BITS
== 64) {
412 * Carry-in to bit 32 is result ^ src1 ^ src2.
413 * We already have the src xor term in Z, from computation of V.
415 tcg_gen_xor_tl(cpu_icc_C
, cpu_cc_Z
, cpu_cc_N
);
416 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
418 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
419 tcg_gen_mov_tl(dst
, cpu_cc_N
);
422 static void gen_op_addcc(TCGv dst
, TCGv src1
, TCGv src2
)
424 gen_op_addcc_int(dst
, src1
, src2
, NULL
);
427 static void gen_op_taddcc(TCGv dst
, TCGv src1
, TCGv src2
)
429 TCGv t
= tcg_temp_new();
431 /* Save the tag bits around modification of dst. */
432 tcg_gen_or_tl(t
, src1
, src2
);
434 gen_op_addcc(dst
, src1
, src2
);
436 /* Incorprate tag bits into icc.V */
437 tcg_gen_andi_tl(t
, t
, 3);
438 tcg_gen_neg_tl(t
, t
);
439 tcg_gen_ext32u_tl(t
, t
);
440 tcg_gen_or_tl(cpu_cc_V
, cpu_cc_V
, t
);
443 static void gen_op_addc(TCGv dst
, TCGv src1
, TCGv src2
)
445 tcg_gen_add_tl(dst
, src1
, src2
);
446 tcg_gen_add_tl(dst
, dst
, gen_carry32());
449 static void gen_op_addccc(TCGv dst
, TCGv src1
, TCGv src2
)
451 gen_op_addcc_int(dst
, src1
, src2
, gen_carry32());
454 static void gen_op_subcc_int(TCGv dst
, TCGv src1
, TCGv src2
, TCGv cin
)
456 TCGv z
= tcg_constant_tl(0);
459 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, cin
, z
);
460 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, cpu_cc_N
, cpu_cc_C
, src2
, z
);
462 tcg_gen_sub2_tl(cpu_cc_N
, cpu_cc_C
, src1
, z
, src2
, z
);
464 tcg_gen_neg_tl(cpu_cc_C
, cpu_cc_C
);
465 tcg_gen_xor_tl(cpu_cc_Z
, src1
, src2
);
466 tcg_gen_xor_tl(cpu_cc_V
, cpu_cc_N
, src1
);
467 tcg_gen_and_tl(cpu_cc_V
, cpu_cc_V
, cpu_cc_Z
);
468 #ifdef TARGET_SPARC64
469 tcg_gen_xor_tl(cpu_icc_C
, cpu_cc_Z
, cpu_cc_N
);
470 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
472 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
473 tcg_gen_mov_tl(dst
, cpu_cc_N
);
476 static void gen_op_subcc(TCGv dst
, TCGv src1
, TCGv src2
)
478 gen_op_subcc_int(dst
, src1
, src2
, NULL
);
481 static void gen_op_tsubcc(TCGv dst
, TCGv src1
, TCGv src2
)
483 TCGv t
= tcg_temp_new();
485 /* Save the tag bits around modification of dst. */
486 tcg_gen_or_tl(t
, src1
, src2
);
488 gen_op_subcc(dst
, src1
, src2
);
490 /* Incorprate tag bits into icc.V */
491 tcg_gen_andi_tl(t
, t
, 3);
492 tcg_gen_neg_tl(t
, t
);
493 tcg_gen_ext32u_tl(t
, t
);
494 tcg_gen_or_tl(cpu_cc_V
, cpu_cc_V
, t
);
497 static void gen_op_subc(TCGv dst
, TCGv src1
, TCGv src2
)
499 tcg_gen_sub_tl(dst
, src1
, src2
);
500 tcg_gen_sub_tl(dst
, dst
, gen_carry32());
503 static void gen_op_subccc(TCGv dst
, TCGv src1
, TCGv src2
)
505 gen_op_subcc_int(dst
, src1
, src2
, gen_carry32());
508 static void gen_op_mulscc(TCGv dst
, TCGv src1
, TCGv src2
)
510 TCGv zero
= tcg_constant_tl(0);
511 TCGv t_src1
= tcg_temp_new();
512 TCGv t_src2
= tcg_temp_new();
513 TCGv t0
= tcg_temp_new();
515 tcg_gen_ext32u_tl(t_src1
, src1
);
516 tcg_gen_ext32u_tl(t_src2
, src2
);
522 tcg_gen_andi_tl(t0
, cpu_y
, 0x1);
523 tcg_gen_movcond_tl(TCG_COND_EQ
, t_src2
, t0
, zero
, zero
, t_src2
);
527 * y = (b2 << 31) | (y >> 1);
529 tcg_gen_extract_tl(t0
, cpu_y
, 1, 31);
530 tcg_gen_deposit_tl(cpu_y
, t0
, src1
, 31, 1);
533 tcg_gen_xor_tl(t0
, cpu_cc_N
, cpu_cc_V
);
536 * src1 = (b1 << 31) | (src1 >> 1)
538 tcg_gen_andi_tl(t0
, t0
, 1u << 31);
539 tcg_gen_shri_tl(t_src1
, t_src1
, 1);
540 tcg_gen_or_tl(t_src1
, t_src1
, t0
);
542 gen_op_addcc(dst
, t_src1
, t_src2
);
545 static void gen_op_multiply(TCGv dst
, TCGv src1
, TCGv src2
, int sign_ext
)
547 #if TARGET_LONG_BITS == 32
549 tcg_gen_muls2_tl(dst
, cpu_y
, src1
, src2
);
551 tcg_gen_mulu2_tl(dst
, cpu_y
, src1
, src2
);
554 TCGv t0
= tcg_temp_new_i64();
555 TCGv t1
= tcg_temp_new_i64();
558 tcg_gen_ext32s_i64(t0
, src1
);
559 tcg_gen_ext32s_i64(t1
, src2
);
561 tcg_gen_ext32u_i64(t0
, src1
);
562 tcg_gen_ext32u_i64(t1
, src2
);
565 tcg_gen_mul_i64(dst
, t0
, t1
);
566 tcg_gen_shri_i64(cpu_y
, dst
, 32);
570 static void gen_op_umul(TCGv dst
, TCGv src1
, TCGv src2
)
572 /* zero-extend truncated operands before multiplication */
573 gen_op_multiply(dst
, src1
, src2
, 0);
576 static void gen_op_smul(TCGv dst
, TCGv src1
, TCGv src2
)
578 /* sign-extend truncated operands before multiplication */
579 gen_op_multiply(dst
, src1
, src2
, 1);
582 static void gen_op_udivx(TCGv dst
, TCGv src1
, TCGv src2
)
584 gen_helper_udivx(dst
, tcg_env
, src1
, src2
);
587 static void gen_op_sdivx(TCGv dst
, TCGv src1
, TCGv src2
)
589 gen_helper_sdivx(dst
, tcg_env
, src1
, src2
);
592 static void gen_op_udiv(TCGv dst
, TCGv src1
, TCGv src2
)
594 #ifdef TARGET_SPARC64
595 gen_helper_udiv(dst
, tcg_env
, src1
, src2
);
596 tcg_gen_ext32u_tl(dst
, dst
);
598 TCGv_i64 t64
= tcg_temp_new_i64();
599 gen_helper_udiv(t64
, tcg_env
, src1
, src2
);
600 tcg_gen_trunc_i64_tl(dst
, t64
);
604 static void gen_op_sdiv(TCGv dst
, TCGv src1
, TCGv src2
)
606 #ifdef TARGET_SPARC64
607 gen_helper_sdiv(dst
, tcg_env
, src1
, src2
);
608 tcg_gen_ext32s_tl(dst
, dst
);
610 TCGv_i64 t64
= tcg_temp_new_i64();
611 gen_helper_sdiv(t64
, tcg_env
, src1
, src2
);
612 tcg_gen_trunc_i64_tl(dst
, t64
);
616 static void gen_op_udivcc(TCGv dst
, TCGv src1
, TCGv src2
)
620 #ifdef TARGET_SPARC64
623 t64
= tcg_temp_new_i64();
626 gen_helper_udiv(t64
, tcg_env
, src1
, src2
);
628 #ifdef TARGET_SPARC64
629 tcg_gen_ext32u_tl(cpu_cc_N
, t64
);
630 tcg_gen_shri_tl(cpu_cc_V
, t64
, 32);
631 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
632 tcg_gen_movi_tl(cpu_icc_C
, 0);
634 tcg_gen_extr_i64_tl(cpu_cc_N
, cpu_cc_V
, t64
);
636 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
637 tcg_gen_movi_tl(cpu_cc_C
, 0);
638 tcg_gen_mov_tl(dst
, cpu_cc_N
);
641 static void gen_op_sdivcc(TCGv dst
, TCGv src1
, TCGv src2
)
645 #ifdef TARGET_SPARC64
648 t64
= tcg_temp_new_i64();
651 gen_helper_sdiv(t64
, tcg_env
, src1
, src2
);
653 #ifdef TARGET_SPARC64
654 tcg_gen_ext32s_tl(cpu_cc_N
, t64
);
655 tcg_gen_shri_tl(cpu_cc_V
, t64
, 32);
656 tcg_gen_mov_tl(cpu_icc_Z
, cpu_cc_N
);
657 tcg_gen_movi_tl(cpu_icc_C
, 0);
659 tcg_gen_extr_i64_tl(cpu_cc_N
, cpu_cc_V
, t64
);
661 tcg_gen_mov_tl(cpu_cc_Z
, cpu_cc_N
);
662 tcg_gen_movi_tl(cpu_cc_C
, 0);
663 tcg_gen_mov_tl(dst
, cpu_cc_N
);
666 static void gen_op_taddcctv(TCGv dst
, TCGv src1
, TCGv src2
)
668 gen_helper_taddcctv(dst
, tcg_env
, src1
, src2
);
671 static void gen_op_tsubcctv(TCGv dst
, TCGv src1
, TCGv src2
)
673 gen_helper_tsubcctv(dst
, tcg_env
, src1
, src2
);
676 static void gen_op_popc(TCGv dst
, TCGv src1
, TCGv src2
)
678 tcg_gen_ctpop_tl(dst
, src2
);
681 #ifndef TARGET_SPARC64
682 static void gen_helper_array8(TCGv dst
, TCGv src1
, TCGv src2
)
684 g_assert_not_reached();
688 static void gen_op_array16(TCGv dst
, TCGv src1
, TCGv src2
)
690 gen_helper_array8(dst
, src1
, src2
);
691 tcg_gen_shli_tl(dst
, dst
, 1);
694 static void gen_op_array32(TCGv dst
, TCGv src1
, TCGv src2
)
696 gen_helper_array8(dst
, src1
, src2
);
697 tcg_gen_shli_tl(dst
, dst
, 2);
700 static void gen_op_fpack16(TCGv_i32 dst
, TCGv_i64 src
)
702 #ifdef TARGET_SPARC64
703 gen_helper_fpack16(dst
, cpu_gsr
, src
);
705 g_assert_not_reached();
709 static void gen_op_fpackfix(TCGv_i32 dst
, TCGv_i64 src
)
711 #ifdef TARGET_SPARC64
712 gen_helper_fpackfix(dst
, cpu_gsr
, src
);
714 g_assert_not_reached();
718 static void gen_op_fpack32(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
720 #ifdef TARGET_SPARC64
721 gen_helper_fpack32(dst
, cpu_gsr
, src1
, src2
);
723 g_assert_not_reached();
727 static void gen_op_faligndata(TCGv_i64 dst
, TCGv_i64 s1
, TCGv_i64 s2
)
729 #ifdef TARGET_SPARC64
734 shift
= tcg_temp_new();
736 tcg_gen_andi_tl(shift
, cpu_gsr
, 7);
737 tcg_gen_shli_tl(shift
, shift
, 3);
738 tcg_gen_shl_tl(t1
, s1
, shift
);
741 * A shift of 64 does not produce 0 in TCG. Divide this into a
742 * shift of (up to 63) followed by a constant shift of 1.
744 tcg_gen_xori_tl(shift
, shift
, 63);
745 tcg_gen_shr_tl(t2
, s2
, shift
);
746 tcg_gen_shri_tl(t2
, t2
, 1);
748 tcg_gen_or_tl(dst
, t1
, t2
);
750 g_assert_not_reached();
754 static void gen_op_bshuffle(TCGv_i64 dst
, TCGv_i64 src1
, TCGv_i64 src2
)
756 #ifdef TARGET_SPARC64
757 gen_helper_bshuffle(dst
, cpu_gsr
, src1
, src2
);
759 g_assert_not_reached();
764 static void gen_op_eval_ba(TCGv dst
)
766 tcg_gen_movi_tl(dst
, 1);
770 static void gen_op_eval_bn(TCGv dst
)
772 tcg_gen_movi_tl(dst
, 0);
776 FPSR bit field FCC1 | FCC0:
782 static void gen_mov_reg_FCC0(TCGv reg
, TCGv src
,
783 unsigned int fcc_offset
)
785 tcg_gen_shri_tl(reg
, src
, FSR_FCC0_SHIFT
+ fcc_offset
);
786 tcg_gen_andi_tl(reg
, reg
, 0x1);
789 static void gen_mov_reg_FCC1(TCGv reg
, TCGv src
, unsigned int fcc_offset
)
791 tcg_gen_shri_tl(reg
, src
, FSR_FCC1_SHIFT
+ fcc_offset
);
792 tcg_gen_andi_tl(reg
, reg
, 0x1);
796 static void gen_op_eval_fbne(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
798 TCGv t0
= tcg_temp_new();
799 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
800 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
801 tcg_gen_or_tl(dst
, dst
, t0
);
804 // 1 or 2: FCC0 ^ FCC1
805 static void gen_op_eval_fblg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
807 TCGv t0
= tcg_temp_new();
808 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
809 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
810 tcg_gen_xor_tl(dst
, dst
, t0
);
814 static void gen_op_eval_fbul(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
816 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
820 static void gen_op_eval_fbl(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
822 TCGv t0
= tcg_temp_new();
823 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
824 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
825 tcg_gen_andc_tl(dst
, dst
, t0
);
829 static void gen_op_eval_fbug(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
831 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
835 static void gen_op_eval_fbg(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
837 TCGv t0
= tcg_temp_new();
838 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
839 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
840 tcg_gen_andc_tl(dst
, t0
, dst
);
844 static void gen_op_eval_fbu(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
846 TCGv t0
= tcg_temp_new();
847 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
848 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
849 tcg_gen_and_tl(dst
, dst
, t0
);
853 static void gen_op_eval_fbe(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
855 TCGv t0
= tcg_temp_new();
856 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
857 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
858 tcg_gen_or_tl(dst
, dst
, t0
);
859 tcg_gen_xori_tl(dst
, dst
, 0x1);
862 // 0 or 3: !(FCC0 ^ FCC1)
863 static void gen_op_eval_fbue(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
865 TCGv t0
= tcg_temp_new();
866 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
867 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
868 tcg_gen_xor_tl(dst
, dst
, t0
);
869 tcg_gen_xori_tl(dst
, dst
, 0x1);
873 static void gen_op_eval_fbge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
875 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
876 tcg_gen_xori_tl(dst
, dst
, 0x1);
879 // !1: !(FCC0 & !FCC1)
880 static void gen_op_eval_fbuge(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
882 TCGv t0
= tcg_temp_new();
883 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
884 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
885 tcg_gen_andc_tl(dst
, dst
, t0
);
886 tcg_gen_xori_tl(dst
, dst
, 0x1);
890 static void gen_op_eval_fble(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
892 gen_mov_reg_FCC1(dst
, src
, fcc_offset
);
893 tcg_gen_xori_tl(dst
, dst
, 0x1);
896 // !2: !(!FCC0 & FCC1)
897 static void gen_op_eval_fbule(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
899 TCGv t0
= tcg_temp_new();
900 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
901 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
902 tcg_gen_andc_tl(dst
, t0
, dst
);
903 tcg_gen_xori_tl(dst
, dst
, 0x1);
906 // !3: !(FCC0 & FCC1)
907 static void gen_op_eval_fbo(TCGv dst
, TCGv src
, unsigned int fcc_offset
)
909 TCGv t0
= tcg_temp_new();
910 gen_mov_reg_FCC0(dst
, src
, fcc_offset
);
911 gen_mov_reg_FCC1(t0
, src
, fcc_offset
);
912 tcg_gen_and_tl(dst
, dst
, t0
);
913 tcg_gen_xori_tl(dst
, dst
, 0x1);
916 static void finishing_insn(DisasContext
*dc
)
919 * From here, there is no future path through an unwinding exception.
920 * If the current insn cannot raise an exception, the computation of
921 * cpu_cond may be able to be elided.
923 if (dc
->cpu_cond_live
) {
924 tcg_gen_discard_tl(cpu_cond
);
925 dc
->cpu_cond_live
= false;
929 static void gen_generic_branch(DisasContext
*dc
)
931 TCGv npc0
= tcg_constant_tl(dc
->jump_pc
[0]);
932 TCGv npc1
= tcg_constant_tl(dc
->jump_pc
[1]);
933 TCGv c2
= tcg_constant_tl(dc
->jump
.c2
);
935 tcg_gen_movcond_tl(dc
->jump
.cond
, cpu_npc
, dc
->jump
.c1
, c2
, npc0
, npc1
);
938 /* call this function before using the condition register as it may
939 have been set for a jump */
940 static void flush_cond(DisasContext
*dc
)
942 if (dc
->npc
== JUMP_PC
) {
943 gen_generic_branch(dc
);
944 dc
->npc
= DYNAMIC_PC_LOOKUP
;
948 static void save_npc(DisasContext
*dc
)
953 gen_generic_branch(dc
);
954 dc
->npc
= DYNAMIC_PC_LOOKUP
;
957 case DYNAMIC_PC_LOOKUP
:
960 g_assert_not_reached();
963 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
967 static void save_state(DisasContext
*dc
)
969 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
973 static void gen_exception(DisasContext
*dc
, int which
)
977 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(which
));
978 dc
->base
.is_jmp
= DISAS_NORETURN
;
981 static TCGLabel
*delay_exceptionv(DisasContext
*dc
, TCGv_i32 excp
)
983 DisasDelayException
*e
= g_new0(DisasDelayException
, 1);
985 e
->next
= dc
->delay_excp_list
;
986 dc
->delay_excp_list
= e
;
988 e
->lab
= gen_new_label();
991 /* Caller must have used flush_cond before branch. */
992 assert(e
->npc
!= JUMP_PC
);
998 static TCGLabel
*delay_exception(DisasContext
*dc
, int excp
)
1000 return delay_exceptionv(dc
, tcg_constant_i32(excp
));
1003 static void gen_check_align(DisasContext
*dc
, TCGv addr
, int mask
)
1005 TCGv t
= tcg_temp_new();
1008 tcg_gen_andi_tl(t
, addr
, mask
);
1011 lab
= delay_exception(dc
, TT_UNALIGNED
);
1012 tcg_gen_brcondi_tl(TCG_COND_NE
, t
, 0, lab
);
1015 static void gen_mov_pc_npc(DisasContext
*dc
)
1022 gen_generic_branch(dc
);
1023 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1024 dc
->pc
= DYNAMIC_PC_LOOKUP
;
1027 case DYNAMIC_PC_LOOKUP
:
1028 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
1032 g_assert_not_reached();
1039 static void gen_compare(DisasCompare
*cmp
, bool xcc
, unsigned int cond
,
1044 cmp
->c1
= t1
= tcg_temp_new();
1048 case 0x0: /* never */
1049 cmp
->cond
= TCG_COND_NEVER
;
1050 cmp
->c1
= tcg_constant_tl(0);
1053 case 0x1: /* eq: Z */
1054 cmp
->cond
= TCG_COND_EQ
;
1055 if (TARGET_LONG_BITS
== 32 || xcc
) {
1056 tcg_gen_mov_tl(t1
, cpu_cc_Z
);
1058 tcg_gen_ext32u_tl(t1
, cpu_icc_Z
);
1062 case 0x2: /* le: Z | (N ^ V) */
1065 * cc_Z || (N ^ V) < 0 NE
1066 * cc_Z && !((N ^ V) < 0) EQ
1067 * cc_Z & ~((N ^ V) >> TLB) EQ
1069 cmp
->cond
= TCG_COND_EQ
;
1070 tcg_gen_xor_tl(t1
, cpu_cc_N
, cpu_cc_V
);
1071 tcg_gen_sextract_tl(t1
, t1
, xcc
? 63 : 31, 1);
1072 tcg_gen_andc_tl(t1
, xcc
? cpu_cc_Z
: cpu_icc_Z
, t1
);
1073 if (TARGET_LONG_BITS
== 64 && !xcc
) {
1074 tcg_gen_ext32u_tl(t1
, t1
);
1078 case 0x3: /* lt: N ^ V */
1079 cmp
->cond
= TCG_COND_LT
;
1080 tcg_gen_xor_tl(t1
, cpu_cc_N
, cpu_cc_V
);
1081 if (TARGET_LONG_BITS
== 64 && !xcc
) {
1082 tcg_gen_ext32s_tl(t1
, t1
);
1086 case 0x4: /* leu: Z | C */
1089 * cc_Z == 0 || cc_C != 0 NE
1090 * cc_Z != 0 && cc_C == 0 EQ
1091 * cc_Z & (cc_C ? 0 : -1) EQ
1092 * cc_Z & (cc_C - 1) EQ
1094 cmp
->cond
= TCG_COND_EQ
;
1095 if (TARGET_LONG_BITS
== 32 || xcc
) {
1096 tcg_gen_subi_tl(t1
, cpu_cc_C
, 1);
1097 tcg_gen_and_tl(t1
, t1
, cpu_cc_Z
);
1099 tcg_gen_extract_tl(t1
, cpu_icc_C
, 32, 1);
1100 tcg_gen_subi_tl(t1
, t1
, 1);
1101 tcg_gen_and_tl(t1
, t1
, cpu_icc_Z
);
1102 tcg_gen_ext32u_tl(t1
, t1
);
1106 case 0x5: /* ltu: C */
1107 cmp
->cond
= TCG_COND_NE
;
1108 if (TARGET_LONG_BITS
== 32 || xcc
) {
1109 tcg_gen_mov_tl(t1
, cpu_cc_C
);
1111 tcg_gen_extract_tl(t1
, cpu_icc_C
, 32, 1);
1115 case 0x6: /* neg: N */
1116 cmp
->cond
= TCG_COND_LT
;
1117 if (TARGET_LONG_BITS
== 32 || xcc
) {
1118 tcg_gen_mov_tl(t1
, cpu_cc_N
);
1120 tcg_gen_ext32s_tl(t1
, cpu_cc_N
);
1124 case 0x7: /* vs: V */
1125 cmp
->cond
= TCG_COND_LT
;
1126 if (TARGET_LONG_BITS
== 32 || xcc
) {
1127 tcg_gen_mov_tl(t1
, cpu_cc_V
);
1129 tcg_gen_ext32s_tl(t1
, cpu_cc_V
);
1134 cmp
->cond
= tcg_invert_cond(cmp
->cond
);
1138 static void gen_fcompare(DisasCompare
*cmp
, unsigned int cc
, unsigned int cond
)
1140 unsigned int offset
;
1143 /* For now we still generate a straight boolean result. */
1144 cmp
->cond
= TCG_COND_NE
;
1145 cmp
->c1
= r_dst
= tcg_temp_new();
1166 gen_op_eval_bn(r_dst
);
1169 gen_op_eval_fbne(r_dst
, cpu_fsr
, offset
);
1172 gen_op_eval_fblg(r_dst
, cpu_fsr
, offset
);
1175 gen_op_eval_fbul(r_dst
, cpu_fsr
, offset
);
1178 gen_op_eval_fbl(r_dst
, cpu_fsr
, offset
);
1181 gen_op_eval_fbug(r_dst
, cpu_fsr
, offset
);
1184 gen_op_eval_fbg(r_dst
, cpu_fsr
, offset
);
1187 gen_op_eval_fbu(r_dst
, cpu_fsr
, offset
);
1190 gen_op_eval_ba(r_dst
);
1193 gen_op_eval_fbe(r_dst
, cpu_fsr
, offset
);
1196 gen_op_eval_fbue(r_dst
, cpu_fsr
, offset
);
1199 gen_op_eval_fbge(r_dst
, cpu_fsr
, offset
);
1202 gen_op_eval_fbuge(r_dst
, cpu_fsr
, offset
);
1205 gen_op_eval_fble(r_dst
, cpu_fsr
, offset
);
1208 gen_op_eval_fbule(r_dst
, cpu_fsr
, offset
);
1211 gen_op_eval_fbo(r_dst
, cpu_fsr
, offset
);
1217 static const TCGCond gen_tcg_cond_reg
[8] = {
1218 TCG_COND_NEVER
, /* reserved */
1222 TCG_COND_NEVER
, /* reserved */
1228 static void gen_compare_reg(DisasCompare
*cmp
, int cond
, TCGv r_src
)
1230 cmp
->cond
= tcg_invert_cond(gen_tcg_cond_reg
[cond
]);
1231 cmp
->c1
= tcg_temp_new();
1233 tcg_gen_mov_tl(cmp
->c1
, r_src
);
1236 static void gen_op_clear_ieee_excp_and_FTT(void)
1238 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_CEXC_NMASK
);
1241 static void gen_op_fmovs(TCGv_i32 dst
, TCGv_i32 src
)
1243 gen_op_clear_ieee_excp_and_FTT();
1244 tcg_gen_mov_i32(dst
, src
);
1247 static void gen_op_fnegs(TCGv_i32 dst
, TCGv_i32 src
)
1249 gen_op_clear_ieee_excp_and_FTT();
1250 gen_helper_fnegs(dst
, src
);
1253 static void gen_op_fabss(TCGv_i32 dst
, TCGv_i32 src
)
1255 gen_op_clear_ieee_excp_and_FTT();
1256 gen_helper_fabss(dst
, src
);
1259 static void gen_op_fmovd(TCGv_i64 dst
, TCGv_i64 src
)
1261 gen_op_clear_ieee_excp_and_FTT();
1262 tcg_gen_mov_i64(dst
, src
);
1265 static void gen_op_fnegd(TCGv_i64 dst
, TCGv_i64 src
)
1267 gen_op_clear_ieee_excp_and_FTT();
1268 gen_helper_fnegd(dst
, src
);
1271 static void gen_op_fabsd(TCGv_i64 dst
, TCGv_i64 src
)
1273 gen_op_clear_ieee_excp_and_FTT();
1274 gen_helper_fabsd(dst
, src
);
1277 #ifdef TARGET_SPARC64
1278 static void gen_op_fcmps(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1282 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1285 gen_helper_fcmps_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1288 gen_helper_fcmps_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1291 gen_helper_fcmps_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1296 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1300 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1303 gen_helper_fcmpd_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1306 gen_helper_fcmpd_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1309 gen_helper_fcmpd_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1314 static void gen_op_fcmpq(int fccno
)
1318 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1321 gen_helper_fcmpq_fcc1(cpu_fsr
, tcg_env
);
1324 gen_helper_fcmpq_fcc2(cpu_fsr
, tcg_env
);
1327 gen_helper_fcmpq_fcc3(cpu_fsr
, tcg_env
);
1332 static void gen_op_fcmpes(int fccno
, TCGv_i32 r_rs1
, TCGv_i32 r_rs2
)
1336 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1339 gen_helper_fcmpes_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1342 gen_helper_fcmpes_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1345 gen_helper_fcmpes_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1350 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1354 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1357 gen_helper_fcmped_fcc1(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1360 gen_helper_fcmped_fcc2(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1363 gen_helper_fcmped_fcc3(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1368 static void gen_op_fcmpeq(int fccno
)
1372 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1375 gen_helper_fcmpeq_fcc1(cpu_fsr
, tcg_env
);
1378 gen_helper_fcmpeq_fcc2(cpu_fsr
, tcg_env
);
1381 gen_helper_fcmpeq_fcc3(cpu_fsr
, tcg_env
);
1388 static void gen_op_fcmps(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1390 gen_helper_fcmps(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1393 static void gen_op_fcmpd(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1395 gen_helper_fcmpd(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1398 static void gen_op_fcmpq(int fccno
)
1400 gen_helper_fcmpq(cpu_fsr
, tcg_env
);
1403 static void gen_op_fcmpes(int fccno
, TCGv r_rs1
, TCGv r_rs2
)
1405 gen_helper_fcmpes(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1408 static void gen_op_fcmped(int fccno
, TCGv_i64 r_rs1
, TCGv_i64 r_rs2
)
1410 gen_helper_fcmped(cpu_fsr
, tcg_env
, r_rs1
, r_rs2
);
1413 static void gen_op_fcmpeq(int fccno
)
1415 gen_helper_fcmpeq(cpu_fsr
, tcg_env
);
1419 static void gen_op_fpexception_im(DisasContext
*dc
, int fsr_flags
)
1421 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, FSR_FTT_NMASK
);
1422 tcg_gen_ori_tl(cpu_fsr
, cpu_fsr
, fsr_flags
);
1423 gen_exception(dc
, TT_FP_EXCP
);
1426 static int gen_trap_ifnofpu(DisasContext
*dc
)
1428 #if !defined(CONFIG_USER_ONLY)
1429 if (!dc
->fpu_enabled
) {
1430 gen_exception(dc
, TT_NFPU_INSN
);
1458 * For asi == -1, treat as non-asi.
 1459  * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
1461 static DisasASI
resolve_asi(DisasContext
*dc
, int asi
, MemOp memop
)
1463 ASIType type
= GET_ASI_HELPER
;
1464 int mem_idx
= dc
->mem_idx
;
1467 /* Artificial "non-asi" case. */
1468 type
= GET_ASI_DIRECT
;
1472 #ifndef TARGET_SPARC64
1473 /* Before v9, all asis are immediate and privileged. */
1475 gen_exception(dc
, TT_ILL_INSN
);
1476 type
= GET_ASI_EXCP
;
1477 } else if (supervisor(dc
)
1478 /* Note that LEON accepts ASI_USERDATA in user mode, for
1479 use with CASA. Also note that previous versions of
1480 QEMU allowed (and old versions of gcc emitted) ASI_P
1481 for LEON, which is incorrect. */
1482 || (asi
== ASI_USERDATA
1483 && (dc
->def
->features
& CPU_FEATURE_CASA
))) {
1485 case ASI_USERDATA
: /* User data access */
1486 mem_idx
= MMU_USER_IDX
;
1487 type
= GET_ASI_DIRECT
;
1489 case ASI_KERNELDATA
: /* Supervisor data access */
1490 mem_idx
= MMU_KERNEL_IDX
;
1491 type
= GET_ASI_DIRECT
;
1493 case ASI_M_BYPASS
: /* MMU passthrough */
1494 case ASI_LEON_BYPASS
: /* LEON MMU passthrough */
1495 mem_idx
= MMU_PHYS_IDX
;
1496 type
= GET_ASI_DIRECT
;
1498 case ASI_M_BCOPY
: /* Block copy, sta access */
1499 mem_idx
= MMU_KERNEL_IDX
;
1500 type
= GET_ASI_BCOPY
;
1502 case ASI_M_BFILL
: /* Block fill, stda access */
1503 mem_idx
= MMU_KERNEL_IDX
;
1504 type
= GET_ASI_BFILL
;
1508 /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
1509 * permissions check in get_physical_address(..).
1511 mem_idx
= (dc
->mem_idx
== MMU_PHYS_IDX
) ? MMU_PHYS_IDX
: mem_idx
;
1513 gen_exception(dc
, TT_PRIV_INSN
);
1514 type
= GET_ASI_EXCP
;
1520 /* With v9, all asis below 0x80 are privileged. */
1521 /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
1522 down that bit into DisasContext. For the moment that's ok,
1523 since the direct implementations below doesn't have any ASIs
1524 in the restricted [0x30, 0x7f] range, and the check will be
1525 done properly in the helper. */
1526 if (!supervisor(dc
) && asi
< 0x80) {
1527 gen_exception(dc
, TT_PRIV_ACT
);
1528 type
= GET_ASI_EXCP
;
1531 case ASI_REAL
: /* Bypass */
1532 case ASI_REAL_IO
: /* Bypass, non-cacheable */
1533 case ASI_REAL_L
: /* Bypass LE */
1534 case ASI_REAL_IO_L
: /* Bypass, non-cacheable LE */
1535 case ASI_TWINX_REAL
: /* Real address, twinx */
1536 case ASI_TWINX_REAL_L
: /* Real address, twinx, LE */
1537 case ASI_QUAD_LDD_PHYS
:
1538 case ASI_QUAD_LDD_PHYS_L
:
1539 mem_idx
= MMU_PHYS_IDX
;
1541 case ASI_N
: /* Nucleus */
1542 case ASI_NL
: /* Nucleus LE */
1545 case ASI_NUCLEUS_QUAD_LDD
:
1546 case ASI_NUCLEUS_QUAD_LDD_L
:
1547 if (hypervisor(dc
)) {
1548 mem_idx
= MMU_PHYS_IDX
;
1550 mem_idx
= MMU_NUCLEUS_IDX
;
1553 case ASI_AIUP
: /* As if user primary */
1554 case ASI_AIUPL
: /* As if user primary LE */
1555 case ASI_TWINX_AIUP
:
1556 case ASI_TWINX_AIUP_L
:
1557 case ASI_BLK_AIUP_4V
:
1558 case ASI_BLK_AIUP_L_4V
:
1561 mem_idx
= MMU_USER_IDX
;
1563 case ASI_AIUS
: /* As if user secondary */
1564 case ASI_AIUSL
: /* As if user secondary LE */
1565 case ASI_TWINX_AIUS
:
1566 case ASI_TWINX_AIUS_L
:
1567 case ASI_BLK_AIUS_4V
:
1568 case ASI_BLK_AIUS_L_4V
:
1571 mem_idx
= MMU_USER_SECONDARY_IDX
;
1573 case ASI_S
: /* Secondary */
1574 case ASI_SL
: /* Secondary LE */
1577 case ASI_BLK_COMMIT_S
:
1584 if (mem_idx
== MMU_USER_IDX
) {
1585 mem_idx
= MMU_USER_SECONDARY_IDX
;
1586 } else if (mem_idx
== MMU_KERNEL_IDX
) {
1587 mem_idx
= MMU_KERNEL_SECONDARY_IDX
;
1590 case ASI_P
: /* Primary */
1591 case ASI_PL
: /* Primary LE */
1594 case ASI_BLK_COMMIT_P
:
1618 type
= GET_ASI_DIRECT
;
1620 case ASI_TWINX_REAL
:
1621 case ASI_TWINX_REAL_L
:
1624 case ASI_TWINX_AIUP
:
1625 case ASI_TWINX_AIUP_L
:
1626 case ASI_TWINX_AIUS
:
1627 case ASI_TWINX_AIUS_L
:
1632 case ASI_QUAD_LDD_PHYS
:
1633 case ASI_QUAD_LDD_PHYS_L
:
1634 case ASI_NUCLEUS_QUAD_LDD
:
1635 case ASI_NUCLEUS_QUAD_LDD_L
:
1636 type
= GET_ASI_DTWINX
;
1638 case ASI_BLK_COMMIT_P
:
1639 case ASI_BLK_COMMIT_S
:
1640 case ASI_BLK_AIUP_4V
:
1641 case ASI_BLK_AIUP_L_4V
:
1644 case ASI_BLK_AIUS_4V
:
1645 case ASI_BLK_AIUS_L_4V
:
1652 type
= GET_ASI_BLOCK
;
1659 type
= GET_ASI_SHORT
;
1666 type
= GET_ASI_SHORT
;
1669 /* The little-endian asis all have bit 3 set. */
1677 return (DisasASI
){ type
, asi
, mem_idx
, memop
};
1680 #if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
1681 static void gen_helper_ld_asi(TCGv_i64 r
, TCGv_env e
, TCGv a
,
1682 TCGv_i32 asi
, TCGv_i32 mop
)
1684 g_assert_not_reached();
1687 static void gen_helper_st_asi(TCGv_env e
, TCGv a
, TCGv_i64 r
,
1688 TCGv_i32 asi
, TCGv_i32 mop
)
1690 g_assert_not_reached();
1694 static void gen_ld_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1699 case GET_ASI_DTWINX
: /* Reserved for ldda. */
1700 gen_exception(dc
, TT_ILL_INSN
);
1702 case GET_ASI_DIRECT
:
1703 tcg_gen_qemu_ld_tl(dst
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1707 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1708 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1711 #ifdef TARGET_SPARC64
1712 gen_helper_ld_asi(dst
, tcg_env
, addr
, r_asi
, r_mop
);
1715 TCGv_i64 t64
= tcg_temp_new_i64();
1716 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1717 tcg_gen_trunc_i64_tl(dst
, t64
);
1725 static void gen_st_asi(DisasContext
*dc
, DisasASI
*da
, TCGv src
, TCGv addr
)
1731 case GET_ASI_DTWINX
: /* Reserved for stda. */
1732 if (TARGET_LONG_BITS
== 32) {
1733 gen_exception(dc
, TT_ILL_INSN
);
1735 } else if (!(dc
->def
->features
& CPU_FEATURE_HYPV
)) {
1736 /* Pre OpenSPARC CPUs don't have these */
1737 gen_exception(dc
, TT_ILL_INSN
);
1740 /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
1743 case GET_ASI_DIRECT
:
1744 tcg_gen_qemu_st_tl(src
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
1748 assert(TARGET_LONG_BITS
== 32);
1749 /* Copy 32 bytes from the address in SRC to ADDR. */
1750 /* ??? The original qemu code suggests 4-byte alignment, dropping
1751 the low bits, but the only place I can see this used is in the
1752 Linux kernel with 32 byte alignment, which would make more sense
1753 as a cacheline-style operation. */
1755 TCGv saddr
= tcg_temp_new();
1756 TCGv daddr
= tcg_temp_new();
1757 TCGv four
= tcg_constant_tl(4);
1758 TCGv_i32 tmp
= tcg_temp_new_i32();
1761 tcg_gen_andi_tl(saddr
, src
, -4);
1762 tcg_gen_andi_tl(daddr
, addr
, -4);
1763 for (i
= 0; i
< 32; i
+= 4) {
1764 /* Since the loads and stores are paired, allow the
1765 copy to happen in the host endianness. */
1766 tcg_gen_qemu_ld_i32(tmp
, saddr
, da
->mem_idx
, MO_UL
);
1767 tcg_gen_qemu_st_i32(tmp
, daddr
, da
->mem_idx
, MO_UL
);
1768 tcg_gen_add_tl(saddr
, saddr
, four
);
1769 tcg_gen_add_tl(daddr
, daddr
, four
);
1776 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1777 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
| MO_ALIGN
);
1780 #ifdef TARGET_SPARC64
1781 gen_helper_st_asi(tcg_env
, addr
, src
, r_asi
, r_mop
);
1784 TCGv_i64 t64
= tcg_temp_new_i64();
1785 tcg_gen_extu_tl_i64(t64
, src
);
1786 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
1790 /* A write to a TLB register may alter page maps. End the TB. */
1791 dc
->npc
= DYNAMIC_PC
;
1797 static void gen_swap_asi(DisasContext
*dc
, DisasASI
*da
,
1798 TCGv dst
, TCGv src
, TCGv addr
)
1803 case GET_ASI_DIRECT
:
1804 tcg_gen_atomic_xchg_tl(dst
, addr
, src
,
1805 da
->mem_idx
, da
->memop
| MO_ALIGN
);
1808 /* ??? Should be DAE_invalid_asi. */
1809 gen_exception(dc
, TT_DATA_ACCESS
);
1814 static void gen_cas_asi(DisasContext
*dc
, DisasASI
*da
,
1815 TCGv oldv
, TCGv newv
, TCGv cmpv
, TCGv addr
)
1820 case GET_ASI_DIRECT
:
1821 tcg_gen_atomic_cmpxchg_tl(oldv
, addr
, cmpv
, newv
,
1822 da
->mem_idx
, da
->memop
| MO_ALIGN
);
1825 /* ??? Should be DAE_invalid_asi. */
1826 gen_exception(dc
, TT_DATA_ACCESS
);
1831 static void gen_ldstub_asi(DisasContext
*dc
, DisasASI
*da
, TCGv dst
, TCGv addr
)
1836 case GET_ASI_DIRECT
:
1837 tcg_gen_atomic_xchg_tl(dst
, addr
, tcg_constant_tl(0xff),
1838 da
->mem_idx
, MO_UB
);
1841 /* ??? In theory, this should be raise DAE_invalid_asi.
1842 But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1. */
1843 if (tb_cflags(dc
->base
.tb
) & CF_PARALLEL
) {
1844 gen_helper_exit_atomic(tcg_env
);
1846 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1847 TCGv_i32 r_mop
= tcg_constant_i32(MO_UB
);
1851 t64
= tcg_temp_new_i64();
1852 gen_helper_ld_asi(t64
, tcg_env
, addr
, r_asi
, r_mop
);
1854 s64
= tcg_constant_i64(0xff);
1855 gen_helper_st_asi(tcg_env
, addr
, s64
, r_asi
, r_mop
);
1857 tcg_gen_trunc_i64_tl(dst
, t64
);
1860 dc
->npc
= DYNAMIC_PC
;
1866 static void gen_ldf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
1869 MemOp memop
= da
->memop
;
1870 MemOp size
= memop
& MO_SIZE
;
1875 /* TODO: Use 128-bit load/store below. */
1876 if (size
== MO_128
) {
1877 memop
= (memop
& ~MO_SIZE
) | MO_64
;
1884 case GET_ASI_DIRECT
:
1885 memop
|= MO_ALIGN_4
;
1888 d32
= gen_dest_fpr_F(dc
);
1889 tcg_gen_qemu_ld_i32(d32
, addr
, da
->mem_idx
, memop
);
1890 gen_store_fpr_F(dc
, rd
, d32
);
1894 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
, memop
);
1898 d64
= tcg_temp_new_i64();
1899 tcg_gen_qemu_ld_i64(d64
, addr
, da
->mem_idx
, memop
);
1900 addr_tmp
= tcg_temp_new();
1901 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1902 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
1903 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
1906 g_assert_not_reached();
1911 /* Valid for lddfa on aligned registers only. */
1912 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
1913 /* The first operation checks required alignment. */
1914 addr_tmp
= tcg_temp_new();
1915 for (int i
= 0; ; ++i
) {
1916 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
1917 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
1921 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1925 gen_exception(dc
, TT_ILL_INSN
);
1930 /* Valid for lddfa only. */
1931 if (orig_size
== MO_64
) {
1932 tcg_gen_qemu_ld_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
1935 gen_exception(dc
, TT_ILL_INSN
);
1941 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
1942 TCGv_i32 r_mop
= tcg_constant_i32(memop
| MO_ALIGN
);
1945 /* According to the table in the UA2011 manual, the only
1946 other asis that are valid for ldfa/lddfa/ldqfa are
1947 the NO_FAULT asis. We still need a helper for these,
1948 but we can just use the integer asi helper for them. */
1951 d64
= tcg_temp_new_i64();
1952 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
1953 d32
= gen_dest_fpr_F(dc
);
1954 tcg_gen_extrl_i64_i32(d32
, d64
);
1955 gen_store_fpr_F(dc
, rd
, d32
);
1958 gen_helper_ld_asi(cpu_fpr
[rd
/ 2], tcg_env
, addr
,
1962 d64
= tcg_temp_new_i64();
1963 gen_helper_ld_asi(d64
, tcg_env
, addr
, r_asi
, r_mop
);
1964 addr_tmp
= tcg_temp_new();
1965 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
1966 gen_helper_ld_asi(cpu_fpr
[rd
/ 2 + 1], tcg_env
, addr_tmp
,
1968 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], d64
);
1971 g_assert_not_reached();
1978 static void gen_stf_asi(DisasContext
*dc
, DisasASI
*da
, MemOp orig_size
,
1981 MemOp memop
= da
->memop
;
1982 MemOp size
= memop
& MO_SIZE
;
1986 /* TODO: Use 128-bit load/store below. */
1987 if (size
== MO_128
) {
1988 memop
= (memop
& ~MO_SIZE
) | MO_64
;
1995 case GET_ASI_DIRECT
:
1996 memop
|= MO_ALIGN_4
;
1999 d32
= gen_load_fpr_F(dc
, rd
);
2000 tcg_gen_qemu_st_i32(d32
, addr
, da
->mem_idx
, memop
| MO_ALIGN
);
2003 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2004 memop
| MO_ALIGN_4
);
2007 /* Only 4-byte alignment required. However, it is legal for the
2008 cpu to signal the alignment fault, and the OS trap handler is
2009 required to fix it up. Requiring 16-byte alignment here avoids
2010 having to probe the second page before performing the first
2012 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2013 memop
| MO_ALIGN_16
);
2014 addr_tmp
= tcg_temp_new();
2015 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2016 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + 1], addr_tmp
, da
->mem_idx
, memop
);
2019 g_assert_not_reached();
2024 /* Valid for stdfa on aligned registers only. */
2025 if (orig_size
== MO_64
&& (rd
& 7) == 0) {
2026 /* The first operation checks required alignment. */
2027 addr_tmp
= tcg_temp_new();
2028 for (int i
= 0; ; ++i
) {
2029 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2 + i
], addr
, da
->mem_idx
,
2030 memop
| (i
== 0 ? MO_ALIGN_64
: 0));
2034 tcg_gen_addi_tl(addr_tmp
, addr
, 8);
2038 gen_exception(dc
, TT_ILL_INSN
);
2043 /* Valid for stdfa only. */
2044 if (orig_size
== MO_64
) {
2045 tcg_gen_qemu_st_i64(cpu_fpr
[rd
/ 2], addr
, da
->mem_idx
,
2048 gen_exception(dc
, TT_ILL_INSN
);
2053 /* According to the table in the UA2011 manual, the only
2054 other asis that are valid for ldfa/lddfa/ldqfa are
2055 the PST* asis, which aren't currently handled. */
2056 gen_exception(dc
, TT_ILL_INSN
);
2061 static void gen_ldda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2063 TCGv hi
= gen_dest_gpr(dc
, rd
);
2064 TCGv lo
= gen_dest_gpr(dc
, rd
+ 1);
2070 case GET_ASI_DTWINX
:
2071 #ifdef TARGET_SPARC64
2073 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2074 TCGv_i128 t
= tcg_temp_new_i128();
2076 tcg_gen_qemu_ld_i128(t
, addr
, da
->mem_idx
, mop
);
2078 * Note that LE twinx acts as if each 64-bit register result is
2079 * byte swapped. We perform one 128-bit LE load, so must swap
2080 * the order of the writebacks.
2082 if ((mop
& MO_BSWAP
) == MO_TE
) {
2083 tcg_gen_extr_i128_i64(lo
, hi
, t
);
2085 tcg_gen_extr_i128_i64(hi
, lo
, t
);
2090 g_assert_not_reached();
2093 case GET_ASI_DIRECT
:
2095 TCGv_i64 tmp
= tcg_temp_new_i64();
2097 tcg_gen_qemu_ld_i64(tmp
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2099 /* Note that LE ldda acts as if each 32-bit register
2100 result is byte swapped. Having just performed one
2101 64-bit bswap, we need now to swap the writebacks. */
2102 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2103 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2105 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2111 /* ??? In theory we've handled all of the ASIs that are valid
2112 for ldda, and this should raise DAE_invalid_asi. However,
2113 real hardware allows others. This can be seen with e.g.
2114 FreeBSD 10.3 wrt ASI_IC_TAG. */
2116 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2117 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2118 TCGv_i64 tmp
= tcg_temp_new_i64();
2121 gen_helper_ld_asi(tmp
, tcg_env
, addr
, r_asi
, r_mop
);
2124 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2125 tcg_gen_extr_i64_tl(lo
, hi
, tmp
);
2127 tcg_gen_extr_i64_tl(hi
, lo
, tmp
);
2133 gen_store_gpr(dc
, rd
, hi
);
2134 gen_store_gpr(dc
, rd
+ 1, lo
);
2137 static void gen_stda_asi(DisasContext
*dc
, DisasASI
*da
, TCGv addr
, int rd
)
2139 TCGv hi
= gen_load_gpr(dc
, rd
);
2140 TCGv lo
= gen_load_gpr(dc
, rd
+ 1);
2146 case GET_ASI_DTWINX
:
2147 #ifdef TARGET_SPARC64
2149 MemOp mop
= (da
->memop
& MO_BSWAP
) | MO_128
| MO_ALIGN_16
;
2150 TCGv_i128 t
= tcg_temp_new_i128();
2153 * Note that LE twinx acts as if each 64-bit register result is
2154 * byte swapped. We perform one 128-bit LE store, so must swap
2155 * the order of the construction.
2157 if ((mop
& MO_BSWAP
) == MO_TE
) {
2158 tcg_gen_concat_i64_i128(t
, lo
, hi
);
2160 tcg_gen_concat_i64_i128(t
, hi
, lo
);
2162 tcg_gen_qemu_st_i128(t
, addr
, da
->mem_idx
, mop
);
2166 g_assert_not_reached();
2169 case GET_ASI_DIRECT
:
2171 TCGv_i64 t64
= tcg_temp_new_i64();
2173 /* Note that LE stda acts as if each 32-bit register result is
2174 byte swapped. We will perform one 64-bit LE store, so now
2175 we must swap the order of the construction. */
2176 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2177 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2179 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2181 tcg_gen_qemu_st_i64(t64
, addr
, da
->mem_idx
, da
->memop
| MO_ALIGN
);
2186 assert(TARGET_LONG_BITS
== 32);
2187 /* Store 32 bytes of T64 to ADDR. */
2188 /* ??? The original qemu code suggests 8-byte alignment, dropping
2189 the low bits, but the only place I can see this used is in the
2190 Linux kernel with 32 byte alignment, which would make more sense
2191 as a cacheline-style operation. */
2193 TCGv_i64 t64
= tcg_temp_new_i64();
2194 TCGv d_addr
= tcg_temp_new();
2195 TCGv eight
= tcg_constant_tl(8);
2198 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2199 tcg_gen_andi_tl(d_addr
, addr
, -8);
2200 for (i
= 0; i
< 32; i
+= 8) {
2201 tcg_gen_qemu_st_i64(t64
, d_addr
, da
->mem_idx
, da
->memop
);
2202 tcg_gen_add_tl(d_addr
, d_addr
, eight
);
2208 /* ??? In theory we've handled all of the ASIs that are valid
2209 for stda, and this should raise DAE_invalid_asi. */
2211 TCGv_i32 r_asi
= tcg_constant_i32(da
->asi
);
2212 TCGv_i32 r_mop
= tcg_constant_i32(da
->memop
);
2213 TCGv_i64 t64
= tcg_temp_new_i64();
2216 if ((da
->memop
& MO_BSWAP
) == MO_TE
) {
2217 tcg_gen_concat_tl_i64(t64
, lo
, hi
);
2219 tcg_gen_concat_tl_i64(t64
, hi
, lo
);
2223 gen_helper_st_asi(tcg_env
, addr
, t64
, r_asi
, r_mop
);
2229 static void gen_fmovs(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2231 #ifdef TARGET_SPARC64
2232 TCGv_i32 c32
, zero
, dst
, s1
, s2
;
2233 TCGv_i64 c64
= tcg_temp_new_i64();
2235 /* We have two choices here: extend the 32 bit data and use movcond_i64,
2236 or fold the comparison down to 32 bits and use movcond_i32. Choose
2238 c32
= tcg_temp_new_i32();
2239 tcg_gen_setcondi_i64(cmp
->cond
, c64
, cmp
->c1
, cmp
->c2
);
2240 tcg_gen_extrl_i64_i32(c32
, c64
);
2242 s1
= gen_load_fpr_F(dc
, rs
);
2243 s2
= gen_load_fpr_F(dc
, rd
);
2244 dst
= gen_dest_fpr_F(dc
);
2245 zero
= tcg_constant_i32(0);
2247 tcg_gen_movcond_i32(TCG_COND_NE
, dst
, c32
, zero
, s1
, s2
);
2249 gen_store_fpr_F(dc
, rd
, dst
);
2251 qemu_build_not_reached();
2255 static void gen_fmovd(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2257 #ifdef TARGET_SPARC64
2258 TCGv_i64 dst
= gen_dest_fpr_D(dc
, rd
);
2259 tcg_gen_movcond_i64(cmp
->cond
, dst
, cmp
->c1
, tcg_constant_tl(cmp
->c2
),
2260 gen_load_fpr_D(dc
, rs
),
2261 gen_load_fpr_D(dc
, rd
));
2262 gen_store_fpr_D(dc
, rd
, dst
);
2264 qemu_build_not_reached();
2268 static void gen_fmovq(DisasContext
*dc
, DisasCompare
*cmp
, int rd
, int rs
)
2270 #ifdef TARGET_SPARC64
2271 int qd
= QFPREG(rd
);
2272 int qs
= QFPREG(rs
);
2273 TCGv c2
= tcg_constant_tl(cmp
->c2
);
2275 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2], cmp
->c1
, c2
,
2276 cpu_fpr
[qs
/ 2], cpu_fpr
[qd
/ 2]);
2277 tcg_gen_movcond_i64(cmp
->cond
, cpu_fpr
[qd
/ 2 + 1], cmp
->c1
, c2
,
2278 cpu_fpr
[qs
/ 2 + 1], cpu_fpr
[qd
/ 2 + 1]);
2280 gen_update_fprs_dirty(dc
, qd
);
2282 qemu_build_not_reached();
2286 #ifdef TARGET_SPARC64
2287 static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr
)
2289 TCGv_i32 r_tl
= tcg_temp_new_i32();
2291 /* load env->tl into r_tl */
2292 tcg_gen_ld_i32(r_tl
, tcg_env
, offsetof(CPUSPARCState
, tl
));
2294 /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
2295 tcg_gen_andi_i32(r_tl
, r_tl
, MAXTL_MASK
);
2297 /* calculate offset to current trap state from env->ts, reuse r_tl */
2298 tcg_gen_muli_i32(r_tl
, r_tl
, sizeof (trap_state
));
2299 tcg_gen_addi_ptr(r_tsptr
, tcg_env
, offsetof(CPUSPARCState
, ts
));
2301 /* tsptr = env->ts[env->tl & MAXTL_MASK] */
2303 TCGv_ptr r_tl_tmp
= tcg_temp_new_ptr();
2304 tcg_gen_ext_i32_ptr(r_tl_tmp
, r_tl
);
2305 tcg_gen_add_ptr(r_tsptr
, r_tsptr
, r_tl_tmp
);
2310 static int extract_dfpreg(DisasContext
*dc
, int x
)
2315 static int extract_qfpreg(DisasContext
*dc
, int x
)
2320 /* Include the auto-generated decoder. */
2321 #include "decode-insns.c.inc"
2323 #define TRANS(NAME, AVAIL, FUNC, ...) \
2324 static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
2325 { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }
2327 #define avail_ALL(C) true
2328 #ifdef TARGET_SPARC64
2329 # define avail_32(C) false
2330 # define avail_ASR17(C) false
2331 # define avail_CASA(C) true
2332 # define avail_DIV(C) true
2333 # define avail_MUL(C) true
2334 # define avail_POWERDOWN(C) false
2335 # define avail_64(C) true
2336 # define avail_GL(C) ((C)->def->features & CPU_FEATURE_GL)
2337 # define avail_HYPV(C) ((C)->def->features & CPU_FEATURE_HYPV)
2338 # define avail_VIS1(C) ((C)->def->features & CPU_FEATURE_VIS1)
2339 # define avail_VIS2(C) ((C)->def->features & CPU_FEATURE_VIS2)
2341 # define avail_32(C) true
2342 # define avail_ASR17(C) ((C)->def->features & CPU_FEATURE_ASR17)
2343 # define avail_CASA(C) ((C)->def->features & CPU_FEATURE_CASA)
2344 # define avail_DIV(C) ((C)->def->features & CPU_FEATURE_DIV)
2345 # define avail_MUL(C) ((C)->def->features & CPU_FEATURE_MUL)
2346 # define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
2347 # define avail_64(C) false
2348 # define avail_GL(C) false
2349 # define avail_HYPV(C) false
2350 # define avail_VIS1(C) false
2351 # define avail_VIS2(C) false
2354 /* Default case for non jump instructions. */
2355 static bool advance_pc(DisasContext
*dc
)
2364 case DYNAMIC_PC_LOOKUP
:
2366 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2367 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2371 /* we can do a static jump */
2372 l1
= gen_new_label();
2373 tcg_gen_brcondi_tl(dc
->jump
.cond
, dc
->jump
.c1
, dc
->jump
.c2
, l1
);
2375 /* jump not taken */
2376 gen_goto_tb(dc
, 1, dc
->jump_pc
[1], dc
->jump_pc
[1] + 4);
2380 gen_goto_tb(dc
, 0, dc
->jump_pc
[0], dc
->jump_pc
[0] + 4);
2382 dc
->base
.is_jmp
= DISAS_NORETURN
;
2386 g_assert_not_reached();
2390 dc
->npc
= dc
->npc
+ 4;
2396 * Major opcodes 00 and 01 -- branches, call, and sethi
2399 static bool advance_jump_cond(DisasContext
*dc
, DisasCompare
*cmp
,
2400 bool annul
, int disp
)
2402 target_ulong dest
= address_mask_i(dc
, dc
->pc
+ disp
* 4);
2407 if (cmp
->cond
== TCG_COND_ALWAYS
) {
2418 if (cmp
->cond
== TCG_COND_NEVER
) {
2423 tcg_gen_addi_tl(cpu_pc
, cpu_pc
, 4);
2425 tcg_gen_addi_tl(cpu_npc
, cpu_pc
, 4);
2427 dc
->pc
= npc
+ (annul
? 4 : 0);
2428 dc
->npc
= dc
->pc
+ 4;
2437 TCGLabel
*l1
= gen_new_label();
2439 tcg_gen_brcondi_tl(tcg_invert_cond(cmp
->cond
), cmp
->c1
, cmp
->c2
, l1
);
2440 gen_goto_tb(dc
, 0, npc
, dest
);
2442 gen_goto_tb(dc
, 1, npc
+ 4, npc
+ 8);
2444 dc
->base
.is_jmp
= DISAS_NORETURN
;
2449 case DYNAMIC_PC_LOOKUP
:
2450 tcg_gen_mov_tl(cpu_pc
, cpu_npc
);
2451 tcg_gen_addi_tl(cpu_npc
, cpu_npc
, 4);
2452 tcg_gen_movcond_tl(cmp
->cond
, cpu_npc
,
2453 cmp
->c1
, tcg_constant_tl(cmp
->c2
),
2454 tcg_constant_tl(dest
), cpu_npc
);
2458 g_assert_not_reached();
2464 dc
->jump_pc
[0] = dest
;
2465 dc
->jump_pc
[1] = npc
+ 4;
2467 /* The condition for cpu_cond is always NE -- normalize. */
2468 if (cmp
->cond
== TCG_COND_NE
) {
2469 tcg_gen_xori_tl(cpu_cond
, cmp
->c1
, cmp
->c2
);
2471 tcg_gen_setcondi_tl(cmp
->cond
, cpu_cond
, cmp
->c1
, cmp
->c2
);
2473 dc
->cpu_cond_live
= true;
2479 static bool raise_priv(DisasContext
*dc
)
2481 gen_exception(dc
, TT_PRIV_INSN
);
2485 static bool raise_unimpfpop(DisasContext
*dc
)
2487 gen_op_fpexception_im(dc
, FSR_FTT_UNIMPFPOP
);
2491 static bool gen_trap_float128(DisasContext
*dc
)
2493 if (dc
->def
->features
& CPU_FEATURE_FLOAT128
) {
2496 return raise_unimpfpop(dc
);
2499 static bool do_bpcc(DisasContext
*dc
, arg_bcc
*a
)
2503 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
2504 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2507 TRANS(Bicc
, ALL
, do_bpcc
, a
)
2508 TRANS(BPcc
, 64, do_bpcc
, a
)
2510 static bool do_fbpfcc(DisasContext
*dc
, arg_bcc
*a
)
2514 if (gen_trap_ifnofpu(dc
)) {
2517 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
2518 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2521 TRANS(FBPfcc
, 64, do_fbpfcc
, a
)
2522 TRANS(FBfcc
, ALL
, do_fbpfcc
, a
)
2524 static bool trans_BPr(DisasContext
*dc
, arg_BPr
*a
)
2528 if (!avail_64(dc
)) {
2531 if (gen_tcg_cond_reg
[a
->cond
] == TCG_COND_NEVER
) {
2535 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
2536 return advance_jump_cond(dc
, &cmp
, a
->a
, a
->i
);
2539 static bool trans_CALL(DisasContext
*dc
, arg_CALL
*a
)
2541 target_long target
= address_mask_i(dc
, dc
->pc
+ a
->i
* 4);
2543 gen_store_gpr(dc
, 15, tcg_constant_tl(dc
->pc
));
2549 static bool trans_NCP(DisasContext
*dc
, arg_NCP
*a
)
2552 * For sparc32, always generate the no-coprocessor exception.
2553 * For sparc64, always generate illegal instruction.
2555 #ifdef TARGET_SPARC64
2558 gen_exception(dc
, TT_NCP_INSN
);
2563 static bool trans_SETHI(DisasContext
*dc
, arg_SETHI
*a
)
2565 /* Special-case %g0 because that's the canonical nop. */
2567 gen_store_gpr(dc
, a
->rd
, tcg_constant_tl((uint32_t)a
->i
<< 10));
2569 return advance_pc(dc
);
2573 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
2576 static bool do_tcc(DisasContext
*dc
, int cond
, int cc
,
2577 int rs1
, bool imm
, int rs2_or_imm
)
2579 int mask
= ((dc
->def
->features
& CPU_FEATURE_HYPV
) && supervisor(dc
)
2580 ? UA2005_HTRAP_MASK
: V8_TRAP_MASK
);
2587 return advance_pc(dc
);
2591 * Immediate traps are the most common case. Since this value is
2592 * live across the branch, it really pays to evaluate the constant.
2594 if (rs1
== 0 && (imm
|| rs2_or_imm
== 0)) {
2595 trap
= tcg_constant_i32((rs2_or_imm
& mask
) + TT_TRAP
);
2597 trap
= tcg_temp_new_i32();
2598 tcg_gen_trunc_tl_i32(trap
, gen_load_gpr(dc
, rs1
));
2600 tcg_gen_addi_i32(trap
, trap
, rs2_or_imm
);
2602 TCGv_i32 t2
= tcg_temp_new_i32();
2603 tcg_gen_trunc_tl_i32(t2
, gen_load_gpr(dc
, rs2_or_imm
));
2604 tcg_gen_add_i32(trap
, trap
, t2
);
2606 tcg_gen_andi_i32(trap
, trap
, mask
);
2607 tcg_gen_addi_i32(trap
, trap
, TT_TRAP
);
2615 gen_helper_raise_exception(tcg_env
, trap
);
2616 dc
->base
.is_jmp
= DISAS_NORETURN
;
2620 /* Conditional trap. */
2622 lab
= delay_exceptionv(dc
, trap
);
2623 gen_compare(&cmp
, cc
, cond
, dc
);
2624 tcg_gen_brcondi_tl(cmp
.cond
, cmp
.c1
, cmp
.c2
, lab
);
2626 return advance_pc(dc
);
2629 static bool trans_Tcc_r(DisasContext
*dc
, arg_Tcc_r
*a
)
2631 if (avail_32(dc
) && a
->cc
) {
2634 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, false, a
->rs2
);
2637 static bool trans_Tcc_i_v7(DisasContext
*dc
, arg_Tcc_i_v7
*a
)
2642 return do_tcc(dc
, a
->cond
, 0, a
->rs1
, true, a
->i
);
2645 static bool trans_Tcc_i_v9(DisasContext
*dc
, arg_Tcc_i_v9
*a
)
2650 return do_tcc(dc
, a
->cond
, a
->cc
, a
->rs1
, true, a
->i
);
2653 static bool trans_STBAR(DisasContext
*dc
, arg_STBAR
*a
)
2655 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
2656 return advance_pc(dc
);
2659 static bool trans_MEMBAR(DisasContext
*dc
, arg_MEMBAR
*a
)
2665 /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
2666 tcg_gen_mb(a
->mmask
| TCG_BAR_SC
);
2669 /* For #Sync, etc, end the TB to recognize interrupts. */
2670 dc
->base
.is_jmp
= DISAS_EXIT
;
2672 return advance_pc(dc
);
2675 static bool do_rd_special(DisasContext
*dc
, bool priv
, int rd
,
2676 TCGv (*func
)(DisasContext
*, TCGv
))
2679 return raise_priv(dc
);
2681 gen_store_gpr(dc
, rd
, func(dc
, gen_dest_gpr(dc
, rd
)));
2682 return advance_pc(dc
);
2685 static TCGv
do_rdy(DisasContext
*dc
, TCGv dst
)
2690 static bool trans_RDY(DisasContext
*dc
, arg_RDY
*a
)
2693 * TODO: Need a feature bit for sparcv8. In the meantime, treat all
2694 * 32-bit cpus like sparcv7, which ignores the rs1 field.
2695 * This matches after all other ASR, so Leon3 Asr17 is handled first.
2697 if (avail_64(dc
) && a
->rs1
!= 0) {
2700 return do_rd_special(dc
, true, a
->rd
, do_rdy
);
2703 static TCGv
do_rd_leon3_config(DisasContext
*dc
, TCGv dst
)
2708 * TODO: There are many more fields to be filled,
2709 * some of which are writable.
2711 val
= dc
->def
->nwindows
- 1; /* [4:0] NWIN */
2712 val
|= 1 << 8; /* [8] V8 */
2714 return tcg_constant_tl(val
);
2717 TRANS(RDASR17
, ASR17
, do_rd_special
, true, a
->rd
, do_rd_leon3_config
)
2719 static TCGv
do_rdccr(DisasContext
*dc
, TCGv dst
)
2721 gen_helper_rdccr(dst
, tcg_env
);
2725 TRANS(RDCCR
, 64, do_rd_special
, true, a
->rd
, do_rdccr
)
2727 static TCGv
do_rdasi(DisasContext
*dc
, TCGv dst
)
2729 #ifdef TARGET_SPARC64
2730 return tcg_constant_tl(dc
->asi
);
2732 qemu_build_not_reached();
2736 TRANS(RDASI
, 64, do_rd_special
, true, a
->rd
, do_rdasi
)
2738 static TCGv
do_rdtick(DisasContext
*dc
, TCGv dst
)
2740 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2742 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(tick
));
2743 if (translator_io_start(&dc
->base
)) {
2744 dc
->base
.is_jmp
= DISAS_EXIT
;
2746 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2747 tcg_constant_i32(dc
->mem_idx
));
2751 /* TODO: non-priv access only allowed when enabled. */
2752 TRANS(RDTICK
, 64, do_rd_special
, true, a
->rd
, do_rdtick
)
2754 static TCGv
do_rdpc(DisasContext
*dc
, TCGv dst
)
2756 return tcg_constant_tl(address_mask_i(dc
, dc
->pc
));
2759 TRANS(RDPC
, 64, do_rd_special
, true, a
->rd
, do_rdpc
)
2761 static TCGv
do_rdfprs(DisasContext
*dc
, TCGv dst
)
2763 tcg_gen_ext_i32_tl(dst
, cpu_fprs
);
2767 TRANS(RDFPRS
, 64, do_rd_special
, true, a
->rd
, do_rdfprs
)
2769 static TCGv
do_rdgsr(DisasContext
*dc
, TCGv dst
)
2771 gen_trap_ifnofpu(dc
);
2775 TRANS(RDGSR
, 64, do_rd_special
, true, a
->rd
, do_rdgsr
)
2777 static TCGv
do_rdsoftint(DisasContext
*dc
, TCGv dst
)
2779 tcg_gen_ld32s_tl(dst
, tcg_env
, env64_field_offsetof(softint
));
2783 TRANS(RDSOFTINT
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdsoftint
)
2785 static TCGv
do_rdtick_cmpr(DisasContext
*dc
, TCGv dst
)
2787 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(tick_cmpr
));
2791 /* TODO: non-priv access only allowed when enabled. */
2792 TRANS(RDTICK_CMPR
, 64, do_rd_special
, true, a
->rd
, do_rdtick_cmpr
)
2794 static TCGv
do_rdstick(DisasContext
*dc
, TCGv dst
)
2796 TCGv_ptr r_tickptr
= tcg_temp_new_ptr();
2798 tcg_gen_ld_ptr(r_tickptr
, tcg_env
, env64_field_offsetof(stick
));
2799 if (translator_io_start(&dc
->base
)) {
2800 dc
->base
.is_jmp
= DISAS_EXIT
;
2802 gen_helper_tick_get_count(dst
, tcg_env
, r_tickptr
,
2803 tcg_constant_i32(dc
->mem_idx
));
2807 /* TODO: non-priv access only allowed when enabled. */
2808 TRANS(RDSTICK
, 64, do_rd_special
, true, a
->rd
, do_rdstick
)
2810 static TCGv
do_rdstick_cmpr(DisasContext
*dc
, TCGv dst
)
2812 tcg_gen_ld_tl(dst
, tcg_env
, env64_field_offsetof(stick_cmpr
));
2816 /* TODO: supervisor access only allowed when enabled by hypervisor. */
2817 TRANS(RDSTICK_CMPR
, 64, do_rd_special
, supervisor(dc
), a
->rd
, do_rdstick_cmpr
)
/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    /* Always report "strand running" (bit 0 set). */
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

/* rd %psr (sparc32): assembled from pieces by the helper. */
static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)
/* rdhpr %hpstate */
static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

/*
 * rdhpr %htstate: htstate is an array indexed by the current trap
 * level; compute env + tl * sizeof(entry) at runtime.
 */
static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);    /* each entry is 8 bytes */
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

/* rdhpr %hintp */
static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

/* rdhpr %htba */
static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

/* rdhpr %hver */
static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

/* rdhpr %hstick_cmpr */
static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)
/* rd %wim (sparc32 window invalid mask). */
static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)
/*
 * rdpr %tpc/%tnpc/%tstate/%tt: read fields of the trap_state entry
 * for the current trap level.  Sparc64-only; the 32-bit build cannot
 * reach these (qemu_build_not_reached keeps it link-clean).
 */
static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    /* tt is a 32-bit field; sign-extend into the gpr. */
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)
/* rd %tbr / rdpr %tba: the trap base address lives in a TCG global. */
static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

/* rdpr %pstate */
static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

/* rdpr %tl (current trap level) */
static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

/* rdpr %pil (processor interrupt level, shared 32/64 field) */
static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)
/* rdpr %cwp: the current window pointer needs a helper to read. */
static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

/* rdpr %cansave */
static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

/* rdpr %canrestore */
static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

/* rdpr %cleanwin */
static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

/* rdpr %otherwin */
static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

/* rdpr %wstate */
static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

/* rdpr %gl (global register level, UA2005) */
static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)
/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

/* rdpr %ver (implementation version register) */
static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)
/*
 * FLUSHW: spill all register windows to the stack (v9 only).
 * NOTE(review): the guard below is reconstructed — gen_helper_flushw is
 * stubbed out for 32-bit builds (see the qemu_build_not_reached defines),
 * so this must only be reachable for 64-bit cpus; confirm against upstream.
 */
static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
{
    if (avail_64(dc)) {
        gen_helper_flushw(tcg_env);
        return advance_pc(dc);
    }
    return false;
}
/*
 * Common translator for WR%asr/WR%pr style insns.
 * Computes src = rs1 ^ (rs2 | imm) — the architected WR semantics are
 * an XOR, not a move — then hands the value to FUNC.  PRIV is the
 * already-evaluated privilege predicate for this register.
 */
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        /* %g0 ^ constant: the xor folds away at translation time. */
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);
        if (a->rs2_or_imm == 0) {
            /* xor with zero is a no-op: use rs1 directly. */
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}
/* wr %y: only the low 32 bits are architected. */
static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

/* wr %ccr: unpacked into the N/Z/C/V globals by the helper. */
static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

/* wr %asi: only 8 bits are kept; the default ASI affects decoding. */
static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

/* wr %fprs: FPU-enable bits change what translates, so end the TB. */
static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)
/* wr %gsr: traps if FPU disabled, otherwise a plain global move. */
static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

/* wr %softint_set: helpers also re-evaluate pending interrupts. */
static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

/* wr %softint_clr */
static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

/* wr %softint (full replace) */
static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)
/*
 * wr %tick_cmpr: store the compare value and reprogram the timer.
 * Timer access is I/O; the TB ends so a newly-due interrupt is taken.
 */
static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

/* wr %stick: set the system tick counter itself. */
static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

/* wr %stick_cmpr: as wr %tick_cmpr, but for the system tick. */
static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)
/* wr %powerdown: helper halts the cpu; state must be synced first. */
static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

/* wr %psr (sparc32): may change CWP/ET etc., so end the TB. */
static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

/* wr %wim: mask to the number of implemented windows before storing. */
static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)
/*
 * wrpr %tpc/%tnpc/%tstate/%tt: write fields of the trap_state entry
 * for the current trap level (sparc64 only).
 */
static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    /* tt is a 32-bit field. */
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)
/* wrpr %tick: set the cpu tick counter (I/O; end TB for interrupts). */
static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

/* wrpr %tba: trap base address lives in a TCG global. */
static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

/*
 * wrpr %pstate: can toggle interrupt enable (I/O with icount) and
 * changes translation-relevant state, so npc becomes dynamic.
 */
static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

/* wrpr %tl: changing the trap level invalidates cached state. */
static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)
/* wrpr %pil: may unmask a pending interrupt (I/O with icount). */
static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

/* wrpr %cwp: helper rotates the register window. */
static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

/* wrpr %cansave */
static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

/* wrpr %canrestore */
static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

/* wrpr %cleanwin */
static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

/* wrpr %otherwin */
static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

/* wrpr %wstate */
static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

/* wrpr %gl: helper switches the active global register set. */
static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)
/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

/* sparc32 wr %tbr shares the write path with wrpr %tba. */
TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

/* wrhpr %hpstate: hyperprivileged state affects translation — end TB. */
static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)
/*
 * wrhpr %htstate: indexed by the current trap level, mirroring
 * do_rdhtstate above.
 */
static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);    /* each entry is 8 bytes */
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

/* wrhpr %hintp */
static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

/* wrhpr %htba */
static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

/* wrhpr %hstick_cmpr: reprogram the hypervisor tick timer. */
static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)
/* SAVED/RESTORED: adjust the window bookkeeping via helpers. */
static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)
/* NOP: nothing to emit; just step the pc. */
static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)
/*
 * Common translator for 3-operand integer ops.
 * FUNC is the register form, FUNCI the (optional) immediate form.
 * When LOGIC_CC is set, the result is computed straight into cpu_cc_N
 * and the remaining condition codes are set for a logical operation
 * (Z mirrors the result, C and V are cleared).
 */
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        /* Compute directly into N so the flags fall out for free. */
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
/*
 * Arithmetic ops: the cc-setting variant uses a dedicated FUNC_CC
 * (no immediate form) because flag computation differs from the
 * plain operation.
 */
static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, func_cc, NULL, false);
    }
    return do_arith_int(dc, a, func, funci, false);
}

/*
 * Logical ops: the same FUNC serves both variants; cc-setting is
 * handled generically by do_arith_int (logic_cc path).
 */
static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}
/* Integer add/subtract, with and without carry. */
TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

/* Tagged add/subtract always set cc; the TV forms trap on overflow. */
TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

/* Bitwise logicals (OR has its own translator for the MOV alias). */
TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

/* Multiplies: UMUL/SMUL also set cc from the 32-bit result. */
TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

/* Divides. */
TRANS(UDIVX, 64, do_arith, a, gen_op_udivx, NULL, NULL)
TRANS(SDIVX, 64, do_arith, a, gen_op_sdivx, NULL, NULL)
TRANS(UDIV, DIV, do_arith, a, gen_op_udiv, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)
static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            /* mov constant into rd */
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            /* mov register into rd */
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    /* General case: an ordinary logical OR. */
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}
/*
 * VIS EDGE* family: compute a pixel edge mask from two addresses.
 * WIDTH is the pixel size (8/16/32 bits), CC requests a subcc of the
 * raw operands, LEFT selects the little-endian opcode variant.
 * NOTE(review): the case-16 table constants and the case-8/16/32
 * imask/shift/omask values were dropped by the extraction and are
 * reconstructed here — verify against upstream translate.c.
 */
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool left)
{
    TCGv dst, s1, s2, lo1, lo2;
    uint64_t amask, tabl, tabr;
    int shift, imask, omask;

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    /*
     * Theory of operation: there are two tables, left and right (not to
     * be confused with the left and right versions of the opcode).  These
     * are indexed by the low 3 bits of the inputs.  To make things "easy",
     * these tables are loaded into two constants, TABL and TABR below.
     * The operation index = (input & imask) << shift calculates the index
     * into the constant, while val = (table >> index) & omask calculates
     * the value we're looking for.
     */
    switch (width) {
    case 8:
        imask = 0x7;
        shift = 3;
        omask = 0xff;
        if (left) {
            tabl = 0x80c0e0f0f8fcfeffULL;
            tabr = 0xff7f3f1f0f070301ULL;
        } else {
            tabl = 0x0103070f1f3f7fffULL;
            tabr = 0xfffefcf8f0e0c080ULL;
        }
        break;

    case 16:
        imask = 0x6;
        shift = 1;
        omask = 0xf;
        if (left) {
            tabl = 0x8cef;
            tabr = 0xf731;
        } else {
            tabl = 0x137f;
            tabr = 0xfec8;
        }
        break;

    default:
        imask = 0x4;
        shift = 0;
        omask = 0x3;
        if (left) {
            tabl = (2 << 2) | 3;
            tabr = (3 << 2) | 1;
        } else {
            tabl = (1 << 2) | 3;
            tabr = (3 << 2) | 2;
        }
        break;
    }

    lo1 = tcg_temp_new();
    lo2 = tcg_temp_new();
    tcg_gen_andi_tl(lo1, s1, imask);
    tcg_gen_andi_tl(lo2, s2, imask);
    tcg_gen_shli_tl(lo1, lo1, shift);
    tcg_gen_shli_tl(lo2, lo2, shift);

    tcg_gen_shr_tl(lo1, tcg_constant_tl(tabl), lo1);
    tcg_gen_shr_tl(lo2, tcg_constant_tl(tabr), lo2);
    tcg_gen_andi_tl(lo1, lo1, omask);
    tcg_gen_andi_tl(lo2, lo2, omask);

    /*
     * NOTE(review): s1/s2 come from gen_load_gpr and may alias the
     * architectural registers; the in-place andi below looks like it
     * clobbers them — confirm this matches upstream intent.
     */
    amask = address_mask_i(dc, -8);
    tcg_gen_andi_tl(s1, s1, amask);
    tcg_gen_andi_tl(s2, s2, amask);

    /* Compute dst = (s1 == s2 ? lo1 : lo1 & lo2). */
    tcg_gen_and_tl(lo2, lo2, lo1);
    tcg_gen_movcond_tl(TCG_COND_EQ, dst, s1, s2, lo1, lo2);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}
/* VIS1 cc-setting edge ops; trailing args are (width, cc, left). */
TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

/* VIS2 non-cc ("N") variants. */
TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)
/* Generic rd = FUNC(rs1, rs2) translator for register-only ops. */
static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

/* VIS1 3-D array addressing helpers. */
TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)
/*
 * ALIGNADDRESS: dst = (s1 + s2) & ~7; the low 3 bits of the sum are
 * stashed in GSR.align for a following FALIGNDATA.
 */
static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

/* ALIGNADDRESS_LITTLE: as above, but GSR.align gets the negated offset. */
static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

/* BMASK: dst = s1 + s2, and the sum is copied into GSR.mask (bits 32-63). */
static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)
/*
 * Register-count shifts.  L selects left shift; U selects unsigned
 * (logical) right shift; a->x selects the 64-bit form.  For 32-bit
 * shifts on sparc64 the input/output must be explicitly extended.
 */
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    /* The architected count is modulo the operand width. */
    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            /* 32-bit result: discard any garbage in the high half. */
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            /* Zero-extend first so the shift pulls in zeros. */
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            /* Sign-extend first so the shift replicates the sign bit. */
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)
/*
 * Immediate-count shifts.  With a constant count, the 32-bit forms on
 * sparc64 collapse into single deposit/extract operations.
 */
static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        /* Full-width shift. */
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        /* 32-bit shift on a 64-bit cpu: fold extend+shift into one op. */
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)
/*
 * Resolve the rs2-or-immediate operand to a TCGv.
 * Returns NULL for the invalidly-encoded register form.
 */
static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
{
    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }
    if (imm || rs2_or_imm == 0) {
        return tcg_constant_tl(rs2_or_imm);
    }
    return cpu_regs[rs2_or_imm];
}
/*
 * Shared tail of MOVcc/MOVfcc/MOVR: conditionally replace rd with src2.
 * rd is loaded (not gen_dest_gpr) because it is also the "else" value.
 */
static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}
/* MOVcc: conditional move on integer condition codes. */
static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVfcc: conditional move on floating-point condition codes. */
static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

/* MOVR: conditional move on a register comparison against zero. */
static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1));
    return do_mov_cond(dc, &cmp, a->rd, src2);
}
/*
 * Common translator for insns of the form FUNC(rd, rs1 + rs2|imm):
 * JMPL, RETT, RETURN, SAVE, RESTORE.
 */
static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}
/* JMPL: jump to src, writing the address of this insn into rd. */
static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)

/* RETT (sparc32): return from trap; the helper updates PSR state. */
static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)
/* RETURN (sparc64): jump to src and pop a register window. */
static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    gen_helper_restore(tcg_env);
    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)

/*
 * SAVE: push a window, then write the pre-computed sum into the NEW
 * window's rd (the store happens after the helper switched windows).
 */
static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

/* RESTORE: pop a window, then write the sum into the new window's rd. */
static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)
/*
 * DONE/RETRY: return from a trap handler via the trap-state registers.
 * Both pc and npc become dynamic because the helper rewrites them.
 */
static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)
/*
 * Major opcode 11 -- load and store instructions
 */

/*
 * Compute the effective address rs1 + (rs2 | imm), truncated to
 * 32 bits when address masking is in effect.  Returns NULL for the
 * invalidly-encoded register form.
 */
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        /* PSTATE.AM: only the low 32 bits of the address are used. */
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}
/* Generic integer load: rd = *(rs1 + rs2|imm) via the resolved ASI. */
static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)
/* Generic integer store: *(rs1 + rs2|imm) = rd via the resolved ASI. */
static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)
4098 static bool trans_LDD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4106 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4110 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4111 gen_ldda_asi(dc
, &da
, addr
, a
->rd
);
4112 return advance_pc(dc
);
4115 static bool trans_STD(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4123 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4127 da
= resolve_asi(dc
, a
->asi
, MO_TEUQ
);
4128 gen_stda_asi(dc
, &da
, addr
, a
->rd
);
4129 return advance_pc(dc
);
4132 static bool trans_LDSTUB(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4137 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4141 da
= resolve_asi(dc
, a
->asi
, MO_UB
);
4143 reg
= gen_dest_gpr(dc
, a
->rd
);
4144 gen_ldstub_asi(dc
, &da
, reg
, addr
);
4145 gen_store_gpr(dc
, a
->rd
, reg
);
4146 return advance_pc(dc
);
4149 static bool trans_SWAP(DisasContext
*dc
, arg_r_r_ri_asi
*a
)
4151 TCGv addr
, dst
, src
;
4154 addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4158 da
= resolve_asi(dc
, a
->asi
, MO_TEUL
);
4160 dst
= gen_dest_gpr(dc
, a
->rd
);
4161 src
= gen_load_gpr(dc
, a
->rd
);
4162 gen_swap_asi(dc
, &da
, dst
, src
, addr
);
4163 gen_store_gpr(dc
, a
->rd
, dst
);
4164 return advance_pc(dc
);
4167 static bool do_casa(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp mop
)
4172 addr
= gen_ldst_addr(dc
, a
->rs1
, true, 0);
4176 da
= resolve_asi(dc
, a
->asi
, mop
);
4178 o
= gen_dest_gpr(dc
, a
->rd
);
4179 n
= gen_load_gpr(dc
, a
->rd
);
4180 c
= gen_load_gpr(dc
, a
->rs2_or_imm
);
4181 gen_cas_asi(dc
, &da
, o
, n
, c
, addr
);
4182 gen_store_gpr(dc
, a
->rd
, o
);
4183 return advance_pc(dc
);
4186 TRANS(CASA
, CASA
, do_casa
, a
, MO_TEUL
)
4187 TRANS(CASXA
, 64, do_casa
, a
, MO_TEUQ
)
4189 static bool do_ld_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4191 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4197 if (gen_trap_ifnofpu(dc
)) {
4200 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4203 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4204 gen_ldf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4205 gen_update_fprs_dirty(dc
, a
->rd
);
4206 return advance_pc(dc
);
4209 TRANS(LDF
, ALL
, do_ld_fpr
, a
, MO_32
)
4210 TRANS(LDDF
, ALL
, do_ld_fpr
, a
, MO_64
)
4211 TRANS(LDQF
, ALL
, do_ld_fpr
, a
, MO_128
)
4213 TRANS(LDFA
, 64, do_ld_fpr
, a
, MO_32
)
4214 TRANS(LDDFA
, 64, do_ld_fpr
, a
, MO_64
)
4215 TRANS(LDQFA
, 64, do_ld_fpr
, a
, MO_128
)
4217 static bool do_st_fpr(DisasContext
*dc
, arg_r_r_ri_asi
*a
, MemOp sz
)
4219 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4225 if (gen_trap_ifnofpu(dc
)) {
4228 if (sz
== MO_128
&& gen_trap_float128(dc
)) {
4231 da
= resolve_asi(dc
, a
->asi
, MO_TE
| sz
);
4232 gen_stf_asi(dc
, &da
, sz
, addr
, a
->rd
);
4233 return advance_pc(dc
);
4236 TRANS(STF
, ALL
, do_st_fpr
, a
, MO_32
)
4237 TRANS(STDF
, ALL
, do_st_fpr
, a
, MO_64
)
4238 TRANS(STQF
, ALL
, do_st_fpr
, a
, MO_128
)
4240 TRANS(STFA
, 64, do_st_fpr
, a
, MO_32
)
4241 TRANS(STDFA
, 64, do_st_fpr
, a
, MO_64
)
4242 TRANS(STQFA
, 64, do_st_fpr
, a
, MO_128
)
4244 static bool trans_STDFQ(DisasContext
*dc
, arg_STDFQ
*a
)
4246 if (!avail_32(dc
)) {
4249 if (!supervisor(dc
)) {
4250 return raise_priv(dc
);
4252 if (gen_trap_ifnofpu(dc
)) {
4255 gen_op_fpexception_im(dc
, FSR_FTT_SEQ_ERROR
);
4259 static bool do_ldfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
,
4260 target_ulong new_mask
, target_ulong old_mask
)
4262 TCGv tmp
, addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4266 if (gen_trap_ifnofpu(dc
)) {
4269 tmp
= tcg_temp_new();
4270 tcg_gen_qemu_ld_tl(tmp
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4271 tcg_gen_andi_tl(tmp
, tmp
, new_mask
);
4272 tcg_gen_andi_tl(cpu_fsr
, cpu_fsr
, old_mask
);
4273 tcg_gen_or_tl(cpu_fsr
, cpu_fsr
, tmp
);
4274 gen_helper_set_fsr(tcg_env
, cpu_fsr
);
4275 return advance_pc(dc
);
4278 TRANS(LDFSR
, ALL
, do_ldfsr
, a
, MO_TEUL
, FSR_LDFSR_MASK
, FSR_LDFSR_OLDMASK
)
4279 TRANS(LDXFSR
, 64, do_ldfsr
, a
, MO_TEUQ
, FSR_LDXFSR_MASK
, FSR_LDXFSR_OLDMASK
)
4281 static bool do_stfsr(DisasContext
*dc
, arg_r_r_ri
*a
, MemOp mop
)
4283 TCGv addr
= gen_ldst_addr(dc
, a
->rs1
, a
->imm
, a
->rs2_or_imm
);
4287 if (gen_trap_ifnofpu(dc
)) {
4290 tcg_gen_qemu_st_tl(cpu_fsr
, addr
, dc
->mem_idx
, mop
| MO_ALIGN
);
4291 return advance_pc(dc
);
4294 TRANS(STFSR
, ALL
, do_stfsr
, a
, MO_TEUL
)
4295 TRANS(STXFSR
, 64, do_stfsr
, a
, MO_TEUQ
)
4297 static bool do_fc(DisasContext
*dc
, int rd
, bool c
)
4301 if (gen_trap_ifnofpu(dc
)) {
4306 mask
= MAKE_64BIT_MASK(0, 32);
4308 mask
= MAKE_64BIT_MASK(32, 32);
4311 tcg_gen_ori_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rd
/ 2], mask
);
4313 tcg_gen_andi_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rd
/ 2], ~mask
);
4315 gen_update_fprs_dirty(dc
, rd
);
4316 return advance_pc(dc
);
4319 TRANS(FZEROs
, VIS1
, do_fc
, a
->rd
, 0)
4320 TRANS(FONEs
, VIS1
, do_fc
, a
->rd
, 1)
4322 static bool do_dc(DisasContext
*dc
, int rd
, int64_t c
)
4324 if (gen_trap_ifnofpu(dc
)) {
4328 tcg_gen_movi_i64(cpu_fpr
[rd
/ 2], c
);
4329 gen_update_fprs_dirty(dc
, rd
);
4330 return advance_pc(dc
);
4333 TRANS(FZEROd
, VIS1
, do_dc
, a
->rd
, 0)
4334 TRANS(FONEd
, VIS1
, do_dc
, a
->rd
, -1)
4336 static bool do_ff(DisasContext
*dc
, arg_r_r
*a
,
4337 void (*func
)(TCGv_i32
, TCGv_i32
))
4341 if (gen_trap_ifnofpu(dc
)) {
4345 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4347 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4348 return advance_pc(dc
);
4351 TRANS(FMOVs
, ALL
, do_ff
, a
, gen_op_fmovs
)
4352 TRANS(FNEGs
, ALL
, do_ff
, a
, gen_op_fnegs
)
4353 TRANS(FABSs
, ALL
, do_ff
, a
, gen_op_fabss
)
4354 TRANS(FSRCs
, VIS1
, do_ff
, a
, tcg_gen_mov_i32
)
4355 TRANS(FNOTs
, VIS1
, do_ff
, a
, tcg_gen_not_i32
)
4357 static bool do_fd(DisasContext
*dc
, arg_r_r
*a
,
4358 void (*func
)(TCGv_i32
, TCGv_i64
))
4363 if (gen_trap_ifnofpu(dc
)) {
4367 dst
= gen_dest_fpr_F(dc
);
4368 src
= gen_load_fpr_D(dc
, a
->rs
);
4370 gen_store_fpr_F(dc
, a
->rd
, dst
);
4371 return advance_pc(dc
);
4374 TRANS(FPACK16
, VIS1
, do_fd
, a
, gen_op_fpack16
)
4375 TRANS(FPACKFIX
, VIS1
, do_fd
, a
, gen_op_fpackfix
)
4377 static bool do_env_ff(DisasContext
*dc
, arg_r_r
*a
,
4378 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
4382 if (gen_trap_ifnofpu(dc
)) {
4386 gen_op_clear_ieee_excp_and_FTT();
4387 tmp
= gen_load_fpr_F(dc
, a
->rs
);
4388 func(tmp
, tcg_env
, tmp
);
4389 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4390 gen_store_fpr_F(dc
, a
->rd
, tmp
);
4391 return advance_pc(dc
);
4394 TRANS(FSQRTs
, ALL
, do_env_ff
, a
, gen_helper_fsqrts
)
4395 TRANS(FiTOs
, ALL
, do_env_ff
, a
, gen_helper_fitos
)
4396 TRANS(FsTOi
, ALL
, do_env_ff
, a
, gen_helper_fstoi
)
4398 static bool do_env_fd(DisasContext
*dc
, arg_r_r
*a
,
4399 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
4404 if (gen_trap_ifnofpu(dc
)) {
4408 gen_op_clear_ieee_excp_and_FTT();
4409 dst
= gen_dest_fpr_F(dc
);
4410 src
= gen_load_fpr_D(dc
, a
->rs
);
4411 func(dst
, tcg_env
, src
);
4412 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4413 gen_store_fpr_F(dc
, a
->rd
, dst
);
4414 return advance_pc(dc
);
4417 TRANS(FdTOs
, ALL
, do_env_fd
, a
, gen_helper_fdtos
)
4418 TRANS(FdTOi
, ALL
, do_env_fd
, a
, gen_helper_fdtoi
)
4419 TRANS(FxTOs
, 64, do_env_fd
, a
, gen_helper_fxtos
)
4421 static bool do_dd(DisasContext
*dc
, arg_r_r
*a
,
4422 void (*func
)(TCGv_i64
, TCGv_i64
))
4426 if (gen_trap_ifnofpu(dc
)) {
4430 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4431 src
= gen_load_fpr_D(dc
, a
->rs
);
4433 gen_store_fpr_D(dc
, a
->rd
, dst
);
4434 return advance_pc(dc
);
4437 TRANS(FMOVd
, 64, do_dd
, a
, gen_op_fmovd
)
4438 TRANS(FNEGd
, 64, do_dd
, a
, gen_op_fnegd
)
4439 TRANS(FABSd
, 64, do_dd
, a
, gen_op_fabsd
)
4440 TRANS(FSRCd
, VIS1
, do_dd
, a
, tcg_gen_mov_i64
)
4441 TRANS(FNOTd
, VIS1
, do_dd
, a
, tcg_gen_not_i64
)
4443 static bool do_env_dd(DisasContext
*dc
, arg_r_r
*a
,
4444 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
4448 if (gen_trap_ifnofpu(dc
)) {
4452 gen_op_clear_ieee_excp_and_FTT();
4453 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4454 src
= gen_load_fpr_D(dc
, a
->rs
);
4455 func(dst
, tcg_env
, src
);
4456 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4457 gen_store_fpr_D(dc
, a
->rd
, dst
);
4458 return advance_pc(dc
);
4461 TRANS(FSQRTd
, ALL
, do_env_dd
, a
, gen_helper_fsqrtd
)
4462 TRANS(FxTOd
, 64, do_env_dd
, a
, gen_helper_fxtod
)
4463 TRANS(FdTOx
, 64, do_env_dd
, a
, gen_helper_fdtox
)
4465 static bool do_env_df(DisasContext
*dc
, arg_r_r
*a
,
4466 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
4471 if (gen_trap_ifnofpu(dc
)) {
4475 gen_op_clear_ieee_excp_and_FTT();
4476 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4477 src
= gen_load_fpr_F(dc
, a
->rs
);
4478 func(dst
, tcg_env
, src
);
4479 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4480 gen_store_fpr_D(dc
, a
->rd
, dst
);
4481 return advance_pc(dc
);
4484 TRANS(FiTOd
, ALL
, do_env_df
, a
, gen_helper_fitod
)
4485 TRANS(FsTOd
, ALL
, do_env_df
, a
, gen_helper_fstod
)
4486 TRANS(FsTOx
, 64, do_env_df
, a
, gen_helper_fstox
)
4488 static bool trans_FMOVq(DisasContext
*dc
, arg_FMOVq
*a
)
4492 if (!avail_64(dc
)) {
4495 if (gen_trap_ifnofpu(dc
)) {
4498 if (gen_trap_float128(dc
)) {
4502 gen_op_clear_ieee_excp_and_FTT();
4505 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2], cpu_fpr
[rs
/ 2]);
4506 tcg_gen_mov_i64(cpu_fpr
[rd
/ 2 + 1], cpu_fpr
[rs
/ 2 + 1]);
4507 gen_update_fprs_dirty(dc
, rd
);
4508 return advance_pc(dc
);
4511 static bool do_qq(DisasContext
*dc
, arg_r_r
*a
,
4512 void (*func
)(TCGv_env
))
4514 if (gen_trap_ifnofpu(dc
)) {
4517 if (gen_trap_float128(dc
)) {
4521 gen_op_clear_ieee_excp_and_FTT();
4522 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4524 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4525 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4526 return advance_pc(dc
);
4529 TRANS(FNEGq
, 64, do_qq
, a
, gen_helper_fnegq
)
4530 TRANS(FABSq
, 64, do_qq
, a
, gen_helper_fabsq
)
4532 static bool do_env_qq(DisasContext
*dc
, arg_r_r
*a
,
4533 void (*func
)(TCGv_env
))
4535 if (gen_trap_ifnofpu(dc
)) {
4538 if (gen_trap_float128(dc
)) {
4542 gen_op_clear_ieee_excp_and_FTT();
4543 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4545 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4546 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4547 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4548 return advance_pc(dc
);
4551 TRANS(FSQRTq
, ALL
, do_env_qq
, a
, gen_helper_fsqrtq
)
4553 static bool do_env_fq(DisasContext
*dc
, arg_r_r
*a
,
4554 void (*func
)(TCGv_i32
, TCGv_env
))
4558 if (gen_trap_ifnofpu(dc
)) {
4561 if (gen_trap_float128(dc
)) {
4565 gen_op_clear_ieee_excp_and_FTT();
4566 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4567 dst
= gen_dest_fpr_F(dc
);
4569 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4570 gen_store_fpr_F(dc
, a
->rd
, dst
);
4571 return advance_pc(dc
);
4574 TRANS(FqTOs
, ALL
, do_env_fq
, a
, gen_helper_fqtos
)
4575 TRANS(FqTOi
, ALL
, do_env_fq
, a
, gen_helper_fqtoi
)
4577 static bool do_env_dq(DisasContext
*dc
, arg_r_r
*a
,
4578 void (*func
)(TCGv_i64
, TCGv_env
))
4582 if (gen_trap_ifnofpu(dc
)) {
4585 if (gen_trap_float128(dc
)) {
4589 gen_op_clear_ieee_excp_and_FTT();
4590 gen_op_load_fpr_QT1(QFPREG(a
->rs
));
4591 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4593 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4594 gen_store_fpr_D(dc
, a
->rd
, dst
);
4595 return advance_pc(dc
);
4598 TRANS(FqTOd
, ALL
, do_env_dq
, a
, gen_helper_fqtod
)
4599 TRANS(FqTOx
, 64, do_env_dq
, a
, gen_helper_fqtox
)
4601 static bool do_env_qf(DisasContext
*dc
, arg_r_r
*a
,
4602 void (*func
)(TCGv_env
, TCGv_i32
))
4606 if (gen_trap_ifnofpu(dc
)) {
4609 if (gen_trap_float128(dc
)) {
4613 gen_op_clear_ieee_excp_and_FTT();
4614 src
= gen_load_fpr_F(dc
, a
->rs
);
4616 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4617 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4618 return advance_pc(dc
);
4621 TRANS(FiTOq
, ALL
, do_env_qf
, a
, gen_helper_fitoq
)
4622 TRANS(FsTOq
, ALL
, do_env_qf
, a
, gen_helper_fstoq
)
4624 static bool do_env_qd(DisasContext
*dc
, arg_r_r
*a
,
4625 void (*func
)(TCGv_env
, TCGv_i64
))
4629 if (gen_trap_ifnofpu(dc
)) {
4632 if (gen_trap_float128(dc
)) {
4636 gen_op_clear_ieee_excp_and_FTT();
4637 src
= gen_load_fpr_D(dc
, a
->rs
);
4639 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4640 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4641 return advance_pc(dc
);
4644 TRANS(FdTOq
, ALL
, do_env_qd
, a
, gen_helper_fdtoq
)
4645 TRANS(FxTOq
, 64, do_env_qd
, a
, gen_helper_fxtoq
)
4647 static bool do_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4648 void (*func
)(TCGv_i32
, TCGv_i32
, TCGv_i32
))
4650 TCGv_i32 src1
, src2
;
4652 if (gen_trap_ifnofpu(dc
)) {
4656 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4657 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4658 func(src1
, src1
, src2
);
4659 gen_store_fpr_F(dc
, a
->rd
, src1
);
4660 return advance_pc(dc
);
4663 TRANS(FPADD16s
, VIS1
, do_fff
, a
, tcg_gen_vec_add16_i32
)
4664 TRANS(FPADD32s
, VIS1
, do_fff
, a
, tcg_gen_add_i32
)
4665 TRANS(FPSUB16s
, VIS1
, do_fff
, a
, tcg_gen_vec_sub16_i32
)
4666 TRANS(FPSUB32s
, VIS1
, do_fff
, a
, tcg_gen_sub_i32
)
4667 TRANS(FNORs
, VIS1
, do_fff
, a
, tcg_gen_nor_i32
)
4668 TRANS(FANDNOTs
, VIS1
, do_fff
, a
, tcg_gen_andc_i32
)
4669 TRANS(FXORs
, VIS1
, do_fff
, a
, tcg_gen_xor_i32
)
4670 TRANS(FNANDs
, VIS1
, do_fff
, a
, tcg_gen_nand_i32
)
4671 TRANS(FANDs
, VIS1
, do_fff
, a
, tcg_gen_and_i32
)
4672 TRANS(FXNORs
, VIS1
, do_fff
, a
, tcg_gen_eqv_i32
)
4673 TRANS(FORNOTs
, VIS1
, do_fff
, a
, tcg_gen_orc_i32
)
4674 TRANS(FORs
, VIS1
, do_fff
, a
, tcg_gen_or_i32
)
4676 static bool do_env_fff(DisasContext
*dc
, arg_r_r_r
*a
,
4677 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
4679 TCGv_i32 src1
, src2
;
4681 if (gen_trap_ifnofpu(dc
)) {
4685 gen_op_clear_ieee_excp_and_FTT();
4686 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4687 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4688 func(src1
, tcg_env
, src1
, src2
);
4689 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4690 gen_store_fpr_F(dc
, a
->rd
, src1
);
4691 return advance_pc(dc
);
4694 TRANS(FADDs
, ALL
, do_env_fff
, a
, gen_helper_fadds
)
4695 TRANS(FSUBs
, ALL
, do_env_fff
, a
, gen_helper_fsubs
)
4696 TRANS(FMULs
, ALL
, do_env_fff
, a
, gen_helper_fmuls
)
4697 TRANS(FDIVs
, ALL
, do_env_fff
, a
, gen_helper_fdivs
)
4699 static bool do_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4700 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
4702 TCGv_i64 dst
, src1
, src2
;
4704 if (gen_trap_ifnofpu(dc
)) {
4708 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4709 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4710 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4711 func(dst
, src1
, src2
);
4712 gen_store_fpr_D(dc
, a
->rd
, dst
);
4713 return advance_pc(dc
);
4716 TRANS(FMUL8x16
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16
)
4717 TRANS(FMUL8x16AU
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16au
)
4718 TRANS(FMUL8x16AL
, VIS1
, do_ddd
, a
, gen_helper_fmul8x16al
)
4719 TRANS(FMUL8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8sux16
)
4720 TRANS(FMUL8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmul8ulx16
)
4721 TRANS(FMULD8SUx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8sux16
)
4722 TRANS(FMULD8ULx16
, VIS1
, do_ddd
, a
, gen_helper_fmuld8ulx16
)
4723 TRANS(FPMERGE
, VIS1
, do_ddd
, a
, gen_helper_fpmerge
)
4724 TRANS(FEXPAND
, VIS1
, do_ddd
, a
, gen_helper_fexpand
)
4726 TRANS(FPADD16
, VIS1
, do_ddd
, a
, tcg_gen_vec_add16_i64
)
4727 TRANS(FPADD32
, VIS1
, do_ddd
, a
, tcg_gen_vec_add32_i64
)
4728 TRANS(FPSUB16
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub16_i64
)
4729 TRANS(FPSUB32
, VIS1
, do_ddd
, a
, tcg_gen_vec_sub32_i64
)
4730 TRANS(FNORd
, VIS1
, do_ddd
, a
, tcg_gen_nor_i64
)
4731 TRANS(FANDNOTd
, VIS1
, do_ddd
, a
, tcg_gen_andc_i64
)
4732 TRANS(FXORd
, VIS1
, do_ddd
, a
, tcg_gen_xor_i64
)
4733 TRANS(FNANDd
, VIS1
, do_ddd
, a
, tcg_gen_nand_i64
)
4734 TRANS(FANDd
, VIS1
, do_ddd
, a
, tcg_gen_and_i64
)
4735 TRANS(FXNORd
, VIS1
, do_ddd
, a
, tcg_gen_eqv_i64
)
4736 TRANS(FORNOTd
, VIS1
, do_ddd
, a
, tcg_gen_orc_i64
)
4737 TRANS(FORd
, VIS1
, do_ddd
, a
, tcg_gen_or_i64
)
4739 TRANS(FPACK32
, VIS1
, do_ddd
, a
, gen_op_fpack32
)
4740 TRANS(FALIGNDATAg
, VIS1
, do_ddd
, a
, gen_op_faligndata
)
4741 TRANS(BSHUFFLE
, VIS2
, do_ddd
, a
, gen_op_bshuffle
)
4743 static bool do_rdd(DisasContext
*dc
, arg_r_r_r
*a
,
4744 void (*func
)(TCGv
, TCGv_i64
, TCGv_i64
))
4746 TCGv_i64 src1
, src2
;
4749 if (gen_trap_ifnofpu(dc
)) {
4753 dst
= gen_dest_gpr(dc
, a
->rd
);
4754 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4755 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4756 func(dst
, src1
, src2
);
4757 gen_store_gpr(dc
, a
->rd
, dst
);
4758 return advance_pc(dc
);
4761 TRANS(FPCMPLE16
, VIS1
, do_rdd
, a
, gen_helper_fcmple16
)
4762 TRANS(FPCMPNE16
, VIS1
, do_rdd
, a
, gen_helper_fcmpne16
)
4763 TRANS(FPCMPGT16
, VIS1
, do_rdd
, a
, gen_helper_fcmpgt16
)
4764 TRANS(FPCMPEQ16
, VIS1
, do_rdd
, a
, gen_helper_fcmpeq16
)
4766 TRANS(FPCMPLE32
, VIS1
, do_rdd
, a
, gen_helper_fcmple32
)
4767 TRANS(FPCMPNE32
, VIS1
, do_rdd
, a
, gen_helper_fcmpne32
)
4768 TRANS(FPCMPGT32
, VIS1
, do_rdd
, a
, gen_helper_fcmpgt32
)
4769 TRANS(FPCMPEQ32
, VIS1
, do_rdd
, a
, gen_helper_fcmpeq32
)
4771 static bool do_env_ddd(DisasContext
*dc
, arg_r_r_r
*a
,
4772 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
4774 TCGv_i64 dst
, src1
, src2
;
4776 if (gen_trap_ifnofpu(dc
)) {
4780 gen_op_clear_ieee_excp_and_FTT();
4781 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4782 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4783 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4784 func(dst
, tcg_env
, src1
, src2
);
4785 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4786 gen_store_fpr_D(dc
, a
->rd
, dst
);
4787 return advance_pc(dc
);
4790 TRANS(FADDd
, ALL
, do_env_ddd
, a
, gen_helper_faddd
)
4791 TRANS(FSUBd
, ALL
, do_env_ddd
, a
, gen_helper_fsubd
)
4792 TRANS(FMULd
, ALL
, do_env_ddd
, a
, gen_helper_fmuld
)
4793 TRANS(FDIVd
, ALL
, do_env_ddd
, a
, gen_helper_fdivd
)
4795 static bool trans_FsMULd(DisasContext
*dc
, arg_r_r_r
*a
)
4798 TCGv_i32 src1
, src2
;
4800 if (gen_trap_ifnofpu(dc
)) {
4803 if (!(dc
->def
->features
& CPU_FEATURE_FSMULD
)) {
4804 return raise_unimpfpop(dc
);
4807 gen_op_clear_ieee_excp_and_FTT();
4808 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4809 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4810 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4811 gen_helper_fsmuld(dst
, tcg_env
, src1
, src2
);
4812 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4813 gen_store_fpr_D(dc
, a
->rd
, dst
);
4814 return advance_pc(dc
);
4817 static bool do_dddd(DisasContext
*dc
, arg_r_r_r
*a
,
4818 void (*func
)(TCGv_i64
, TCGv_i64
, TCGv_i64
, TCGv_i64
))
4820 TCGv_i64 dst
, src0
, src1
, src2
;
4822 if (gen_trap_ifnofpu(dc
)) {
4826 dst
= gen_dest_fpr_D(dc
, a
->rd
);
4827 src0
= gen_load_fpr_D(dc
, a
->rd
);
4828 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4829 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4830 func(dst
, src0
, src1
, src2
);
4831 gen_store_fpr_D(dc
, a
->rd
, dst
);
4832 return advance_pc(dc
);
4835 TRANS(PDIST
, VIS1
, do_dddd
, a
, gen_helper_pdist
)
4837 static bool do_env_qqq(DisasContext
*dc
, arg_r_r_r
*a
,
4838 void (*func
)(TCGv_env
))
4840 if (gen_trap_ifnofpu(dc
)) {
4843 if (gen_trap_float128(dc
)) {
4847 gen_op_clear_ieee_excp_and_FTT();
4848 gen_op_load_fpr_QT0(QFPREG(a
->rs1
));
4849 gen_op_load_fpr_QT1(QFPREG(a
->rs2
));
4851 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4852 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4853 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4854 return advance_pc(dc
);
4857 TRANS(FADDq
, ALL
, do_env_qqq
, a
, gen_helper_faddq
)
4858 TRANS(FSUBq
, ALL
, do_env_qqq
, a
, gen_helper_fsubq
)
4859 TRANS(FMULq
, ALL
, do_env_qqq
, a
, gen_helper_fmulq
)
4860 TRANS(FDIVq
, ALL
, do_env_qqq
, a
, gen_helper_fdivq
)
4862 static bool trans_FdMULq(DisasContext
*dc
, arg_r_r_r
*a
)
4864 TCGv_i64 src1
, src2
;
4866 if (gen_trap_ifnofpu(dc
)) {
4869 if (gen_trap_float128(dc
)) {
4873 gen_op_clear_ieee_excp_and_FTT();
4874 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4875 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4876 gen_helper_fdmulq(tcg_env
, src1
, src2
);
4877 gen_helper_check_ieee_exceptions(cpu_fsr
, tcg_env
);
4878 gen_op_store_QT0_fpr(QFPREG(a
->rd
));
4879 gen_update_fprs_dirty(dc
, QFPREG(a
->rd
));
4880 return advance_pc(dc
);
4883 static bool do_fmovr(DisasContext
*dc
, arg_FMOVRs
*a
, bool is_128
,
4884 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
4888 if (gen_trap_ifnofpu(dc
)) {
4891 if (is_128
&& gen_trap_float128(dc
)) {
4895 gen_op_clear_ieee_excp_and_FTT();
4896 gen_compare_reg(&cmp
, a
->cond
, gen_load_gpr(dc
, a
->rs1
));
4897 func(dc
, &cmp
, a
->rd
, a
->rs2
);
4898 return advance_pc(dc
);
4901 TRANS(FMOVRs
, 64, do_fmovr
, a
, false, gen_fmovs
)
4902 TRANS(FMOVRd
, 64, do_fmovr
, a
, false, gen_fmovd
)
4903 TRANS(FMOVRq
, 64, do_fmovr
, a
, true, gen_fmovq
)
4905 static bool do_fmovcc(DisasContext
*dc
, arg_FMOVscc
*a
, bool is_128
,
4906 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
4910 if (gen_trap_ifnofpu(dc
)) {
4913 if (is_128
&& gen_trap_float128(dc
)) {
4917 gen_op_clear_ieee_excp_and_FTT();
4918 gen_compare(&cmp
, a
->cc
, a
->cond
, dc
);
4919 func(dc
, &cmp
, a
->rd
, a
->rs2
);
4920 return advance_pc(dc
);
4923 TRANS(FMOVscc
, 64, do_fmovcc
, a
, false, gen_fmovs
)
4924 TRANS(FMOVdcc
, 64, do_fmovcc
, a
, false, gen_fmovd
)
4925 TRANS(FMOVqcc
, 64, do_fmovcc
, a
, true, gen_fmovq
)
4927 static bool do_fmovfcc(DisasContext
*dc
, arg_FMOVsfcc
*a
, bool is_128
,
4928 void (*func
)(DisasContext
*, DisasCompare
*, int, int))
4932 if (gen_trap_ifnofpu(dc
)) {
4935 if (is_128
&& gen_trap_float128(dc
)) {
4939 gen_op_clear_ieee_excp_and_FTT();
4940 gen_fcompare(&cmp
, a
->cc
, a
->cond
);
4941 func(dc
, &cmp
, a
->rd
, a
->rs2
);
4942 return advance_pc(dc
);
4945 TRANS(FMOVsfcc
, 64, do_fmovfcc
, a
, false, gen_fmovs
)
4946 TRANS(FMOVdfcc
, 64, do_fmovfcc
, a
, false, gen_fmovd
)
4947 TRANS(FMOVqfcc
, 64, do_fmovfcc
, a
, true, gen_fmovq
)
4949 static bool do_fcmps(DisasContext
*dc
, arg_FCMPs
*a
, bool e
)
4951 TCGv_i32 src1
, src2
;
4953 if (avail_32(dc
) && a
->cc
!= 0) {
4956 if (gen_trap_ifnofpu(dc
)) {
4960 gen_op_clear_ieee_excp_and_FTT();
4961 src1
= gen_load_fpr_F(dc
, a
->rs1
);
4962 src2
= gen_load_fpr_F(dc
, a
->rs2
);
4964 gen_op_fcmpes(a
->cc
, src1
, src2
);
4966 gen_op_fcmps(a
->cc
, src1
, src2
);
4968 return advance_pc(dc
);
4971 TRANS(FCMPs
, ALL
, do_fcmps
, a
, false)
4972 TRANS(FCMPEs
, ALL
, do_fcmps
, a
, true)
4974 static bool do_fcmpd(DisasContext
*dc
, arg_FCMPd
*a
, bool e
)
4976 TCGv_i64 src1
, src2
;
4978 if (avail_32(dc
) && a
->cc
!= 0) {
4981 if (gen_trap_ifnofpu(dc
)) {
4985 gen_op_clear_ieee_excp_and_FTT();
4986 src1
= gen_load_fpr_D(dc
, a
->rs1
);
4987 src2
= gen_load_fpr_D(dc
, a
->rs2
);
4989 gen_op_fcmped(a
->cc
, src1
, src2
);
4991 gen_op_fcmpd(a
->cc
, src1
, src2
);
4993 return advance_pc(dc
);
4996 TRANS(FCMPd
, ALL
, do_fcmpd
, a
, false)
4997 TRANS(FCMPEd
, ALL
, do_fcmpd
, a
, true)
4999 static bool do_fcmpq(DisasContext
*dc
, arg_FCMPq
*a
, bool e
)
5001 if (avail_32(dc
) && a
->cc
!= 0) {
5004 if (gen_trap_ifnofpu(dc
)) {
5007 if (gen_trap_float128(dc
)) {
5011 gen_op_clear_ieee_excp_and_FTT();
5012 gen_op_load_fpr_QT0(QFPREG(a
->rs1
));
5013 gen_op_load_fpr_QT1(QFPREG(a
->rs2
));
5015 gen_op_fcmpeq(a
->cc
);
5017 gen_op_fcmpq(a
->cc
);
5019 return advance_pc(dc
);
5022 TRANS(FCMPq
, ALL
, do_fcmpq
, a
, false)
5023 TRANS(FCMPEq
, ALL
, do_fcmpq
, a
, true)
5025 static void sparc_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
5027 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5028 CPUSPARCState
*env
= cpu_env(cs
);
5031 dc
->pc
= dc
->base
.pc_first
;
5032 dc
->npc
= (target_ulong
)dc
->base
.tb
->cs_base
;
5033 dc
->mem_idx
= dc
->base
.tb
->flags
& TB_FLAG_MMU_MASK
;
5034 dc
->def
= &env
->def
;
5035 dc
->fpu_enabled
= tb_fpu_enabled(dc
->base
.tb
->flags
);
5036 dc
->address_mask_32bit
= tb_am_enabled(dc
->base
.tb
->flags
);
5037 #ifndef CONFIG_USER_ONLY
5038 dc
->supervisor
= (dc
->base
.tb
->flags
& TB_FLAG_SUPER
) != 0;
5040 #ifdef TARGET_SPARC64
5042 dc
->asi
= (dc
->base
.tb
->flags
>> TB_FLAG_ASI_SHIFT
) & 0xff;
5043 #ifndef CONFIG_USER_ONLY
5044 dc
->hypervisor
= (dc
->base
.tb
->flags
& TB_FLAG_HYPER
) != 0;
5048 * if we reach a page boundary, we stop generation so that the
5049 * PC of a TT_TFAULT exception is always in the right page
5051 bound
= -(dc
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
5052 dc
->base
.max_insns
= MIN(dc
->base
.max_insns
, bound
);
5055 static void sparc_tr_tb_start(DisasContextBase
*db
, CPUState
*cs
)
5059 static void sparc_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
5061 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5062 target_ulong npc
= dc
->npc
;
5067 assert(dc
->jump_pc
[1] == dc
->pc
+ 4);
5068 npc
= dc
->jump_pc
[0] | JUMP_PC
;
5071 case DYNAMIC_PC_LOOKUP
:
5075 g_assert_not_reached();
5078 tcg_gen_insn_start(dc
->pc
, npc
);
5081 static void sparc_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
5083 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5084 CPUSPARCState
*env
= cpu_env(cs
);
5087 insn
= translator_ldl(env
, &dc
->base
, dc
->pc
);
5088 dc
->base
.pc_next
+= 4;
5090 if (!decode(dc
, insn
)) {
5091 gen_exception(dc
, TT_ILL_INSN
);
5094 if (dc
->base
.is_jmp
== DISAS_NORETURN
) {
5097 if (dc
->pc
!= dc
->base
.pc_next
) {
5098 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
5102 static void sparc_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
5104 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
5105 DisasDelayException
*e
, *e_next
;
5110 switch (dc
->base
.is_jmp
) {
5112 case DISAS_TOO_MANY
:
5113 if (((dc
->pc
| dc
->npc
) & 3) == 0) {
5114 /* static PC and NPC: we can use direct chaining */
5115 gen_goto_tb(dc
, 0, dc
->pc
, dc
->npc
);
5122 case DYNAMIC_PC_LOOKUP
:
5128 g_assert_not_reached();
5131 tcg_gen_movi_tl(cpu_pc
, dc
->pc
);
5137 gen_generic_branch(dc
);
5142 case DYNAMIC_PC_LOOKUP
:
5145 g_assert_not_reached();
5148 tcg_gen_movi_tl(cpu_npc
, dc
->npc
);
5151 tcg_gen_lookup_and_goto_ptr();
5153 tcg_gen_exit_tb(NULL
, 0);
5157 case DISAS_NORETURN
:
5163 tcg_gen_exit_tb(NULL
, 0);
5167 g_assert_not_reached();
5170 for (e
= dc
->delay_excp_list
; e
; e
= e_next
) {
5171 gen_set_label(e
->lab
);
5173 tcg_gen_movi_tl(cpu_pc
, e
->pc
);
5174 if (e
->npc
% 4 == 0) {
5175 tcg_gen_movi_tl(cpu_npc
, e
->npc
);
5177 gen_helper_raise_exception(tcg_env
, e
->excp
);
5184 static void sparc_tr_disas_log(const DisasContextBase
*dcbase
,
5185 CPUState
*cpu
, FILE *logfile
)
5187 fprintf(logfile
, "IN: %s\n", lookup_symbol(dcbase
->pc_first
));
5188 target_disas(logfile
, cpu
, dcbase
->pc_first
, dcbase
->tb
->size
);
5191 static const TranslatorOps sparc_tr_ops
= {
5192 .init_disas_context
= sparc_tr_init_disas_context
,
5193 .tb_start
= sparc_tr_tb_start
,
5194 .insn_start
= sparc_tr_insn_start
,
5195 .translate_insn
= sparc_tr_translate_insn
,
5196 .tb_stop
= sparc_tr_tb_stop
,
5197 .disas_log
= sparc_tr_disas_log
,
5200 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
5201 target_ulong pc
, void *host_pc
)
5203 DisasContext dc
= {};
5205 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &sparc_tr_ops
, &dc
.base
);
5208 void sparc_tcg_init(void)
5210 static const char gregnames
[32][4] = {
5211 "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
5212 "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
5213 "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
5214 "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
5216 static const char fregnames
[32][4] = {
5217 "f0", "f2", "f4", "f6", "f8", "f10", "f12", "f14",
5218 "f16", "f18", "f20", "f22", "f24", "f26", "f28", "f30",
5219 "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46",
5220 "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62",
5223 static const struct { TCGv
*ptr
; int off
; const char *name
; } rtl
[] = {
5224 #ifdef TARGET_SPARC64
5225 { &cpu_gsr
, offsetof(CPUSPARCState
, gsr
), "gsr" },
5226 { &cpu_xcc_Z
, offsetof(CPUSPARCState
, xcc_Z
), "xcc_Z" },
5227 { &cpu_xcc_C
, offsetof(CPUSPARCState
, xcc_C
), "xcc_C" },
5229 { &cpu_cc_N
, offsetof(CPUSPARCState
, cc_N
), "cc_N" },
5230 { &cpu_cc_V
, offsetof(CPUSPARCState
, cc_V
), "cc_V" },
5231 { &cpu_icc_Z
, offsetof(CPUSPARCState
, icc_Z
), "icc_Z" },
5232 { &cpu_icc_C
, offsetof(CPUSPARCState
, icc_C
), "icc_C" },
5233 { &cpu_cond
, offsetof(CPUSPARCState
, cond
), "cond" },
5234 { &cpu_fsr
, offsetof(CPUSPARCState
, fsr
), "fsr" },
5235 { &cpu_pc
, offsetof(CPUSPARCState
, pc
), "pc" },
5236 { &cpu_npc
, offsetof(CPUSPARCState
, npc
), "npc" },
5237 { &cpu_y
, offsetof(CPUSPARCState
, y
), "y" },
5238 { &cpu_tbr
, offsetof(CPUSPARCState
, tbr
), "tbr" },
5243 cpu_regwptr
= tcg_global_mem_new_ptr(tcg_env
,
5244 offsetof(CPUSPARCState
, regwptr
),
5247 for (i
= 0; i
< ARRAY_SIZE(rtl
); ++i
) {
5248 *rtl
[i
].ptr
= tcg_global_mem_new(tcg_env
, rtl
[i
].off
, rtl
[i
].name
);
5252 for (i
= 1; i
< 8; ++i
) {
5253 cpu_regs
[i
] = tcg_global_mem_new(tcg_env
,
5254 offsetof(CPUSPARCState
, gregs
[i
]),
5258 for (i
= 8; i
< 32; ++i
) {
5259 cpu_regs
[i
] = tcg_global_mem_new(cpu_regwptr
,
5260 (i
- 8) * sizeof(target_ulong
),
5264 for (i
= 0; i
< TARGET_DPREGS
; i
++) {
5265 cpu_fpr
[i
] = tcg_global_mem_new_i64(tcg_env
,
5266 offsetof(CPUSPARCState
, fpr
[i
]),
5270 #ifdef TARGET_SPARC64
5271 cpu_fprs
= tcg_global_mem_new_i32(tcg_env
,
5272 offsetof(CPUSPARCState
, fprs
), "fprs");
5276 void sparc_restore_state_to_opc(CPUState
*cs
,
5277 const TranslationBlock
*tb
,
5278 const uint64_t *data
)
5280 SPARCCPU
*cpu
= SPARC_CPU(cs
);
5281 CPUSPARCState
*env
= &cpu
->env
;
5282 target_ulong pc
= data
[0];
5283 target_ulong npc
= data
[1];
5286 if (npc
== DYNAMIC_PC
) {
5287 /* dynamic NPC: already stored */
5288 } else if (npc
& JUMP_PC
) {
5289 /* jump PC: use 'cond' and the jump targets of the translation */
5291 env
->npc
= npc
& ~3;