+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * LSX translate functions
- * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
- */
-
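-/*
- * In system mode, LSX instructions raise an SXD exception when the SXE
- * enable bit in CSR.EUEN is clear.  In user mode LSX is always usable,
- * so the check compiles away.
- */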
-#ifndef CONFIG_USER_ONLY
-#define CHECK_SXE do { \
- if ((ctx->base.tb->flags & HW_FLAGS_EUEN_SXE) == 0) { \
- generate_exception(ctx, EXCCODE_SXD); \
- return true; \
- } \
-} while (0)
-#else
-#define CHECK_SXE
-#endif
-
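-/*
- * Out-of-line helper wrappers: the register numbers are passed to the
- * helper as i32 constants and the helper loads the vector contents via
- * the env pointer.
- */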
-static bool gen_vvvv(DisasContext *ctx, arg_vvvv *a,
- void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32,
- TCGv_i32, TCGv_i32))
-{
- TCGv_i32 vd = tcg_constant_i32(a->vd);
- TCGv_i32 vj = tcg_constant_i32(a->vj);
- TCGv_i32 vk = tcg_constant_i32(a->vk);
- TCGv_i32 va = tcg_constant_i32(a->va);
-
- CHECK_SXE;
- func(cpu_env, vd, vj, vk, va);
- return true;
-}
-
-static bool gen_vvv(DisasContext *ctx, arg_vvv *a,
- void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 vd = tcg_constant_i32(a->vd);
- TCGv_i32 vj = tcg_constant_i32(a->vj);
- TCGv_i32 vk = tcg_constant_i32(a->vk);
-
-    CHECK_SXE;
-    func(cpu_env, vd, vj, vk);
- return true;
-}
-
-static bool gen_vv(DisasContext *ctx, arg_vv *a,
- void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 vd = tcg_constant_i32(a->vd);
- TCGv_i32 vj = tcg_constant_i32(a->vj);
-
- CHECK_SXE;
- func(cpu_env, vd, vj);
- return true;
-}
-
-static bool gen_vv_i(DisasContext *ctx, arg_vv_i *a,
- void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 vd = tcg_constant_i32(a->vd);
- TCGv_i32 vj = tcg_constant_i32(a->vj);
- TCGv_i32 imm = tcg_constant_i32(a->imm);
-
- CHECK_SXE;
- func(cpu_env, vd, vj, imm);
- return true;
-}
-
-static bool gen_cv(DisasContext *ctx, arg_cv *a,
- void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32))
-{
- TCGv_i32 vj = tcg_constant_i32(a->vj);
- TCGv_i32 cd = tcg_constant_i32(a->cd);
-
- CHECK_SXE;
- func(cpu_env, cd, vj);
- return true;
-}
-
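-/*
- * Inline gvec expansions: LSX operates on the low 16 bytes (oprsz) of
- * each vector register.  Bytes between oprsz and maxsz (ctx->vl / 8)
- * are cleared by the gvec framework, so any wider implementation would
- * have its high part zeroed by these operations.
- */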
-static bool gvec_vvv(DisasContext *ctx, arg_vvv *a, MemOp mop,
- void (*func)(unsigned, uint32_t, uint32_t,
- uint32_t, uint32_t, uint32_t))
-{
- uint32_t vd_ofs, vj_ofs, vk_ofs;
-
- CHECK_SXE;
-
- vd_ofs = vec_full_offset(a->vd);
- vj_ofs = vec_full_offset(a->vj);
- vk_ofs = vec_full_offset(a->vk);
-
-    func(mop, vd_ofs, vj_ofs, vk_ofs, 16, ctx->vl / 8);
- return true;
-}
-
-static bool gvec_vv(DisasContext *ctx, arg_vv *a, MemOp mop,
- void (*func)(unsigned, uint32_t, uint32_t,
- uint32_t, uint32_t))
-{
- uint32_t vd_ofs, vj_ofs;
-
- CHECK_SXE;
-
- vd_ofs = vec_full_offset(a->vd);
- vj_ofs = vec_full_offset(a->vj);
-
-    func(mop, vd_ofs, vj_ofs, 16, ctx->vl / 8);
- return true;
-}
-
-static bool gvec_vv_i(DisasContext *ctx, arg_vv_i *a, MemOp mop,
- void (*func)(unsigned, uint32_t, uint32_t,
- int64_t, uint32_t, uint32_t))
-{
- uint32_t vd_ofs, vj_ofs;
-
- CHECK_SXE;
-
- vd_ofs = vec_full_offset(a->vd);
- vj_ofs = vec_full_offset(a->vj);
-
-    func(mop, vd_ofs, vj_ofs, a->imm, 16, ctx->vl / 8);
- return true;
-}
-
-static bool gvec_subi(DisasContext *ctx, arg_vv_i *a, MemOp mop)
-{
- uint32_t vd_ofs, vj_ofs;
-
- CHECK_SXE;
-
- vd_ofs = vec_full_offset(a->vd);
- vj_ofs = vec_full_offset(a->vj);
-
-    tcg_gen_gvec_addi(mop, vd_ofs, vj_ofs, -a->imm, 16, ctx->vl / 8);
- return true;
-}
-
-TRANS(vadd_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_add)
-TRANS(vadd_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_add)
-TRANS(vadd_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_add)
-TRANS(vadd_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_add)
-
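-/* 128-bit add/sub, via a 64-bit add/sub pair with carry between halves */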
-#define VADDSUB_Q(NAME) \
-static bool trans_v## NAME ##_q(DisasContext *ctx, arg_vvv *a) \
-{ \
- TCGv_i64 rh, rl, ah, al, bh, bl; \
- \
- if (!avail_LSX(ctx)) { \
- return false; \
- } \
- \
- CHECK_SXE; \
- \
- rh = tcg_temp_new_i64(); \
- rl = tcg_temp_new_i64(); \
- ah = tcg_temp_new_i64(); \
- al = tcg_temp_new_i64(); \
- bh = tcg_temp_new_i64(); \
- bl = tcg_temp_new_i64(); \
- \
- get_vreg64(ah, a->vj, 1); \
- get_vreg64(al, a->vj, 0); \
- get_vreg64(bh, a->vk, 1); \
- get_vreg64(bl, a->vk, 0); \
- \
- tcg_gen_## NAME ##2_i64(rl, rh, al, ah, bl, bh); \
- \
- set_vreg64(rh, a->vd, 1); \
- set_vreg64(rl, a->vd, 0); \
- \
- return true; \
-}
-
-VADDSUB_Q(add)
-VADDSUB_Q(sub)
-
-TRANS(vsub_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_sub)
-TRANS(vsub_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_sub)
-TRANS(vsub_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_sub)
-TRANS(vsub_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_sub)
-
-TRANS(vaddi_bu, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_addi)
-TRANS(vaddi_hu, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_addi)
-TRANS(vaddi_wu, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_addi)
-TRANS(vaddi_du, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_addi)
-TRANS(vsubi_bu, LSX, gvec_subi, MO_8)
-TRANS(vsubi_hu, LSX, gvec_subi, MO_16)
-TRANS(vsubi_wu, LSX, gvec_subi, MO_32)
-TRANS(vsubi_du, LSX, gvec_subi, MO_64)
-
-TRANS(vneg_b, LSX, gvec_vv, MO_8, tcg_gen_gvec_neg)
-TRANS(vneg_h, LSX, gvec_vv, MO_16, tcg_gen_gvec_neg)
-TRANS(vneg_w, LSX, gvec_vv, MO_32, tcg_gen_gvec_neg)
-TRANS(vneg_d, LSX, gvec_vv, MO_64, tcg_gen_gvec_neg)
-
-TRANS(vsadd_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_ssadd)
-TRANS(vsadd_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_ssadd)
-TRANS(vsadd_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_ssadd)
-TRANS(vsadd_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_ssadd)
-TRANS(vsadd_bu, LSX, gvec_vvv, MO_8, tcg_gen_gvec_usadd)
-TRANS(vsadd_hu, LSX, gvec_vvv, MO_16, tcg_gen_gvec_usadd)
-TRANS(vsadd_wu, LSX, gvec_vvv, MO_32, tcg_gen_gvec_usadd)
-TRANS(vsadd_du, LSX, gvec_vvv, MO_64, tcg_gen_gvec_usadd)
-TRANS(vssub_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_sssub)
-TRANS(vssub_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_sssub)
-TRANS(vssub_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_sssub)
-TRANS(vssub_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_sssub)
-TRANS(vssub_bu, LSX, gvec_vvv, MO_8, tcg_gen_gvec_ussub)
-TRANS(vssub_hu, LSX, gvec_vvv, MO_16, tcg_gen_gvec_ussub)
-TRANS(vssub_wu, LSX, gvec_vvv, MO_32, tcg_gen_gvec_ussub)
-TRANS(vssub_du, LSX, gvec_vvv, MO_64, tcg_gen_gvec_ussub)
-
-TRANS(vhaddw_h_b, LSX, gen_vvv, gen_helper_vhaddw_h_b)
-TRANS(vhaddw_w_h, LSX, gen_vvv, gen_helper_vhaddw_w_h)
-TRANS(vhaddw_d_w, LSX, gen_vvv, gen_helper_vhaddw_d_w)
-TRANS(vhaddw_q_d, LSX, gen_vvv, gen_helper_vhaddw_q_d)
-TRANS(vhaddw_hu_bu, LSX, gen_vvv, gen_helper_vhaddw_hu_bu)
-TRANS(vhaddw_wu_hu, LSX, gen_vvv, gen_helper_vhaddw_wu_hu)
-TRANS(vhaddw_du_wu, LSX, gen_vvv, gen_helper_vhaddw_du_wu)
-TRANS(vhaddw_qu_du, LSX, gen_vvv, gen_helper_vhaddw_qu_du)
-TRANS(vhsubw_h_b, LSX, gen_vvv, gen_helper_vhsubw_h_b)
-TRANS(vhsubw_w_h, LSX, gen_vvv, gen_helper_vhsubw_w_h)
-TRANS(vhsubw_d_w, LSX, gen_vvv, gen_helper_vhsubw_d_w)
-TRANS(vhsubw_q_d, LSX, gen_vvv, gen_helper_vhsubw_q_d)
-TRANS(vhsubw_hu_bu, LSX, gen_vvv, gen_helper_vhsubw_hu_bu)
-TRANS(vhsubw_wu_hu, LSX, gen_vvv, gen_helper_vhsubw_wu_hu)
-TRANS(vhsubw_du_wu, LSX, gen_vvv, gen_helper_vhsubw_du_wu)
-TRANS(vhsubw_qu_du, LSX, gen_vvv, gen_helper_vhsubw_qu_du)
-
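-/*
- * Widening even: the even-numbered source element sits in the low half
- * of each double-width lane, so shift it to the top and arithmetic-
- * shift it back down to sign-extend, then operate at the doubled
- * element size.
- */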
-static void gen_vaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
-
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
-
- /* Sign-extend the even elements from a */
- tcg_gen_shli_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t1, t1, halfbits);
-
- /* Sign-extend the even elements from b */
- tcg_gen_shli_vec(vece, t2, b, halfbits);
- tcg_gen_sari_vec(vece, t2, t2, halfbits);
-
- tcg_gen_add_vec(vece, t, t1, t2);
-}
-
-static void gen_vaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_ext16s_i32(t1, a);
- tcg_gen_ext16s_i32(t2, b);
- tcg_gen_add_i32(t, t1, t2);
-}
-
-static void gen_vaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_ext32s_i64(t1, a);
- tcg_gen_ext32s_i64(t2, b);
- tcg_gen_add_i64(t, t1, t2);
-}
-
-static void do_vaddwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vaddwev_s,
- .fno = gen_helper_vaddwev_h_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vaddwev_w_h,
- .fniv = gen_vaddwev_s,
- .fno = gen_helper_vaddwev_w_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vaddwev_d_w,
- .fniv = gen_vaddwev_s,
- .fno = gen_helper_vaddwev_d_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vaddwev_q_d,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vaddwev_h_b, LSX, gvec_vvv, MO_8, do_vaddwev_s)
-TRANS(vaddwev_w_h, LSX, gvec_vvv, MO_16, do_vaddwev_s)
-TRANS(vaddwev_d_w, LSX, gvec_vvv, MO_32, do_vaddwev_s)
-TRANS(vaddwev_q_d, LSX, gvec_vvv, MO_64, do_vaddwev_s)
-
-static void gen_vaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_sari_i32(t1, a, 16);
- tcg_gen_sari_i32(t2, b, 16);
- tcg_gen_add_i32(t, t1, t2);
-}
-
-static void gen_vaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_sari_i64(t1, a, 32);
- tcg_gen_sari_i64(t2, b, 32);
- tcg_gen_add_i64(t, t1, t2);
-}
-
-static void gen_vaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
-
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
-
-    /* Sign-extend the odd elements of a and b */
- tcg_gen_sari_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t2, b, halfbits);
-
- tcg_gen_add_vec(vece, t, t1, t2);
-}
-
-static void do_vaddwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_sari_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vaddwod_s,
- .fno = gen_helper_vaddwod_h_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vaddwod_w_h,
- .fniv = gen_vaddwod_s,
- .fno = gen_helper_vaddwod_w_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vaddwod_d_w,
- .fniv = gen_vaddwod_s,
- .fno = gen_helper_vaddwod_d_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vaddwod_q_d,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vaddwod_h_b, LSX, gvec_vvv, MO_8, do_vaddwod_s)
-TRANS(vaddwod_w_h, LSX, gvec_vvv, MO_16, do_vaddwod_s)
-TRANS(vaddwod_d_w, LSX, gvec_vvv, MO_32, do_vaddwod_s)
-TRANS(vaddwod_q_d, LSX, gvec_vvv, MO_64, do_vaddwod_s)
-
-static void gen_vsubwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
-
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
-
- /* Sign-extend the even elements from a */
- tcg_gen_shli_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t1, t1, halfbits);
-
- /* Sign-extend the even elements from b */
- tcg_gen_shli_vec(vece, t2, b, halfbits);
- tcg_gen_sari_vec(vece, t2, t2, halfbits);
-
- tcg_gen_sub_vec(vece, t, t1, t2);
-}
-
-static void gen_vsubwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_ext16s_i32(t1, a);
- tcg_gen_ext16s_i32(t2, b);
- tcg_gen_sub_i32(t, t1, t2);
-}
-
-static void gen_vsubwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_ext32s_i64(t1, a);
- tcg_gen_ext32s_i64(t2, b);
- tcg_gen_sub_i64(t, t1, t2);
-}
-
-static void do_vsubwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_sub_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vsubwev_s,
- .fno = gen_helper_vsubwev_h_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vsubwev_w_h,
- .fniv = gen_vsubwev_s,
- .fno = gen_helper_vsubwev_w_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vsubwev_d_w,
- .fniv = gen_vsubwev_s,
- .fno = gen_helper_vsubwev_d_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vsubwev_q_d,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vsubwev_h_b, LSX, gvec_vvv, MO_8, do_vsubwev_s)
-TRANS(vsubwev_w_h, LSX, gvec_vvv, MO_16, do_vsubwev_s)
-TRANS(vsubwev_d_w, LSX, gvec_vvv, MO_32, do_vsubwev_s)
-TRANS(vsubwev_q_d, LSX, gvec_vvv, MO_64, do_vsubwev_s)
-
-static void gen_vsubwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
-
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
-
-    /* Sign-extend the odd elements of a and b */
- tcg_gen_sari_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t2, b, halfbits);
-
- tcg_gen_sub_vec(vece, t, t1, t2);
-}
-
-static void gen_vsubwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_sari_i32(t1, a, 16);
- tcg_gen_sari_i32(t2, b, 16);
- tcg_gen_sub_i32(t, t1, t2);
-}
-
-static void gen_vsubwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_sari_i64(t1, a, 32);
- tcg_gen_sari_i64(t2, b, 32);
- tcg_gen_sub_i64(t, t1, t2);
-}
-
-static void do_vsubwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_sari_vec, INDEX_op_sub_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vsubwod_s,
- .fno = gen_helper_vsubwod_h_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vsubwod_w_h,
- .fniv = gen_vsubwod_s,
- .fno = gen_helper_vsubwod_w_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vsubwod_d_w,
- .fniv = gen_vsubwod_s,
- .fno = gen_helper_vsubwod_d_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vsubwod_q_d,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vsubwod_h_b, LSX, gvec_vvv, MO_8, do_vsubwod_s)
-TRANS(vsubwod_w_h, LSX, gvec_vvv, MO_16, do_vsubwod_s)
-TRANS(vsubwod_d_w, LSX, gvec_vvv, MO_32, do_vsubwod_s)
-TRANS(vsubwod_q_d, LSX, gvec_vvv, MO_64, do_vsubwod_s)
-
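-/*
- * Widening even, unsigned: zero-extend by masking off the high half of
- * each double-width lane instead of shifting.
- */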
-static void gen_vaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, t3;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
- tcg_gen_and_vec(vece, t1, a, t3);
- tcg_gen_and_vec(vece, t2, b, t3);
- tcg_gen_add_vec(vece, t, t1, t2);
-}
-
-static void gen_vaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_ext16u_i32(t1, a);
- tcg_gen_ext16u_i32(t2, b);
- tcg_gen_add_i32(t, t1, t2);
-}
-
-static void gen_vaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_ext32u_i64(t1, a);
- tcg_gen_ext32u_i64(t2, b);
- tcg_gen_add_i64(t, t1, t2);
-}
-
-static void do_vaddwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vaddwev_u,
- .fno = gen_helper_vaddwev_h_bu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vaddwev_w_hu,
- .fniv = gen_vaddwev_u,
- .fno = gen_helper_vaddwev_w_hu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vaddwev_d_wu,
- .fniv = gen_vaddwev_u,
- .fno = gen_helper_vaddwev_d_wu,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vaddwev_q_du,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vaddwev_h_bu, LSX, gvec_vvv, MO_8, do_vaddwev_u)
-TRANS(vaddwev_w_hu, LSX, gvec_vvv, MO_16, do_vaddwev_u)
-TRANS(vaddwev_d_wu, LSX, gvec_vvv, MO_32, do_vaddwev_u)
-TRANS(vaddwev_q_du, LSX, gvec_vvv, MO_64, do_vaddwev_u)
-
-static void gen_vaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
-
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
-
-    /* Zero-extend the odd elements of a and b */
- tcg_gen_shri_vec(vece, t1, a, halfbits);
- tcg_gen_shri_vec(vece, t2, b, halfbits);
-
- tcg_gen_add_vec(vece, t, t1, t2);
-}
-
-static void gen_vaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_shri_i32(t1, a, 16);
- tcg_gen_shri_i32(t2, b, 16);
- tcg_gen_add_i32(t, t1, t2);
-}
-
-static void gen_vaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_shri_i64(t1, a, 32);
- tcg_gen_shri_i64(t2, b, 32);
- tcg_gen_add_i64(t, t1, t2);
-}
-
-static void do_vaddwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shri_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vaddwod_u,
- .fno = gen_helper_vaddwod_h_bu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vaddwod_w_hu,
- .fniv = gen_vaddwod_u,
- .fno = gen_helper_vaddwod_w_hu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vaddwod_d_wu,
- .fniv = gen_vaddwod_u,
- .fno = gen_helper_vaddwod_d_wu,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vaddwod_q_du,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vaddwod_h_bu, LSX, gvec_vvv, MO_8, do_vaddwod_u)
-TRANS(vaddwod_w_hu, LSX, gvec_vvv, MO_16, do_vaddwod_u)
-TRANS(vaddwod_d_wu, LSX, gvec_vvv, MO_32, do_vaddwod_u)
-TRANS(vaddwod_q_du, LSX, gvec_vvv, MO_64, do_vaddwod_u)
-
-static void gen_vsubwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, t3;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
- tcg_gen_and_vec(vece, t1, a, t3);
- tcg_gen_and_vec(vece, t2, b, t3);
- tcg_gen_sub_vec(vece, t, t1, t2);
-}
-
-static void gen_vsubwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_ext16u_i32(t1, a);
- tcg_gen_ext16u_i32(t2, b);
- tcg_gen_sub_i32(t, t1, t2);
-}
-
-static void gen_vsubwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_ext32u_i64(t1, a);
- tcg_gen_ext32u_i64(t2, b);
- tcg_gen_sub_i64(t, t1, t2);
-}
-
-static void do_vsubwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_sub_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vsubwev_u,
- .fno = gen_helper_vsubwev_h_bu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vsubwev_w_hu,
- .fniv = gen_vsubwev_u,
- .fno = gen_helper_vsubwev_w_hu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vsubwev_d_wu,
- .fniv = gen_vsubwev_u,
- .fno = gen_helper_vsubwev_d_wu,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vsubwev_q_du,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vsubwev_h_bu, LSX, gvec_vvv, MO_8, do_vsubwev_u)
-TRANS(vsubwev_w_hu, LSX, gvec_vvv, MO_16, do_vsubwev_u)
-TRANS(vsubwev_d_wu, LSX, gvec_vvv, MO_32, do_vsubwev_u)
-TRANS(vsubwev_q_du, LSX, gvec_vvv, MO_64, do_vsubwev_u)
-
-static void gen_vsubwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
-
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
-
-    /* Zero-extend the odd elements of a and b */
- tcg_gen_shri_vec(vece, t1, a, halfbits);
- tcg_gen_shri_vec(vece, t2, b, halfbits);
-
- tcg_gen_sub_vec(vece, t, t1, t2);
-}
-
-static void gen_vsubwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_shri_i32(t1, a, 16);
- tcg_gen_shri_i32(t2, b, 16);
- tcg_gen_sub_i32(t, t1, t2);
-}
-
-static void gen_vsubwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_shri_i64(t1, a, 32);
- tcg_gen_shri_i64(t2, b, 32);
- tcg_gen_sub_i64(t, t1, t2);
-}
-
-static void do_vsubwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shri_vec, INDEX_op_sub_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vsubwod_u,
- .fno = gen_helper_vsubwod_h_bu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vsubwod_w_hu,
- .fniv = gen_vsubwod_u,
- .fno = gen_helper_vsubwod_w_hu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vsubwod_d_wu,
- .fniv = gen_vsubwod_u,
- .fno = gen_helper_vsubwod_d_wu,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vsubwod_q_du,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vsubwod_h_bu, LSX, gvec_vvv, MO_8, do_vsubwod_u)
-TRANS(vsubwod_w_hu, LSX, gvec_vvv, MO_16, do_vsubwod_u)
-TRANS(vsubwod_d_wu, LSX, gvec_vvv, MO_32, do_vsubwod_u)
-TRANS(vsubwod_q_du, LSX, gvec_vvv, MO_64, do_vsubwod_u)
-
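-/* Mixed widening: zero-extend elements of a, sign-extend elements of b. */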
-static void gen_vaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, t3;
-
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, halfbits));
-
- /* Zero-extend the even elements from a */
- tcg_gen_and_vec(vece, t1, a, t3);
-
- /* Sign-extend the even elements from b */
- tcg_gen_shli_vec(vece, t2, b, halfbits);
- tcg_gen_sari_vec(vece, t2, t2, halfbits);
-
- tcg_gen_add_vec(vece, t, t1, t2);
-}
-
-static void gen_vaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_ext16u_i32(t1, a);
- tcg_gen_ext16s_i32(t2, b);
- tcg_gen_add_i32(t, t1, t2);
-}
-
-static void gen_vaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_ext32u_i64(t1, a);
- tcg_gen_ext32s_i64(t2, b);
- tcg_gen_add_i64(t, t1, t2);
-}
-
-static void do_vaddwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vaddwev_u_s,
- .fno = gen_helper_vaddwev_h_bu_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vaddwev_w_hu_h,
- .fniv = gen_vaddwev_u_s,
- .fno = gen_helper_vaddwev_w_hu_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vaddwev_d_wu_w,
- .fniv = gen_vaddwev_u_s,
- .fno = gen_helper_vaddwev_d_wu_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vaddwev_q_du_d,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vaddwev_h_bu_b, LSX, gvec_vvv, MO_8, do_vaddwev_u_s)
-TRANS(vaddwev_w_hu_h, LSX, gvec_vvv, MO_16, do_vaddwev_u_s)
-TRANS(vaddwev_d_wu_w, LSX, gvec_vvv, MO_32, do_vaddwev_u_s)
-TRANS(vaddwev_q_du_d, LSX, gvec_vvv, MO_64, do_vaddwev_u_s)
-
-static void gen_vaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
-
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
-
- /* Zero-extend the odd elements from a */
- tcg_gen_shri_vec(vece, t1, a, halfbits);
- /* Sign-extend the odd elements from b */
- tcg_gen_sari_vec(vece, t2, b, halfbits);
-
- tcg_gen_add_vec(vece, t, t1, t2);
-}
-
-static void gen_vaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_shri_i32(t1, a, 16);
- tcg_gen_sari_i32(t2, b, 16);
- tcg_gen_add_i32(t, t1, t2);
-}
-
-static void gen_vaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_shri_i64(t1, a, 32);
- tcg_gen_sari_i64(t2, b, 32);
- tcg_gen_add_i64(t, t1, t2);
-}
-
-static void do_vaddwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vaddwod_u_s,
- .fno = gen_helper_vaddwod_h_bu_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vaddwod_w_hu_h,
- .fniv = gen_vaddwod_u_s,
- .fno = gen_helper_vaddwod_w_hu_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vaddwod_d_wu_w,
- .fniv = gen_vaddwod_u_s,
- .fno = gen_helper_vaddwod_d_wu_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- {
- .fno = gen_helper_vaddwod_q_du_d,
- .vece = MO_128
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vaddwod_h_bu_b, LSX, gvec_vvv, MO_8, do_vaddwod_u_s)
-TRANS(vaddwod_w_hu_h, LSX, gvec_vvv, MO_16, do_vaddwod_u_s)
-TRANS(vaddwod_d_wu_w, LSX, gvec_vvv, MO_32, do_vaddwod_u_s)
-TRANS(vaddwod_q_du_d, LSX, gvec_vvv, MO_64, do_vaddwod_u_s)
-
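-/*
- * Average without widening: avg(a, b) = (a >> 1) + (b >> 1) + carry,
- * where the carry-in is (a & b) & 1 for the truncating form
- * ((a + b) >> 1) and (a | b) & 1 for the rounding form
- * ((a + b + 1) >> 1).
- */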
-static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
- void (*gen_shr_vec)(unsigned, TCGv_vec,
- TCGv_vec, int64_t),
- void (*gen_round_vec)(unsigned, TCGv_vec,
- TCGv_vec, TCGv_vec))
-{
- TCGv_vec tmp = tcg_temp_new_vec_matching(t);
- gen_round_vec(vece, tmp, a, b);
- tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1));
- gen_shr_vec(vece, a, a, 1);
- gen_shr_vec(vece, b, b, 1);
- tcg_gen_add_vec(vece, t, a, b);
- tcg_gen_add_vec(vece, t, t, tmp);
-}
-
-static void gen_vavg_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_and_vec);
-}
-
-static void gen_vavg_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_and_vec);
-}
-
-static void gen_vavgr_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_or_vec);
-}
-
-static void gen_vavgr_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_or_vec);
-}
-
-static void do_vavg_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_sari_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vavg_s,
- .fno = gen_helper_vavg_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vavg_s,
- .fno = gen_helper_vavg_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vavg_s,
- .fno = gen_helper_vavg_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vavg_s,
- .fno = gen_helper_vavg_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-static void do_vavg_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shri_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vavg_u,
- .fno = gen_helper_vavg_bu,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vavg_u,
- .fno = gen_helper_vavg_hu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vavg_u,
- .fno = gen_helper_vavg_wu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vavg_u,
- .fno = gen_helper_vavg_du,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vavg_b, LSX, gvec_vvv, MO_8, do_vavg_s)
-TRANS(vavg_h, LSX, gvec_vvv, MO_16, do_vavg_s)
-TRANS(vavg_w, LSX, gvec_vvv, MO_32, do_vavg_s)
-TRANS(vavg_d, LSX, gvec_vvv, MO_64, do_vavg_s)
-TRANS(vavg_bu, LSX, gvec_vvv, MO_8, do_vavg_u)
-TRANS(vavg_hu, LSX, gvec_vvv, MO_16, do_vavg_u)
-TRANS(vavg_wu, LSX, gvec_vvv, MO_32, do_vavg_u)
-TRANS(vavg_du, LSX, gvec_vvv, MO_64, do_vavg_u)
-
-static void do_vavgr_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_sari_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vavgr_s,
- .fno = gen_helper_vavgr_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vavgr_s,
- .fno = gen_helper_vavgr_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vavgr_s,
- .fno = gen_helper_vavgr_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vavgr_s,
- .fno = gen_helper_vavgr_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-static void do_vavgr_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shri_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vavgr_u,
- .fno = gen_helper_vavgr_bu,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vavgr_u,
- .fno = gen_helper_vavgr_hu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vavgr_u,
- .fno = gen_helper_vavgr_wu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vavgr_u,
- .fno = gen_helper_vavgr_du,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vavgr_b, LSX, gvec_vvv, MO_8, do_vavgr_s)
-TRANS(vavgr_h, LSX, gvec_vvv, MO_16, do_vavgr_s)
-TRANS(vavgr_w, LSX, gvec_vvv, MO_32, do_vavgr_s)
-TRANS(vavgr_d, LSX, gvec_vvv, MO_64, do_vavgr_s)
-TRANS(vavgr_bu, LSX, gvec_vvv, MO_8, do_vavgr_u)
-TRANS(vavgr_hu, LSX, gvec_vvv, MO_16, do_vavgr_u)
-TRANS(vavgr_wu, LSX, gvec_vvv, MO_32, do_vavgr_u)
-TRANS(vavgr_du, LSX, gvec_vvv, MO_64, do_vavgr_u)
-
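-/* absd(a, b) = max(a, b) - min(a, b), with no intermediate widening */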
-static void gen_vabsd_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- tcg_gen_smax_vec(vece, t, a, b);
- tcg_gen_smin_vec(vece, a, a, b);
- tcg_gen_sub_vec(vece, t, t, a);
-}
-
-static void do_vabsd_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_smax_vec, INDEX_op_smin_vec, INDEX_op_sub_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vabsd_s,
- .fno = gen_helper_vabsd_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vabsd_s,
- .fno = gen_helper_vabsd_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vabsd_s,
- .fno = gen_helper_vabsd_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vabsd_s,
- .fno = gen_helper_vabsd_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-static void gen_vabsd_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- tcg_gen_umax_vec(vece, t, a, b);
- tcg_gen_umin_vec(vece, a, a, b);
- tcg_gen_sub_vec(vece, t, t, a);
-}
-
-static void do_vabsd_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_umax_vec, INDEX_op_umin_vec, INDEX_op_sub_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vabsd_u,
- .fno = gen_helper_vabsd_bu,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vabsd_u,
- .fno = gen_helper_vabsd_hu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vabsd_u,
- .fno = gen_helper_vabsd_wu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vabsd_u,
- .fno = gen_helper_vabsd_du,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vabsd_b, LSX, gvec_vvv, MO_8, do_vabsd_s)
-TRANS(vabsd_h, LSX, gvec_vvv, MO_16, do_vabsd_s)
-TRANS(vabsd_w, LSX, gvec_vvv, MO_32, do_vabsd_s)
-TRANS(vabsd_d, LSX, gvec_vvv, MO_64, do_vabsd_s)
-TRANS(vabsd_bu, LSX, gvec_vvv, MO_8, do_vabsd_u)
-TRANS(vabsd_hu, LSX, gvec_vvv, MO_16, do_vabsd_u)
-TRANS(vabsd_wu, LSX, gvec_vvv, MO_32, do_vabsd_u)
-TRANS(vabsd_du, LSX, gvec_vvv, MO_64, do_vabsd_u)
-
-static void gen_vadda(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
-
- tcg_gen_abs_vec(vece, t1, a);
- tcg_gen_abs_vec(vece, t2, b);
- tcg_gen_add_vec(vece, t, t1, t2);
-}
-
-static void do_vadda(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_abs_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vadda,
- .fno = gen_helper_vadda_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vadda,
- .fno = gen_helper_vadda_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vadda,
- .fno = gen_helper_vadda_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vadda,
- .fno = gen_helper_vadda_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vadda_b, LSX, gvec_vvv, MO_8, do_vadda)
-TRANS(vadda_h, LSX, gvec_vvv, MO_16, do_vadda)
-TRANS(vadda_w, LSX, gvec_vvv, MO_32, do_vadda)
-TRANS(vadda_d, LSX, gvec_vvv, MO_64, do_vadda)
-
-TRANS(vmax_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_smax)
-TRANS(vmax_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_smax)
-TRANS(vmax_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_smax)
-TRANS(vmax_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_smax)
-TRANS(vmax_bu, LSX, gvec_vvv, MO_8, tcg_gen_gvec_umax)
-TRANS(vmax_hu, LSX, gvec_vvv, MO_16, tcg_gen_gvec_umax)
-TRANS(vmax_wu, LSX, gvec_vvv, MO_32, tcg_gen_gvec_umax)
-TRANS(vmax_du, LSX, gvec_vvv, MO_64, tcg_gen_gvec_umax)
-
-TRANS(vmin_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_smin)
-TRANS(vmin_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_smin)
-TRANS(vmin_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_smin)
-TRANS(vmin_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_smin)
-TRANS(vmin_bu, LSX, gvec_vvv, MO_8, tcg_gen_gvec_umin)
-TRANS(vmin_hu, LSX, gvec_vvv, MO_16, tcg_gen_gvec_umin)
-TRANS(vmin_wu, LSX, gvec_vvv, MO_32, tcg_gen_gvec_umin)
-TRANS(vmin_du, LSX, gvec_vvv, MO_64, tcg_gen_gvec_umin)
-
-static void gen_vmini_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- tcg_gen_smin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
-}
-
-static void gen_vmini_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- tcg_gen_umin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
-}
-
-static void gen_vmaxi_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- tcg_gen_smax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
-}
-
-static void gen_vmaxi_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- tcg_gen_umax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
-}
-
-static void do_vmini_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_smin_vec, 0
- };
- static const GVecGen2i op[4] = {
- {
- .fniv = gen_vmini_s,
- .fnoi = gen_helper_vmini_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vmini_s,
- .fnoi = gen_helper_vmini_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vmini_s,
- .fnoi = gen_helper_vmini_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vmini_s,
- .fnoi = gen_helper_vmini_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
-}
-
-static void do_vmini_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_umin_vec, 0
- };
- static const GVecGen2i op[4] = {
- {
- .fniv = gen_vmini_u,
- .fnoi = gen_helper_vmini_bu,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vmini_u,
- .fnoi = gen_helper_vmini_hu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vmini_u,
- .fnoi = gen_helper_vmini_wu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vmini_u,
- .fnoi = gen_helper_vmini_du,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
-}
-
-TRANS(vmini_b, LSX, gvec_vv_i, MO_8, do_vmini_s)
-TRANS(vmini_h, LSX, gvec_vv_i, MO_16, do_vmini_s)
-TRANS(vmini_w, LSX, gvec_vv_i, MO_32, do_vmini_s)
-TRANS(vmini_d, LSX, gvec_vv_i, MO_64, do_vmini_s)
-TRANS(vmini_bu, LSX, gvec_vv_i, MO_8, do_vmini_u)
-TRANS(vmini_hu, LSX, gvec_vv_i, MO_16, do_vmini_u)
-TRANS(vmini_wu, LSX, gvec_vv_i, MO_32, do_vmini_u)
-TRANS(vmini_du, LSX, gvec_vv_i, MO_64, do_vmini_u)
-
-static void do_vmaxi_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_smax_vec, 0
- };
- static const GVecGen2i op[4] = {
- {
- .fniv = gen_vmaxi_s,
- .fnoi = gen_helper_vmaxi_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vmaxi_s,
- .fnoi = gen_helper_vmaxi_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vmaxi_s,
- .fnoi = gen_helper_vmaxi_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vmaxi_s,
- .fnoi = gen_helper_vmaxi_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
-}
-
-static void do_vmaxi_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_umax_vec, 0
- };
- static const GVecGen2i op[4] = {
- {
- .fniv = gen_vmaxi_u,
- .fnoi = gen_helper_vmaxi_bu,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vmaxi_u,
- .fnoi = gen_helper_vmaxi_hu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vmaxi_u,
- .fnoi = gen_helper_vmaxi_wu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vmaxi_u,
- .fnoi = gen_helper_vmaxi_du,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
-}
-
-TRANS(vmaxi_b, LSX, gvec_vv_i, MO_8, do_vmaxi_s)
-TRANS(vmaxi_h, LSX, gvec_vv_i, MO_16, do_vmaxi_s)
-TRANS(vmaxi_w, LSX, gvec_vv_i, MO_32, do_vmaxi_s)
-TRANS(vmaxi_d, LSX, gvec_vv_i, MO_64, do_vmaxi_s)
-TRANS(vmaxi_bu, LSX, gvec_vv_i, MO_8, do_vmaxi_u)
-TRANS(vmaxi_hu, LSX, gvec_vv_i, MO_16, do_vmaxi_u)
-TRANS(vmaxi_wu, LSX, gvec_vv_i, MO_32, do_vmaxi_u)
-TRANS(vmaxi_du, LSX, gvec_vv_i, MO_64, do_vmaxi_u)
-
-TRANS(vmul_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_mul)
-TRANS(vmul_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_mul)
-TRANS(vmul_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_mul)
-TRANS(vmul_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_mul)
-
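-/* muh: keep only the high half of the double-width product */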
-static void gen_vmuh_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 discard = tcg_temp_new_i32();
- tcg_gen_muls2_i32(discard, t, a, b);
-}
-
-static void gen_vmuh_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 discard = tcg_temp_new_i64();
- tcg_gen_muls2_i64(discard, t, a, b);
-}
-
-static void do_vmuh_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const GVecGen3 op[4] = {
- {
- .fno = gen_helper_vmuh_b,
- .vece = MO_8
- },
- {
- .fno = gen_helper_vmuh_h,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmuh_w,
- .fno = gen_helper_vmuh_w,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmuh_d,
- .fno = gen_helper_vmuh_d,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmuh_b, LSX, gvec_vvv, MO_8, do_vmuh_s)
-TRANS(vmuh_h, LSX, gvec_vvv, MO_16, do_vmuh_s)
-TRANS(vmuh_w, LSX, gvec_vvv, MO_32, do_vmuh_s)
-TRANS(vmuh_d, LSX, gvec_vvv, MO_64, do_vmuh_s)
-
-static void gen_vmuh_wu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 discard = tcg_temp_new_i32();
- tcg_gen_mulu2_i32(discard, t, a, b);
-}
-
-static void gen_vmuh_du(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 discard = tcg_temp_new_i64();
- tcg_gen_mulu2_i64(discard, t, a, b);
-}
-
-static void do_vmuh_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const GVecGen3 op[4] = {
- {
- .fno = gen_helper_vmuh_bu,
- .vece = MO_8
- },
- {
- .fno = gen_helper_vmuh_hu,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmuh_wu,
- .fno = gen_helper_vmuh_wu,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmuh_du,
- .fno = gen_helper_vmuh_du,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmuh_bu, LSX, gvec_vvv, MO_8, do_vmuh_u)
-TRANS(vmuh_hu, LSX, gvec_vvv, MO_16, do_vmuh_u)
-TRANS(vmuh_wu, LSX, gvec_vvv, MO_32, do_vmuh_u)
-TRANS(vmuh_du, LSX, gvec_vvv, MO_64, do_vmuh_u)
-
-static void gen_vmulwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- tcg_gen_shli_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t1, t1, halfbits);
- tcg_gen_shli_vec(vece, t2, b, halfbits);
- tcg_gen_sari_vec(vece, t2, t2, halfbits);
- tcg_gen_mul_vec(vece, t, t1, t2);
-}
-
-static void gen_vmulwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_ext16s_i32(t1, a);
- tcg_gen_ext16s_i32(t2, b);
- tcg_gen_mul_i32(t, t1, t2);
-}
-
-static void gen_vmulwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_ext32s_i64(t1, a);
- tcg_gen_ext32s_i64(t2, b);
- tcg_gen_mul_i64(t, t1, t2);
-}
-
-static void do_vmulwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmulwev_s,
- .fno = gen_helper_vmulwev_h_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmulwev_w_h,
- .fniv = gen_vmulwev_s,
- .fno = gen_helper_vmulwev_w_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmulwev_d_w,
- .fniv = gen_vmulwev_s,
- .fno = gen_helper_vmulwev_d_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmulwev_h_b, LSX, gvec_vvv, MO_8, do_vmulwev_s)
-TRANS(vmulwev_w_h, LSX, gvec_vvv, MO_16, do_vmulwev_s)
-TRANS(vmulwev_d_w, LSX, gvec_vvv, MO_32, do_vmulwev_s)
-
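-/* TCG provides mulsu2 but not mulus2; unsigned x signed commutes. */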
-static void tcg_gen_mulus2_i64(TCGv_i64 rl, TCGv_i64 rh,
- TCGv_i64 arg1, TCGv_i64 arg2)
-{
- tcg_gen_mulsu2_i64(rl, rh, arg2, arg1);
-}
-
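-/*
- * 128-bit widening multiply: take one 64-bit element from vj and one
- * from vk (even lanes at index 0, odd lanes at index 1) and write the
- * full double-width product across both halves of vd.
- */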
-#define VMUL_Q(NAME, FN, idx1, idx2) \
-static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \
-{ \
- TCGv_i64 rh, rl, arg1, arg2; \
- \
- if (!avail_LSX(ctx)) { \
- return false; \
-    }                                                     \
-                                                          \
-    CHECK_SXE;                                            \
-                                                          \
- rh = tcg_temp_new_i64(); \
- rl = tcg_temp_new_i64(); \
- arg1 = tcg_temp_new_i64(); \
- arg2 = tcg_temp_new_i64(); \
- \
- get_vreg64(arg1, a->vj, idx1); \
- get_vreg64(arg2, a->vk, idx2); \
- \
- tcg_gen_## FN ##_i64(rl, rh, arg1, arg2); \
- \
- set_vreg64(rh, a->vd, 1); \
- set_vreg64(rl, a->vd, 0); \
- \
- return true; \
-}
-
-VMUL_Q(vmulwev_q_d, muls2, 0, 0)
-VMUL_Q(vmulwod_q_d, muls2, 1, 1)
-VMUL_Q(vmulwev_q_du, mulu2, 0, 0)
-VMUL_Q(vmulwod_q_du, mulu2, 1, 1)
-VMUL_Q(vmulwev_q_du_d, mulus2, 0, 0)
-VMUL_Q(vmulwod_q_du_d, mulus2, 1, 1)
-
-static void gen_vmulwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- tcg_gen_sari_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t2, b, halfbits);
- tcg_gen_mul_vec(vece, t, t1, t2);
-}
-
-static void gen_vmulwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_sari_i32(t1, a, 16);
- tcg_gen_sari_i32(t2, b, 16);
- tcg_gen_mul_i32(t, t1, t2);
-}
-
-static void gen_vmulwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_sari_i64(t1, a, 32);
- tcg_gen_sari_i64(t2, b, 32);
- tcg_gen_mul_i64(t, t1, t2);
-}
-
-static void do_vmulwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_sari_vec, INDEX_op_mul_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmulwod_s,
- .fno = gen_helper_vmulwod_h_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmulwod_w_h,
- .fniv = gen_vmulwod_s,
- .fno = gen_helper_vmulwod_w_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmulwod_d_w,
- .fniv = gen_vmulwod_s,
- .fno = gen_helper_vmulwod_d_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmulwod_h_b, LSX, gvec_vvv, MO_8, do_vmulwod_s)
-TRANS(vmulwod_w_h, LSX, gvec_vvv, MO_16, do_vmulwod_s)
-TRANS(vmulwod_d_w, LSX, gvec_vvv, MO_32, do_vmulwod_s)
-
-static void gen_vmulwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, mask;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
- tcg_gen_and_vec(vece, t1, a, mask);
- tcg_gen_and_vec(vece, t2, b, mask);
- tcg_gen_mul_vec(vece, t, t1, t2);
-}
-
-static void gen_vmulwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_ext16u_i32(t1, a);
- tcg_gen_ext16u_i32(t2, b);
- tcg_gen_mul_i32(t, t1, t2);
-}
-
-static void gen_vmulwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_ext32u_i64(t1, a);
- tcg_gen_ext32u_i64(t2, b);
- tcg_gen_mul_i64(t, t1, t2);
-}
-
-static void do_vmulwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_mul_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmulwev_u,
- .fno = gen_helper_vmulwev_h_bu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmulwev_w_hu,
- .fniv = gen_vmulwev_u,
- .fno = gen_helper_vmulwev_w_hu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmulwev_d_wu,
- .fniv = gen_vmulwev_u,
- .fno = gen_helper_vmulwev_d_wu,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmulwev_h_bu, LSX, gvec_vvv, MO_8, do_vmulwev_u)
-TRANS(vmulwev_w_hu, LSX, gvec_vvv, MO_16, do_vmulwev_u)
-TRANS(vmulwev_d_wu, LSX, gvec_vvv, MO_32, do_vmulwev_u)
-
-static void gen_vmulwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- tcg_gen_shri_vec(vece, t1, a, halfbits);
- tcg_gen_shri_vec(vece, t2, b, halfbits);
- tcg_gen_mul_vec(vece, t, t1, t2);
-}
-
-static void gen_vmulwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_shri_i32(t1, a, 16);
- tcg_gen_shri_i32(t2, b, 16);
- tcg_gen_mul_i32(t, t1, t2);
-}
-
-static void gen_vmulwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_shri_i64(t1, a, 32);
- tcg_gen_shri_i64(t2, b, 32);
- tcg_gen_mul_i64(t, t1, t2);
-}
-
-static void do_vmulwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shri_vec, INDEX_op_mul_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmulwod_u,
- .fno = gen_helper_vmulwod_h_bu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmulwod_w_hu,
- .fniv = gen_vmulwod_u,
- .fno = gen_helper_vmulwod_w_hu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmulwod_d_wu,
- .fniv = gen_vmulwod_u,
- .fno = gen_helper_vmulwod_d_wu,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmulwod_h_bu, LSX, gvec_vvv, MO_8, do_vmulwod_u)
-TRANS(vmulwod_w_hu, LSX, gvec_vvv, MO_16, do_vmulwod_u)
-TRANS(vmulwod_d_wu, LSX, gvec_vvv, MO_32, do_vmulwod_u)
-
-static void gen_vmulwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, mask;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
- tcg_gen_and_vec(vece, t1, a, mask);
- tcg_gen_shli_vec(vece, t2, b, halfbits);
- tcg_gen_sari_vec(vece, t2, t2, halfbits);
- tcg_gen_mul_vec(vece, t, t1, t2);
-}
-
-static void gen_vmulwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_ext16u_i32(t1, a);
- tcg_gen_ext16s_i32(t2, b);
- tcg_gen_mul_i32(t, t1, t2);
-}
-
-static void gen_vmulwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_ext32u_i64(t1, a);
- tcg_gen_ext32s_i64(t2, b);
- tcg_gen_mul_i64(t, t1, t2);
-}
-
-static void do_vmulwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmulwev_u_s,
- .fno = gen_helper_vmulwev_h_bu_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmulwev_w_hu_h,
- .fniv = gen_vmulwev_u_s,
- .fno = gen_helper_vmulwev_w_hu_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmulwev_d_wu_w,
- .fniv = gen_vmulwev_u_s,
- .fno = gen_helper_vmulwev_d_wu_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmulwev_h_bu_b, LSX, gvec_vvv, MO_8, do_vmulwev_u_s)
-TRANS(vmulwev_w_hu_h, LSX, gvec_vvv, MO_16, do_vmulwev_u_s)
-TRANS(vmulwev_d_wu_w, LSX, gvec_vvv, MO_32, do_vmulwev_u_s)
-
-static void gen_vmulwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- tcg_gen_shri_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t2, b, halfbits);
- tcg_gen_mul_vec(vece, t, t1, t2);
-}
-
-static void gen_vmulwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1, t2;
-
- t1 = tcg_temp_new_i32();
- t2 = tcg_temp_new_i32();
- tcg_gen_shri_i32(t1, a, 16);
- tcg_gen_sari_i32(t2, b, 16);
- tcg_gen_mul_i32(t, t1, t2);
-}
-
-static void gen_vmulwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1, t2;
-
- t1 = tcg_temp_new_i64();
- t2 = tcg_temp_new_i64();
- tcg_gen_shri_i64(t1, a, 32);
- tcg_gen_sari_i64(t2, b, 32);
- tcg_gen_mul_i64(t, t1, t2);
-}
-
-static void do_vmulwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmulwod_u_s,
- .fno = gen_helper_vmulwod_h_bu_b,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmulwod_w_hu_h,
- .fniv = gen_vmulwod_u_s,
- .fno = gen_helper_vmulwod_w_hu_h,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmulwod_d_wu_w,
- .fniv = gen_vmulwod_u_s,
- .fno = gen_helper_vmulwod_d_wu_w,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmulwod_h_bu_b, LSX, gvec_vvv, MO_8, do_vmulwod_u_s)
-TRANS(vmulwod_w_hu_h, LSX, gvec_vvv, MO_16, do_vmulwod_u_s)
-TRANS(vmulwod_d_wu_w, LSX, gvec_vvv, MO_32, do_vmulwod_u_s)
-
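-/*
- * Multiply-accumulate: vd is both source and destination, hence
- * .load_dest in the gvec expanders below.
- */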
-static void gen_vmadd(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1;
-
- t1 = tcg_temp_new_vec_matching(t);
- tcg_gen_mul_vec(vece, t1, a, b);
- tcg_gen_add_vec(vece, t, t, t1);
-}
-
-static void gen_vmadd_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1;
-
- t1 = tcg_temp_new_i32();
- tcg_gen_mul_i32(t1, a, b);
- tcg_gen_add_i32(t, t, t1);
-}
-
-static void gen_vmadd_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1;
-
- t1 = tcg_temp_new_i64();
- tcg_gen_mul_i64(t1, a, b);
- tcg_gen_add_i64(t, t, t1);
-}
-
-static void do_vmadd(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_mul_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vmadd,
- .fno = gen_helper_vmadd_b,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vmadd,
- .fno = gen_helper_vmadd_h,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmadd_w,
- .fniv = gen_vmadd,
- .fno = gen_helper_vmadd_w,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmadd_d,
- .fniv = gen_vmadd,
- .fno = gen_helper_vmadd_d,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmadd_b, LSX, gvec_vvv, MO_8, do_vmadd)
-TRANS(vmadd_h, LSX, gvec_vvv, MO_16, do_vmadd)
-TRANS(vmadd_w, LSX, gvec_vvv, MO_32, do_vmadd)
-TRANS(vmadd_d, LSX, gvec_vvv, MO_64, do_vmadd)
-
-static void gen_vmsub(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1;
-
- t1 = tcg_temp_new_vec_matching(t);
- tcg_gen_mul_vec(vece, t1, a, b);
- tcg_gen_sub_vec(vece, t, t, t1);
-}
-
-static void gen_vmsub_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1;
-
- t1 = tcg_temp_new_i32();
- tcg_gen_mul_i32(t1, a, b);
- tcg_gen_sub_i32(t, t, t1);
-}
-
-static void gen_vmsub_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1;
-
- t1 = tcg_temp_new_i64();
- tcg_gen_mul_i64(t1, a, b);
- tcg_gen_sub_i64(t, t, t1);
-}
-
-static void do_vmsub(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_mul_vec, INDEX_op_sub_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vmsub,
- .fno = gen_helper_vmsub_b,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vmsub,
- .fno = gen_helper_vmsub_h,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmsub_w,
- .fniv = gen_vmsub,
- .fno = gen_helper_vmsub_w,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmsub_d,
- .fniv = gen_vmsub,
- .fno = gen_helper_vmsub_d,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmsub_b, LSX, gvec_vvv, MO_8, do_vmsub)
-TRANS(vmsub_h, LSX, gvec_vvv, MO_16, do_vmsub)
-TRANS(vmsub_w, LSX, gvec_vvv, MO_32, do_vmsub)
-TRANS(vmsub_d, LSX, gvec_vvv, MO_64, do_vmsub)
-
-static void gen_vmaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, t3;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- t3 = tcg_temp_new_vec_matching(t);
- tcg_gen_shli_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t1, t1, halfbits);
- tcg_gen_shli_vec(vece, t2, b, halfbits);
- tcg_gen_sari_vec(vece, t2, t2, halfbits);
- tcg_gen_mul_vec(vece, t3, t1, t2);
- tcg_gen_add_vec(vece, t, t, t3);
-}
-
-static void gen_vmaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1;
-
- t1 = tcg_temp_new_i32();
- gen_vmulwev_w_h(t1, a, b);
- tcg_gen_add_i32(t, t, t1);
-}
-
-static void gen_vmaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1;
-
- t1 = tcg_temp_new_i64();
- gen_vmulwev_d_w(t1, a, b);
- tcg_gen_add_i64(t, t, t1);
-}
-
-static void do_vmaddwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, INDEX_op_sari_vec,
- INDEX_op_mul_vec, INDEX_op_add_vec, 0
- };
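- /* Indexed by the source element size; each entry widens to 2 * vece. */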
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmaddwev_s,
- .fno = gen_helper_vmaddwev_h_b,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmaddwev_w_h,
- .fniv = gen_vmaddwev_s,
- .fno = gen_helper_vmaddwev_w_h,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmaddwev_d_w,
- .fniv = gen_vmaddwev_s,
- .fno = gen_helper_vmaddwev_d_w,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmaddwev_h_b, LSX, gvec_vvv, MO_8, do_vmaddwev_s)
-TRANS(vmaddwev_w_h, LSX, gvec_vvv, MO_16, do_vmaddwev_s)
-TRANS(vmaddwev_d_w, LSX, gvec_vvv, MO_32, do_vmaddwev_s)
-
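-/*
- * 128-bit widening multiply-accumulate: multiply one 64-bit element of
- * vj by one of vk into a 128-bit product (th:tl), then add it into the
- * 128-bit accumulator held in vd with add2.  idx1/idx2 pick the even
- * (0) or odd (1) 64-bit source element.
- */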
-#define VMADD_Q(NAME, FN, idx1, idx2) \
-static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \
-{ \
- TCGv_i64 rh, rl, arg1, arg2, th, tl; \
- \
- if (!avail_LSX(ctx)) { \
- return false; \
- } \
- \
- rh = tcg_temp_new_i64(); \
- rl = tcg_temp_new_i64(); \
- arg1 = tcg_temp_new_i64(); \
- arg2 = tcg_temp_new_i64(); \
- th = tcg_temp_new_i64(); \
- tl = tcg_temp_new_i64(); \
- \
- get_vreg64(arg1, a->vj, idx1); \
- get_vreg64(arg2, a->vk, idx2); \
- get_vreg64(rh, a->vd, 1); \
- get_vreg64(rl, a->vd, 0); \
- \
- tcg_gen_## FN ##_i64(tl, th, arg1, arg2); \
- tcg_gen_add2_i64(rl, rh, rl, rh, tl, th); \
- \
- set_vreg64(rh, a->vd, 1); \
- set_vreg64(rl, a->vd, 0); \
- \
- return true; \
-}
-
-VMADD_Q(vmaddwev_q_d, muls2, 0, 0)
-VMADD_Q(vmaddwod_q_d, muls2, 1, 1)
-VMADD_Q(vmaddwev_q_du, mulu2, 0, 0)
-VMADD_Q(vmaddwod_q_du, mulu2, 1, 1)
-VMADD_Q(vmaddwev_q_du_d, mulus2, 0, 0)
-VMADD_Q(vmaddwod_q_du_d, mulus2, 1, 1)
-
-static void gen_vmaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, t3;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- t3 = tcg_temp_new_vec_matching(t);
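- /* An arithmetic right shift extracts the odd halves, sign-extended. */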
- tcg_gen_sari_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t2, b, halfbits);
- tcg_gen_mul_vec(vece, t3, t1, t2);
- tcg_gen_add_vec(vece, t, t, t3);
-}
-
-static void gen_vmaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1;
-
- t1 = tcg_temp_new_i32();
- gen_vmulwod_w_h(t1, a, b);
- tcg_gen_add_i32(t, t, t1);
-}
-
-static void gen_vmaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1;
-
- t1 = tcg_temp_new_i64();
- gen_vmulwod_d_w(t1, a, b);
- tcg_gen_add_i64(t, t, t1);
-}
-
-static void do_vmaddwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_sari_vec, INDEX_op_mul_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmaddwod_s,
- .fno = gen_helper_vmaddwod_h_b,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmaddwod_w_h,
- .fniv = gen_vmaddwod_s,
- .fno = gen_helper_vmaddwod_w_h,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmaddwod_d_w,
- .fniv = gen_vmaddwod_s,
- .fno = gen_helper_vmaddwod_d_w,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmaddwod_h_b, LSX, gvec_vvv, MO_8, do_vmaddwod_s)
-TRANS(vmaddwod_w_h, LSX, gvec_vvv, MO_16, do_vmaddwod_s)
-TRANS(vmaddwod_d_w, LSX, gvec_vvv, MO_32, do_vmaddwod_s)
-
-static void gen_vmaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, mask;
-
- t1 = tcg_temp_new_vec_matching(t);
- t2 = tcg_temp_new_vec_matching(b);
- mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
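- /* Zero-extend the even-indexed narrow elements by masking. */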
- tcg_gen_and_vec(vece, t1, a, mask);
- tcg_gen_and_vec(vece, t2, b, mask);
- tcg_gen_mul_vec(vece, t1, t1, t2);
- tcg_gen_add_vec(vece, t, t, t1);
-}
-
-static void gen_vmaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1;
-
- t1 = tcg_temp_new_i32();
- gen_vmulwev_w_hu(t1, a, b);
- tcg_gen_add_i32(t, t, t1);
-}
-
-static void gen_vmaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1;
-
- t1 = tcg_temp_new_i64();
- gen_vmulwev_d_wu(t1, a, b);
- tcg_gen_add_i64(t, t, t1);
-}
-
-static void do_vmaddwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_mul_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmaddwev_u,
- .fno = gen_helper_vmaddwev_h_bu,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmaddwev_w_hu,
- .fniv = gen_vmaddwev_u,
- .fno = gen_helper_vmaddwev_w_hu,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmaddwev_d_wu,
- .fniv = gen_vmaddwev_u,
- .fno = gen_helper_vmaddwev_d_wu,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmaddwev_h_bu, LSX, gvec_vvv, MO_8, do_vmaddwev_u)
-TRANS(vmaddwev_w_hu, LSX, gvec_vvv, MO_16, do_vmaddwev_u)
-TRANS(vmaddwev_d_wu, LSX, gvec_vvv, MO_32, do_vmaddwev_u)
-
-static void gen_vmaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, t3;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- t3 = tcg_temp_new_vec_matching(t);
- tcg_gen_shri_vec(vece, t1, a, halfbits);
- tcg_gen_shri_vec(vece, t2, b, halfbits);
- tcg_gen_mul_vec(vece, t3, t1, t2);
- tcg_gen_add_vec(vece, t, t, t3);
-}
-
-static void gen_vmaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1;
-
- t1 = tcg_temp_new_i32();
- gen_vmulwod_w_hu(t1, a, b);
- tcg_gen_add_i32(t, t, t1);
-}
-
-static void gen_vmaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1;
-
- t1 = tcg_temp_new_i64();
- gen_vmulwod_d_wu(t1, a, b);
- tcg_gen_add_i64(t, t, t1);
-}
-
-static void do_vmaddwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shri_vec, INDEX_op_mul_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmaddwod_u,
- .fno = gen_helper_vmaddwod_h_bu,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmaddwod_w_hu,
- .fniv = gen_vmaddwod_u,
- .fno = gen_helper_vmaddwod_w_hu,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmaddwod_d_wu,
- .fniv = gen_vmaddwod_u,
- .fno = gen_helper_vmaddwod_d_wu,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmaddwod_h_bu, LSX, gvec_vvv, MO_8, do_vmaddwod_u)
-TRANS(vmaddwod_w_hu, LSX, gvec_vvv, MO_16, do_vmaddwod_u)
-TRANS(vmaddwod_d_wu, LSX, gvec_vvv, MO_32, do_vmaddwod_u)
-
-static void gen_vmaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, mask;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
- tcg_gen_and_vec(vece, t1, a, mask);
- tcg_gen_shli_vec(vece, t2, b, halfbits);
- tcg_gen_sari_vec(vece, t2, t2, halfbits);
- tcg_gen_mul_vec(vece, t1, t1, t2);
- tcg_gen_add_vec(vece, t, t, t1);
-}
-
-static void gen_vmaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1;
-
- t1 = tcg_temp_new_i32();
- gen_vmulwev_w_hu_h(t1, a, b);
- tcg_gen_add_i32(t, t, t1);
-}
-
-static void gen_vmaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1;
-
- t1 = tcg_temp_new_i64();
- gen_vmulwev_d_wu_w(t1, a, b);
- tcg_gen_add_i64(t, t, t1);
-}
-
-static void do_vmaddwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, INDEX_op_sari_vec,
- INDEX_op_mul_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmaddwev_u_s,
- .fno = gen_helper_vmaddwev_h_bu_b,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmaddwev_w_hu_h,
- .fniv = gen_vmaddwev_u_s,
- .fno = gen_helper_vmaddwev_w_hu_h,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmaddwev_d_wu_w,
- .fniv = gen_vmaddwev_u_s,
- .fno = gen_helper_vmaddwev_d_wu_w,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmaddwev_h_bu_b, LSX, gvec_vvv, MO_8, do_vmaddwev_u_s)
-TRANS(vmaddwev_w_hu_h, LSX, gvec_vvv, MO_16, do_vmaddwev_u_s)
-TRANS(vmaddwev_d_wu_w, LSX, gvec_vvv, MO_32, do_vmaddwev_u_s)
-
-static void gen_vmaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, t2, t3;
- int halfbits = 4 << vece;
-
- t1 = tcg_temp_new_vec_matching(a);
- t2 = tcg_temp_new_vec_matching(b);
- t3 = tcg_temp_new_vec_matching(t);
- tcg_gen_shri_vec(vece, t1, a, halfbits);
- tcg_gen_sari_vec(vece, t2, b, halfbits);
- tcg_gen_mul_vec(vece, t3, t1, t2);
- tcg_gen_add_vec(vece, t, t, t3);
-}
-
-static void gen_vmaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
-{
- TCGv_i32 t1;
-
- t1 = tcg_temp_new_i32();
- gen_vmulwod_w_hu_h(t1, a, b);
- tcg_gen_add_i32(t, t, t1);
-}
-
-static void gen_vmaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
-{
- TCGv_i64 t1;
-
- t1 = tcg_temp_new_i64();
- gen_vmulwod_d_wu_w(t1, a, b);
- tcg_gen_add_i64(t, t, t1);
-}
-
-static void do_vmaddwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shri_vec, INDEX_op_sari_vec,
- INDEX_op_mul_vec, INDEX_op_add_vec, 0
- };
- static const GVecGen3 op[3] = {
- {
- .fniv = gen_vmaddwod_u_s,
- .fno = gen_helper_vmaddwod_h_bu_b,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fni4 = gen_vmaddwod_w_hu_h,
- .fniv = gen_vmaddwod_u_s,
- .fno = gen_helper_vmaddwod_w_hu_h,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fni8 = gen_vmaddwod_d_wu_w,
- .fniv = gen_vmaddwod_u_s,
- .fno = gen_helper_vmaddwod_d_wu_w,
- .load_dest = true,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vmaddwod_h_bu_b, LSX, gvec_vvv, MO_8, do_vmaddwod_u_s)
-TRANS(vmaddwod_w_hu_h, LSX, gvec_vvv, MO_16, do_vmaddwod_u_s)
-TRANS(vmaddwod_d_wu_w, LSX, gvec_vvv, MO_32, do_vmaddwod_u_s)
-
-TRANS(vdiv_b, LSX, gen_vvv, gen_helper_vdiv_b)
-TRANS(vdiv_h, LSX, gen_vvv, gen_helper_vdiv_h)
-TRANS(vdiv_w, LSX, gen_vvv, gen_helper_vdiv_w)
-TRANS(vdiv_d, LSX, gen_vvv, gen_helper_vdiv_d)
-TRANS(vdiv_bu, LSX, gen_vvv, gen_helper_vdiv_bu)
-TRANS(vdiv_hu, LSX, gen_vvv, gen_helper_vdiv_hu)
-TRANS(vdiv_wu, LSX, gen_vvv, gen_helper_vdiv_wu)
-TRANS(vdiv_du, LSX, gen_vvv, gen_helper_vdiv_du)
-TRANS(vmod_b, LSX, gen_vvv, gen_helper_vmod_b)
-TRANS(vmod_h, LSX, gen_vvv, gen_helper_vmod_h)
-TRANS(vmod_w, LSX, gen_vvv, gen_helper_vmod_w)
-TRANS(vmod_d, LSX, gen_vvv, gen_helper_vmod_d)
-TRANS(vmod_bu, LSX, gen_vvv, gen_helper_vmod_bu)
-TRANS(vmod_hu, LSX, gen_vvv, gen_helper_vmod_hu)
-TRANS(vmod_wu, LSX, gen_vvv, gen_helper_vmod_wu)
-TRANS(vmod_du, LSX, gen_vvv, gen_helper_vmod_du)
-
-static void gen_vsat_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max)
-{
- TCGv_vec min;
-
- min = tcg_temp_new_vec_matching(t);
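- /* With max = 2^imm - 1, the lower bound is simply ~max = -2^imm. */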
- tcg_gen_not_vec(vece, min, max);
- tcg_gen_smax_vec(vece, t, a, min);
- tcg_gen_smin_vec(vece, t, t, max);
-}
-
-static void do_vsat_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_smax_vec, INDEX_op_smin_vec, 0
- };
- static const GVecGen2s op[4] = {
- {
- .fniv = gen_vsat_s,
- .fno = gen_helper_vsat_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vsat_s,
- .fno = gen_helper_vsat_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vsat_s,
- .fno = gen_helper_vsat_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vsat_s,
- .fno = gen_helper_vsat_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_2s(vd_ofs, vj_ofs, oprsz, maxsz,
- tcg_constant_i64((1ll<< imm) -1), &op[vece]);
-}
-
-TRANS(vsat_b, LSX, gvec_vv_i, MO_8, do_vsat_s)
-TRANS(vsat_h, LSX, gvec_vv_i, MO_16, do_vsat_s)
-TRANS(vsat_w, LSX, gvec_vv_i, MO_32, do_vsat_s)
-TRANS(vsat_d, LSX, gvec_vv_i, MO_64, do_vsat_s)
-
-static void gen_vsat_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max)
-{
- tcg_gen_umin_vec(vece, t, a, max);
-}
-
-static void do_vsat_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- uint64_t max;
- static const TCGOpcode vecop_list[] = {
- INDEX_op_umin_vec, 0
- };
- static const GVecGen2s op[4] = {
- {
- .fniv = gen_vsat_u,
- .fno = gen_helper_vsat_bu,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vsat_u,
- .fno = gen_helper_vsat_hu,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vsat_u,
- .fno = gen_helper_vsat_wu,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vsat_u,
- .fno = gen_helper_vsat_du,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
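- /* imm == 0x3f would shift by 64, so special-case the all-ones mask. */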
- max = (imm == 0x3f) ? UINT64_MAX : (1ull << (imm + 1)) - 1;
- tcg_gen_gvec_2s(vd_ofs, vj_ofs, oprsz, maxsz,
- tcg_constant_i64(max), &op[vece]);
-}
-
-TRANS(vsat_bu, LSX, gvec_vv_i, MO_8, do_vsat_u)
-TRANS(vsat_hu, LSX, gvec_vv_i, MO_16, do_vsat_u)
-TRANS(vsat_wu, LSX, gvec_vv_i, MO_32, do_vsat_u)
-TRANS(vsat_du, LSX, gvec_vv_i, MO_64, do_vsat_u)
-
-TRANS(vexth_h_b, LSX, gen_vv, gen_helper_vexth_h_b)
-TRANS(vexth_w_h, LSX, gen_vv, gen_helper_vexth_w_h)
-TRANS(vexth_d_w, LSX, gen_vv, gen_helper_vexth_d_w)
-TRANS(vexth_q_d, LSX, gen_vv, gen_helper_vexth_q_d)
-TRANS(vexth_hu_bu, LSX, gen_vv, gen_helper_vexth_hu_bu)
-TRANS(vexth_wu_hu, LSX, gen_vv, gen_helper_vexth_wu_hu)
-TRANS(vexth_du_wu, LSX, gen_vv, gen_helper_vexth_du_wu)
-TRANS(vexth_qu_du, LSX, gen_vv, gen_helper_vexth_qu_du)
-
-static void gen_vsigncov(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- TCGv_vec t1, zero;
-
- t1 = tcg_temp_new_vec_matching(t);
- zero = tcg_constant_vec_matching(t, vece, 0);
-
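- /* t = (a < 0) ? -b : (a == 0) ? 0 : b, built from two cmpsels. */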
- tcg_gen_neg_vec(vece, t1, b);
- tcg_gen_cmpsel_vec(TCG_COND_LT, vece, t, a, zero, t1, b);
- tcg_gen_cmpsel_vec(TCG_COND_EQ, vece, t, a, zero, zero, t);
-}
-
-static void do_vsigncov(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_neg_vec, INDEX_op_cmpsel_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vsigncov,
- .fno = gen_helper_vsigncov_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vsigncov,
- .fno = gen_helper_vsigncov_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vsigncov,
- .fno = gen_helper_vsigncov_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vsigncov,
- .fno = gen_helper_vsigncov_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vsigncov_b, LSX, gvec_vvv, MO_8, do_vsigncov)
-TRANS(vsigncov_h, LSX, gvec_vvv, MO_16, do_vsigncov)
-TRANS(vsigncov_w, LSX, gvec_vvv, MO_32, do_vsigncov)
-TRANS(vsigncov_d, LSX, gvec_vvv, MO_64, do_vsigncov)
-
-TRANS(vmskltz_b, LSX, gen_vv, gen_helper_vmskltz_b)
-TRANS(vmskltz_h, LSX, gen_vv, gen_helper_vmskltz_h)
-TRANS(vmskltz_w, LSX, gen_vv, gen_helper_vmskltz_w)
-TRANS(vmskltz_d, LSX, gen_vv, gen_helper_vmskltz_d)
-TRANS(vmskgez_b, LSX, gen_vv, gen_helper_vmskgez_b)
-TRANS(vmsknz_b, LSX, gen_vv, gen_helper_vmsknz_b)
-
-#define EXPAND_BYTE(bit) ((uint64_t)(bit ? 0xff : 0))
-
-static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm)
-{
- int mode;
- uint64_t data, t;
-
- /*
- * Bits [11:8] of imm select the mode; valid modes are 0-12,
- * all other values are invalid.
- */
- mode = (imm >> 8) & 0xf;
- t = imm & 0xff;
- switch (mode) {
- case 0:
- /* data: {2{24'0, imm[7:0]}} */
- data = (t << 32) | t;
- break;
- case 1:
- /* data: {2{16'0, imm[7:0], 8'0}} */
- data = (t << 40) | (t << 8);
- break;
- case 2:
- /* data: {2{8'0, imm[7:0], 16'0}} */
- data = (t << 48) | (t << 16);
- break;
- case 3:
- /* data: {2{imm[7:0], 24'0}} */
- data = (t << 56) | (t << 24);
- break;
- case 4:
- /* data: {4{8'0, imm[7:0]}} */
- data = (t << 48) | (t << 32) | (t << 16) | t;
- break;
- case 5:
- /* data: {4{imm[7:0], 8'0}} */
- data = (t << 56) | (t << 40) | (t << 24) | (t << 8);
- break;
- case 6:
- /* data: {2{16'0, imm[7:0], 8'1}} */
- data = (t << 40) | ((uint64_t)0xff << 32) | (t << 8) | 0xff;
- break;
- case 7:
- /* data: {2{8'0, imm[7:0], 16'1}} */
- data = (t << 48) | ((uint64_t)0xffff << 32) | (t << 16) | 0xffff;
- break;
- case 8:
- /* data: {8{imm[7:0]}} */
- data = (t << 56) | (t << 48) | (t << 40) | (t << 32) |
- (t << 24) | (t << 16) | (t << 8) | t;
- break;
- case 9:
- /* data: {{8{imm[7]}, ..., 8{imm[0]}}} */
- {
- uint64_t b0, b1, b2, b3, b4, b5, b6, b7;
- b0 = t & 0x1;
- b1 = (t & 0x2) >> 1;
- b2 = (t & 0x4) >> 2;
- b3 = (t & 0x8) >> 3;
- b4 = (t & 0x10) >> 4;
- b5 = (t & 0x20) >> 5;
- b6 = (t & 0x40) >> 6;
- b7 = (t & 0x80) >> 7;
- data = (EXPAND_BYTE(b7) << 56) |
- (EXPAND_BYTE(b6) << 48) |
- (EXPAND_BYTE(b5) << 40) |
- (EXPAND_BYTE(b4) << 32) |
- (EXPAND_BYTE(b3) << 24) |
- (EXPAND_BYTE(b2) << 16) |
- (EXPAND_BYTE(b1) << 8) |
- EXPAND_BYTE(b0);
- }
- break;
- case 10:
- /* data: {2{imm[7], ~imm[6], {5{imm[6]}}, imm[5:0], 19'0}} */
- {
- uint64_t b6, b7;
- uint64_t t0, t1;
- b6 = (imm & 0x40) >> 6;
- b7 = (imm & 0x80) >> 7;
- t0 = (imm & 0x3f);
- t1 = (b7 << 6) | ((1 - b6) << 5) | (uint64_t)(b6 ? 0x1f : 0);
- data = (t1 << 57) | (t0 << 51) | (t1 << 25) | (t0 << 19);
- }
- break;
- case 11:
- /* data: {32'0, imm[7], ~imm[6], {5{imm[6]}}, imm[5:0], 19'0} */
- {
- uint64_t b6, b7;
- uint64_t t0, t1;
- b6 = (imm & 0x40) >> 6;
- b7 = (imm & 0x80) >> 7;
- t0 = (imm & 0x3f);
- t1 = (b7 << 6) | ((1 - b6) << 5) | (b6 ? 0x1f : 0);
- data = (t1 << 25) | (t0 << 19);
- }
- break;
- case 12:
- /* data: {imm[7], ~imm[6], {8{imm[6]}}, imm[5:0], 48'0} */
- {
- uint64_t b6, b7;
- uint64_t t0, t1;
- b6 = (imm & 0x40) >> 6;
- b7 = (imm & 0x80) >> 7;
- t0 = (imm & 0x3f);
- t1 = (b7 << 9) | ((1 - b6) << 8) | (b6 ? 0xff : 0);
- data = (t1 << 54) | (t0 << 48);
- }
- break;
- default:
- /* Reachable with a reserved mode encoding; don't assert at translate time. */
- generate_exception(ctx, EXCCODE_INE);
- data = 0;
- }
- return data;
-}
-
-static bool trans_vldi(DisasContext *ctx, arg_vldi *a)
-{
- int sel, vece;
- uint64_t value;
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
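- /*
- * imm[12] selects the encoding: when set, the 64-bit pattern comes
- * from vldi_get_value(); otherwise the sign-extended 10-bit immediate
- * imm[9:0] is broadcast with element size imm[11:10].
- */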
- sel = (a->imm >> 12) & 0x1;
-
- if (sel) {
- value = vldi_get_value(ctx, a->imm);
- vece = MO_64;
- } else {
- value = ((int32_t)(a->imm << 22)) >> 22;
- vece = (a->imm >> 10) & 0x3;
- }
-
- tcg_gen_gvec_dup_i64(vece, vec_full_offset(a->vd), 16, ctx->vl/8,
- tcg_constant_i64(value));
- return true;
-}
-
-TRANS(vand_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_and)
-TRANS(vor_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_or)
-TRANS(vxor_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_xor)
-TRANS(vnor_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_nor)
-
-static bool trans_vandn_v(DisasContext *ctx, arg_vvv *a)
-{
- uint32_t vd_ofs, vj_ofs, vk_ofs;
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- vd_ofs = vec_full_offset(a->vd);
- vj_ofs = vec_full_offset(a->vj);
- vk_ofs = vec_full_offset(a->vk);
-
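- /* vandn_v computes vk & ~vj, hence the swapped operands to andc. */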
- tcg_gen_gvec_andc(MO_64, vd_ofs, vk_ofs, vj_ofs, 16, ctx->vl/8);
- return true;
-}
-TRANS(vorn_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_orc)
-TRANS(vandi_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_andi)
-TRANS(vori_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_ori)
-TRANS(vxori_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_xori)
-
-static void gen_vnori(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- TCGv_vec t1;
-
- t1 = tcg_constant_vec_matching(t, vece, imm);
- tcg_gen_nor_vec(vece, t, a, t1);
-}
-
-static void gen_vnori_b(TCGv_i64 t, TCGv_i64 a, int64_t imm)
-{
- tcg_gen_movi_i64(t, dup_const(MO_8, imm));
- tcg_gen_nor_i64(t, a, t);
-}
-
-static void do_vnori_b(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_nor_vec, 0
- };
- static const GVecGen2i op = {
- .fni8 = gen_vnori_b,
- .fniv = gen_vnori,
- .fnoi = gen_helper_vnori_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- };
-
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op);
-}
-
-TRANS(vnori_b, LSX, gvec_vv_i, MO_8, do_vnori_b)
-
-TRANS(vsll_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_shlv)
-TRANS(vsll_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_shlv)
-TRANS(vsll_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_shlv)
-TRANS(vsll_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_shlv)
-TRANS(vslli_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_shli)
-TRANS(vslli_h, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_shli)
-TRANS(vslli_w, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_shli)
-TRANS(vslli_d, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_shli)
-
-TRANS(vsrl_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_shrv)
-TRANS(vsrl_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_shrv)
-TRANS(vsrl_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_shrv)
-TRANS(vsrl_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_shrv)
-TRANS(vsrli_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_shri)
-TRANS(vsrli_h, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_shri)
-TRANS(vsrli_w, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_shri)
-TRANS(vsrli_d, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_shri)
-
-TRANS(vsra_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_sarv)
-TRANS(vsra_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_sarv)
-TRANS(vsra_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_sarv)
-TRANS(vsra_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_sarv)
-TRANS(vsrai_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_sari)
-TRANS(vsrai_h, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_sari)
-TRANS(vsrai_w, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_sari)
-TRANS(vsrai_d, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_sari)
-
-TRANS(vrotr_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_rotrv)
-TRANS(vrotr_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_rotrv)
-TRANS(vrotr_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_rotrv)
-TRANS(vrotr_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_rotrv)
-TRANS(vrotri_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_rotri)
-TRANS(vrotri_h, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_rotri)
-TRANS(vrotri_w, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_rotri)
-TRANS(vrotri_d, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_rotri)
-
-TRANS(vsllwil_h_b, LSX, gen_vv_i, gen_helper_vsllwil_h_b)
-TRANS(vsllwil_w_h, LSX, gen_vv_i, gen_helper_vsllwil_w_h)
-TRANS(vsllwil_d_w, LSX, gen_vv_i, gen_helper_vsllwil_d_w)
-TRANS(vextl_q_d, LSX, gen_vv, gen_helper_vextl_q_d)
-TRANS(vsllwil_hu_bu, LSX, gen_vv_i, gen_helper_vsllwil_hu_bu)
-TRANS(vsllwil_wu_hu, LSX, gen_vv_i, gen_helper_vsllwil_wu_hu)
-TRANS(vsllwil_du_wu, LSX, gen_vv_i, gen_helper_vsllwil_du_wu)
-TRANS(vextl_qu_du, LSX, gen_vv, gen_helper_vextl_qu_du)
-
-TRANS(vsrlr_b, LSX, gen_vvv, gen_helper_vsrlr_b)
-TRANS(vsrlr_h, LSX, gen_vvv, gen_helper_vsrlr_h)
-TRANS(vsrlr_w, LSX, gen_vvv, gen_helper_vsrlr_w)
-TRANS(vsrlr_d, LSX, gen_vvv, gen_helper_vsrlr_d)
-TRANS(vsrlri_b, LSX, gen_vv_i, gen_helper_vsrlri_b)
-TRANS(vsrlri_h, LSX, gen_vv_i, gen_helper_vsrlri_h)
-TRANS(vsrlri_w, LSX, gen_vv_i, gen_helper_vsrlri_w)
-TRANS(vsrlri_d, LSX, gen_vv_i, gen_helper_vsrlri_d)
-
-TRANS(vsrar_b, LSX, gen_vvv, gen_helper_vsrar_b)
-TRANS(vsrar_h, LSX, gen_vvv, gen_helper_vsrar_h)
-TRANS(vsrar_w, LSX, gen_vvv, gen_helper_vsrar_w)
-TRANS(vsrar_d, LSX, gen_vvv, gen_helper_vsrar_d)
-TRANS(vsrari_b, LSX, gen_vv_i, gen_helper_vsrari_b)
-TRANS(vsrari_h, LSX, gen_vv_i, gen_helper_vsrari_h)
-TRANS(vsrari_w, LSX, gen_vv_i, gen_helper_vsrari_w)
-TRANS(vsrari_d, LSX, gen_vv_i, gen_helper_vsrari_d)
-
-TRANS(vsrln_b_h, LSX, gen_vvv, gen_helper_vsrln_b_h)
-TRANS(vsrln_h_w, LSX, gen_vvv, gen_helper_vsrln_h_w)
-TRANS(vsrln_w_d, LSX, gen_vvv, gen_helper_vsrln_w_d)
-TRANS(vsran_b_h, LSX, gen_vvv, gen_helper_vsran_b_h)
-TRANS(vsran_h_w, LSX, gen_vvv, gen_helper_vsran_h_w)
-TRANS(vsran_w_d, LSX, gen_vvv, gen_helper_vsran_w_d)
-
-TRANS(vsrlni_b_h, LSX, gen_vv_i, gen_helper_vsrlni_b_h)
-TRANS(vsrlni_h_w, LSX, gen_vv_i, gen_helper_vsrlni_h_w)
-TRANS(vsrlni_w_d, LSX, gen_vv_i, gen_helper_vsrlni_w_d)
-TRANS(vsrlni_d_q, LSX, gen_vv_i, gen_helper_vsrlni_d_q)
-TRANS(vsrani_b_h, LSX, gen_vv_i, gen_helper_vsrani_b_h)
-TRANS(vsrani_h_w, LSX, gen_vv_i, gen_helper_vsrani_h_w)
-TRANS(vsrani_w_d, LSX, gen_vv_i, gen_helper_vsrani_w_d)
-TRANS(vsrani_d_q, LSX, gen_vv_i, gen_helper_vsrani_d_q)
-
-TRANS(vsrlrn_b_h, LSX, gen_vvv, gen_helper_vsrlrn_b_h)
-TRANS(vsrlrn_h_w, LSX, gen_vvv, gen_helper_vsrlrn_h_w)
-TRANS(vsrlrn_w_d, LSX, gen_vvv, gen_helper_vsrlrn_w_d)
-TRANS(vsrarn_b_h, LSX, gen_vvv, gen_helper_vsrarn_b_h)
-TRANS(vsrarn_h_w, LSX, gen_vvv, gen_helper_vsrarn_h_w)
-TRANS(vsrarn_w_d, LSX, gen_vvv, gen_helper_vsrarn_w_d)
-
-TRANS(vsrlrni_b_h, LSX, gen_vv_i, gen_helper_vsrlrni_b_h)
-TRANS(vsrlrni_h_w, LSX, gen_vv_i, gen_helper_vsrlrni_h_w)
-TRANS(vsrlrni_w_d, LSX, gen_vv_i, gen_helper_vsrlrni_w_d)
-TRANS(vsrlrni_d_q, LSX, gen_vv_i, gen_helper_vsrlrni_d_q)
-TRANS(vsrarni_b_h, LSX, gen_vv_i, gen_helper_vsrarni_b_h)
-TRANS(vsrarni_h_w, LSX, gen_vv_i, gen_helper_vsrarni_h_w)
-TRANS(vsrarni_w_d, LSX, gen_vv_i, gen_helper_vsrarni_w_d)
-TRANS(vsrarni_d_q, LSX, gen_vv_i, gen_helper_vsrarni_d_q)
-
-TRANS(vssrln_b_h, LSX, gen_vvv, gen_helper_vssrln_b_h)
-TRANS(vssrln_h_w, LSX, gen_vvv, gen_helper_vssrln_h_w)
-TRANS(vssrln_w_d, LSX, gen_vvv, gen_helper_vssrln_w_d)
-TRANS(vssran_b_h, LSX, gen_vvv, gen_helper_vssran_b_h)
-TRANS(vssran_h_w, LSX, gen_vvv, gen_helper_vssran_h_w)
-TRANS(vssran_w_d, LSX, gen_vvv, gen_helper_vssran_w_d)
-TRANS(vssrln_bu_h, LSX, gen_vvv, gen_helper_vssrln_bu_h)
-TRANS(vssrln_hu_w, LSX, gen_vvv, gen_helper_vssrln_hu_w)
-TRANS(vssrln_wu_d, LSX, gen_vvv, gen_helper_vssrln_wu_d)
-TRANS(vssran_bu_h, LSX, gen_vvv, gen_helper_vssran_bu_h)
-TRANS(vssran_hu_w, LSX, gen_vvv, gen_helper_vssran_hu_w)
-TRANS(vssran_wu_d, LSX, gen_vvv, gen_helper_vssran_wu_d)
-
-TRANS(vssrlni_b_h, LSX, gen_vv_i, gen_helper_vssrlni_b_h)
-TRANS(vssrlni_h_w, LSX, gen_vv_i, gen_helper_vssrlni_h_w)
-TRANS(vssrlni_w_d, LSX, gen_vv_i, gen_helper_vssrlni_w_d)
-TRANS(vssrlni_d_q, LSX, gen_vv_i, gen_helper_vssrlni_d_q)
-TRANS(vssrani_b_h, LSX, gen_vv_i, gen_helper_vssrani_b_h)
-TRANS(vssrani_h_w, LSX, gen_vv_i, gen_helper_vssrani_h_w)
-TRANS(vssrani_w_d, LSX, gen_vv_i, gen_helper_vssrani_w_d)
-TRANS(vssrani_d_q, LSX, gen_vv_i, gen_helper_vssrani_d_q)
-TRANS(vssrlni_bu_h, LSX, gen_vv_i, gen_helper_vssrlni_bu_h)
-TRANS(vssrlni_hu_w, LSX, gen_vv_i, gen_helper_vssrlni_hu_w)
-TRANS(vssrlni_wu_d, LSX, gen_vv_i, gen_helper_vssrlni_wu_d)
-TRANS(vssrlni_du_q, LSX, gen_vv_i, gen_helper_vssrlni_du_q)
-TRANS(vssrani_bu_h, LSX, gen_vv_i, gen_helper_vssrani_bu_h)
-TRANS(vssrani_hu_w, LSX, gen_vv_i, gen_helper_vssrani_hu_w)
-TRANS(vssrani_wu_d, LSX, gen_vv_i, gen_helper_vssrani_wu_d)
-TRANS(vssrani_du_q, LSX, gen_vv_i, gen_helper_vssrani_du_q)
-
-TRANS(vssrlrn_b_h, LSX, gen_vvv, gen_helper_vssrlrn_b_h)
-TRANS(vssrlrn_h_w, LSX, gen_vvv, gen_helper_vssrlrn_h_w)
-TRANS(vssrlrn_w_d, LSX, gen_vvv, gen_helper_vssrlrn_w_d)
-TRANS(vssrarn_b_h, LSX, gen_vvv, gen_helper_vssrarn_b_h)
-TRANS(vssrarn_h_w, LSX, gen_vvv, gen_helper_vssrarn_h_w)
-TRANS(vssrarn_w_d, LSX, gen_vvv, gen_helper_vssrarn_w_d)
-TRANS(vssrlrn_bu_h, LSX, gen_vvv, gen_helper_vssrlrn_bu_h)
-TRANS(vssrlrn_hu_w, LSX, gen_vvv, gen_helper_vssrlrn_hu_w)
-TRANS(vssrlrn_wu_d, LSX, gen_vvv, gen_helper_vssrlrn_wu_d)
-TRANS(vssrarn_bu_h, LSX, gen_vvv, gen_helper_vssrarn_bu_h)
-TRANS(vssrarn_hu_w, LSX, gen_vvv, gen_helper_vssrarn_hu_w)
-TRANS(vssrarn_wu_d, LSX, gen_vvv, gen_helper_vssrarn_wu_d)
-
-TRANS(vssrlrni_b_h, LSX, gen_vv_i, gen_helper_vssrlrni_b_h)
-TRANS(vssrlrni_h_w, LSX, gen_vv_i, gen_helper_vssrlrni_h_w)
-TRANS(vssrlrni_w_d, LSX, gen_vv_i, gen_helper_vssrlrni_w_d)
-TRANS(vssrlrni_d_q, LSX, gen_vv_i, gen_helper_vssrlrni_d_q)
-TRANS(vssrarni_b_h, LSX, gen_vv_i, gen_helper_vssrarni_b_h)
-TRANS(vssrarni_h_w, LSX, gen_vv_i, gen_helper_vssrarni_h_w)
-TRANS(vssrarni_w_d, LSX, gen_vv_i, gen_helper_vssrarni_w_d)
-TRANS(vssrarni_d_q, LSX, gen_vv_i, gen_helper_vssrarni_d_q)
-TRANS(vssrlrni_bu_h, LSX, gen_vv_i, gen_helper_vssrlrni_bu_h)
-TRANS(vssrlrni_hu_w, LSX, gen_vv_i, gen_helper_vssrlrni_hu_w)
-TRANS(vssrlrni_wu_d, LSX, gen_vv_i, gen_helper_vssrlrni_wu_d)
-TRANS(vssrlrni_du_q, LSX, gen_vv_i, gen_helper_vssrlrni_du_q)
-TRANS(vssrarni_bu_h, LSX, gen_vv_i, gen_helper_vssrarni_bu_h)
-TRANS(vssrarni_hu_w, LSX, gen_vv_i, gen_helper_vssrarni_hu_w)
-TRANS(vssrarni_wu_d, LSX, gen_vv_i, gen_helper_vssrarni_wu_d)
-TRANS(vssrarni_du_q, LSX, gen_vv_i, gen_helper_vssrarni_du_q)
-
-TRANS(vclo_b, LSX, gen_vv, gen_helper_vclo_b)
-TRANS(vclo_h, LSX, gen_vv, gen_helper_vclo_h)
-TRANS(vclo_w, LSX, gen_vv, gen_helper_vclo_w)
-TRANS(vclo_d, LSX, gen_vv, gen_helper_vclo_d)
-TRANS(vclz_b, LSX, gen_vv, gen_helper_vclz_b)
-TRANS(vclz_h, LSX, gen_vv, gen_helper_vclz_h)
-TRANS(vclz_w, LSX, gen_vv, gen_helper_vclz_w)
-TRANS(vclz_d, LSX, gen_vv, gen_helper_vclz_d)
-
-TRANS(vpcnt_b, LSX, gen_vv, gen_helper_vpcnt_b)
-TRANS(vpcnt_h, LSX, gen_vv, gen_helper_vpcnt_h)
-TRANS(vpcnt_w, LSX, gen_vv, gen_helper_vpcnt_w)
-TRANS(vpcnt_d, LSX, gen_vv, gen_helper_vpcnt_d)
-
-static void do_vbit(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
- void (*func)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
-{
- TCGv_vec mask, lsh, t1, one;
-
- lsh = tcg_temp_new_vec_matching(t);
- t1 = tcg_temp_new_vec_matching(t);
- mask = tcg_constant_vec_matching(t, vece, (8 << vece) - 1);
- one = tcg_constant_vec_matching(t, vece, 1);
-
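- /* Reduce the shift count mod the element width, then form 1 << n. */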
- tcg_gen_and_vec(vece, lsh, b, mask);
- tcg_gen_shlv_vec(vece, t1, one, lsh);
- func(vece, t, a, t1);
-}
-
-static void gen_vbitclr(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- do_vbit(vece, t, a, b, tcg_gen_andc_vec);
-}
-
-static void gen_vbitset(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- do_vbit(vece, t, a, b, tcg_gen_or_vec);
-}
-
-static void gen_vbitrev(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
-{
- do_vbit(vece, t, a, b, tcg_gen_xor_vec);
-}
-
-static void do_vbitclr(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shlv_vec, INDEX_op_andc_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vbitclr,
- .fno = gen_helper_vbitclr_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vbitclr,
- .fno = gen_helper_vbitclr_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vbitclr,
- .fno = gen_helper_vbitclr_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vbitclr,
- .fno = gen_helper_vbitclr_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vbitclr_b, LSX, gvec_vvv, MO_8, do_vbitclr)
-TRANS(vbitclr_h, LSX, gvec_vvv, MO_16, do_vbitclr)
-TRANS(vbitclr_w, LSX, gvec_vvv, MO_32, do_vbitclr)
-TRANS(vbitclr_d, LSX, gvec_vvv, MO_64, do_vbitclr)
-
-static void do_vbiti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm,
- void (*func)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
-{
- int lsh;
- TCGv_vec t1, one;
-
- lsh = imm & ((8 << vece) - 1);
- t1 = tcg_temp_new_vec_matching(t);
- one = tcg_constant_vec_matching(t, vece, 1);
-
- tcg_gen_shli_vec(vece, t1, one, lsh);
- func(vece, t, a, t1);
-}
-
-static void gen_vbitclri(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- do_vbiti(vece, t, a, imm, tcg_gen_andc_vec);
-}
-
-static void gen_vbitseti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- do_vbiti(vece, t, a, imm, tcg_gen_or_vec);
-}
-
-static void gen_vbitrevi(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- do_vbiti(vece, t, a, imm, tcg_gen_xor_vec);
-}
-
-static void do_vbitclri(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, INDEX_op_andc_vec, 0
- };
- static const GVecGen2i op[4] = {
- {
- .fniv = gen_vbitclri,
- .fnoi = gen_helper_vbitclri_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vbitclri,
- .fnoi = gen_helper_vbitclri_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vbitclri,
- .fnoi = gen_helper_vbitclri_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vbitclri,
- .fnoi = gen_helper_vbitclri_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
-}
-
-TRANS(vbitclri_b, LSX, gvec_vv_i, MO_8, do_vbitclri)
-TRANS(vbitclri_h, LSX, gvec_vv_i, MO_16, do_vbitclri)
-TRANS(vbitclri_w, LSX, gvec_vv_i, MO_32, do_vbitclri)
-TRANS(vbitclri_d, LSX, gvec_vv_i, MO_64, do_vbitclri)
-
-static void do_vbitset(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shlv_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vbitset,
- .fno = gen_helper_vbitset_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vbitset,
- .fno = gen_helper_vbitset_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vbitset,
- .fno = gen_helper_vbitset_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vbitset,
- .fno = gen_helper_vbitset_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vbitset_b, LSX, gvec_vvv, MO_8, do_vbitset)
-TRANS(vbitset_h, LSX, gvec_vvv, MO_16, do_vbitset)
-TRANS(vbitset_w, LSX, gvec_vvv, MO_32, do_vbitset)
-TRANS(vbitset_d, LSX, gvec_vvv, MO_64, do_vbitset)
-
-static void do_vbitseti(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, 0
- };
- static const GVecGen2i op[4] = {
- {
- .fniv = gen_vbitseti,
- .fnoi = gen_helper_vbitseti_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vbitseti,
- .fnoi = gen_helper_vbitseti_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vbitseti,
- .fnoi = gen_helper_vbitseti_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vbitseti,
- .fnoi = gen_helper_vbitseti_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
-}
-
-TRANS(vbitseti_b, LSX, gvec_vv_i, MO_8, do_vbitseti)
-TRANS(vbitseti_h, LSX, gvec_vv_i, MO_16, do_vbitseti)
-TRANS(vbitseti_w, LSX, gvec_vv_i, MO_32, do_vbitseti)
-TRANS(vbitseti_d, LSX, gvec_vv_i, MO_64, do_vbitseti)
-
-static void do_vbitrev(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shlv_vec, 0
- };
- static const GVecGen3 op[4] = {
- {
- .fniv = gen_vbitrev,
- .fno = gen_helper_vbitrev_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vbitrev,
- .fno = gen_helper_vbitrev_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vbitrev,
- .fno = gen_helper_vbitrev_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vbitrev,
- .fno = gen_helper_vbitrev_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
-}
-
-TRANS(vbitrev_b, LSX, gvec_vvv, MO_8, do_vbitrev)
-TRANS(vbitrev_h, LSX, gvec_vvv, MO_16, do_vbitrev)
-TRANS(vbitrev_w, LSX, gvec_vvv, MO_32, do_vbitrev)
-TRANS(vbitrev_d, LSX, gvec_vvv, MO_64, do_vbitrev)
-
-static void do_vbitrevi(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
- int64_t imm, uint32_t oprsz, uint32_t maxsz)
-{
- static const TCGOpcode vecop_list[] = {
- INDEX_op_shli_vec, 0
- };
- static const GVecGen2i op[4] = {
- {
- .fniv = gen_vbitrevi,
- .fnoi = gen_helper_vbitrevi_b,
- .opt_opc = vecop_list,
- .vece = MO_8
- },
- {
- .fniv = gen_vbitrevi,
- .fnoi = gen_helper_vbitrevi_h,
- .opt_opc = vecop_list,
- .vece = MO_16
- },
- {
- .fniv = gen_vbitrevi,
- .fnoi = gen_helper_vbitrevi_w,
- .opt_opc = vecop_list,
- .vece = MO_32
- },
- {
- .fniv = gen_vbitrevi,
- .fnoi = gen_helper_vbitrevi_d,
- .opt_opc = vecop_list,
- .vece = MO_64
- },
- };
-
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
-}
-
-TRANS(vbitrevi_b, LSX, gvec_vv_i, MO_8, do_vbitrevi)
-TRANS(vbitrevi_h, LSX, gvec_vv_i, MO_16, do_vbitrevi)
-TRANS(vbitrevi_w, LSX, gvec_vv_i, MO_32, do_vbitrevi)
-TRANS(vbitrevi_d, LSX, gvec_vv_i, MO_64, do_vbitrevi)
-
-TRANS(vfrstp_b, LSX, gen_vvv, gen_helper_vfrstp_b)
-TRANS(vfrstp_h, LSX, gen_vvv, gen_helper_vfrstp_h)
-TRANS(vfrstpi_b, LSX, gen_vv_i, gen_helper_vfrstpi_b)
-TRANS(vfrstpi_h, LSX, gen_vv_i, gen_helper_vfrstpi_h)
-
-TRANS(vfadd_s, LSX, gen_vvv, gen_helper_vfadd_s)
-TRANS(vfadd_d, LSX, gen_vvv, gen_helper_vfadd_d)
-TRANS(vfsub_s, LSX, gen_vvv, gen_helper_vfsub_s)
-TRANS(vfsub_d, LSX, gen_vvv, gen_helper_vfsub_d)
-TRANS(vfmul_s, LSX, gen_vvv, gen_helper_vfmul_s)
-TRANS(vfmul_d, LSX, gen_vvv, gen_helper_vfmul_d)
-TRANS(vfdiv_s, LSX, gen_vvv, gen_helper_vfdiv_s)
-TRANS(vfdiv_d, LSX, gen_vvv, gen_helper_vfdiv_d)
-
-TRANS(vfmadd_s, LSX, gen_vvvv, gen_helper_vfmadd_s)
-TRANS(vfmadd_d, LSX, gen_vvvv, gen_helper_vfmadd_d)
-TRANS(vfmsub_s, LSX, gen_vvvv, gen_helper_vfmsub_s)
-TRANS(vfmsub_d, LSX, gen_vvvv, gen_helper_vfmsub_d)
-TRANS(vfnmadd_s, LSX, gen_vvvv, gen_helper_vfnmadd_s)
-TRANS(vfnmadd_d, LSX, gen_vvvv, gen_helper_vfnmadd_d)
-TRANS(vfnmsub_s, LSX, gen_vvvv, gen_helper_vfnmsub_s)
-TRANS(vfnmsub_d, LSX, gen_vvvv, gen_helper_vfnmsub_d)
-
-TRANS(vfmax_s, LSX, gen_vvv, gen_helper_vfmax_s)
-TRANS(vfmax_d, LSX, gen_vvv, gen_helper_vfmax_d)
-TRANS(vfmin_s, LSX, gen_vvv, gen_helper_vfmin_s)
-TRANS(vfmin_d, LSX, gen_vvv, gen_helper_vfmin_d)
-
-TRANS(vfmaxa_s, LSX, gen_vvv, gen_helper_vfmaxa_s)
-TRANS(vfmaxa_d, LSX, gen_vvv, gen_helper_vfmaxa_d)
-TRANS(vfmina_s, LSX, gen_vvv, gen_helper_vfmina_s)
-TRANS(vfmina_d, LSX, gen_vvv, gen_helper_vfmina_d)
-
-TRANS(vflogb_s, LSX, gen_vv, gen_helper_vflogb_s)
-TRANS(vflogb_d, LSX, gen_vv, gen_helper_vflogb_d)
-
-TRANS(vfclass_s, LSX, gen_vv, gen_helper_vfclass_s)
-TRANS(vfclass_d, LSX, gen_vv, gen_helper_vfclass_d)
-
-TRANS(vfsqrt_s, LSX, gen_vv, gen_helper_vfsqrt_s)
-TRANS(vfsqrt_d, LSX, gen_vv, gen_helper_vfsqrt_d)
-TRANS(vfrecip_s, LSX, gen_vv, gen_helper_vfrecip_s)
-TRANS(vfrecip_d, LSX, gen_vv, gen_helper_vfrecip_d)
-TRANS(vfrsqrt_s, LSX, gen_vv, gen_helper_vfrsqrt_s)
-TRANS(vfrsqrt_d, LSX, gen_vv, gen_helper_vfrsqrt_d)
-
-TRANS(vfcvtl_s_h, LSX, gen_vv, gen_helper_vfcvtl_s_h)
-TRANS(vfcvth_s_h, LSX, gen_vv, gen_helper_vfcvth_s_h)
-TRANS(vfcvtl_d_s, LSX, gen_vv, gen_helper_vfcvtl_d_s)
-TRANS(vfcvth_d_s, LSX, gen_vv, gen_helper_vfcvth_d_s)
-TRANS(vfcvt_h_s, LSX, gen_vvv, gen_helper_vfcvt_h_s)
-TRANS(vfcvt_s_d, LSX, gen_vvv, gen_helper_vfcvt_s_d)
-
-TRANS(vfrintrne_s, LSX, gen_vv, gen_helper_vfrintrne_s)
-TRANS(vfrintrne_d, LSX, gen_vv, gen_helper_vfrintrne_d)
-TRANS(vfrintrz_s, LSX, gen_vv, gen_helper_vfrintrz_s)
-TRANS(vfrintrz_d, LSX, gen_vv, gen_helper_vfrintrz_d)
-TRANS(vfrintrp_s, LSX, gen_vv, gen_helper_vfrintrp_s)
-TRANS(vfrintrp_d, LSX, gen_vv, gen_helper_vfrintrp_d)
-TRANS(vfrintrm_s, LSX, gen_vv, gen_helper_vfrintrm_s)
-TRANS(vfrintrm_d, LSX, gen_vv, gen_helper_vfrintrm_d)
-TRANS(vfrint_s, LSX, gen_vv, gen_helper_vfrint_s)
-TRANS(vfrint_d, LSX, gen_vv, gen_helper_vfrint_d)
-
-TRANS(vftintrne_w_s, LSX, gen_vv, gen_helper_vftintrne_w_s)
-TRANS(vftintrne_l_d, LSX, gen_vv, gen_helper_vftintrne_l_d)
-TRANS(vftintrz_w_s, LSX, gen_vv, gen_helper_vftintrz_w_s)
-TRANS(vftintrz_l_d, LSX, gen_vv, gen_helper_vftintrz_l_d)
-TRANS(vftintrp_w_s, LSX, gen_vv, gen_helper_vftintrp_w_s)
-TRANS(vftintrp_l_d, LSX, gen_vv, gen_helper_vftintrp_l_d)
-TRANS(vftintrm_w_s, LSX, gen_vv, gen_helper_vftintrm_w_s)
-TRANS(vftintrm_l_d, LSX, gen_vv, gen_helper_vftintrm_l_d)
-TRANS(vftint_w_s, LSX, gen_vv, gen_helper_vftint_w_s)
-TRANS(vftint_l_d, LSX, gen_vv, gen_helper_vftint_l_d)
-TRANS(vftintrz_wu_s, LSX, gen_vv, gen_helper_vftintrz_wu_s)
-TRANS(vftintrz_lu_d, LSX, gen_vv, gen_helper_vftintrz_lu_d)
-TRANS(vftint_wu_s, LSX, gen_vv, gen_helper_vftint_wu_s)
-TRANS(vftint_lu_d, LSX, gen_vv, gen_helper_vftint_lu_d)
-TRANS(vftintrne_w_d, LSX, gen_vvv, gen_helper_vftintrne_w_d)
-TRANS(vftintrz_w_d, LSX, gen_vvv, gen_helper_vftintrz_w_d)
-TRANS(vftintrp_w_d, LSX, gen_vvv, gen_helper_vftintrp_w_d)
-TRANS(vftintrm_w_d, LSX, gen_vvv, gen_helper_vftintrm_w_d)
-TRANS(vftint_w_d, LSX, gen_vvv, gen_helper_vftint_w_d)
-TRANS(vftintrnel_l_s, LSX, gen_vv, gen_helper_vftintrnel_l_s)
-TRANS(vftintrneh_l_s, LSX, gen_vv, gen_helper_vftintrneh_l_s)
-TRANS(vftintrzl_l_s, LSX, gen_vv, gen_helper_vftintrzl_l_s)
-TRANS(vftintrzh_l_s, LSX, gen_vv, gen_helper_vftintrzh_l_s)
-TRANS(vftintrpl_l_s, LSX, gen_vv, gen_helper_vftintrpl_l_s)
-TRANS(vftintrph_l_s, LSX, gen_vv, gen_helper_vftintrph_l_s)
-TRANS(vftintrml_l_s, LSX, gen_vv, gen_helper_vftintrml_l_s)
-TRANS(vftintrmh_l_s, LSX, gen_vv, gen_helper_vftintrmh_l_s)
-TRANS(vftintl_l_s, LSX, gen_vv, gen_helper_vftintl_l_s)
-TRANS(vftinth_l_s, LSX, gen_vv, gen_helper_vftinth_l_s)
-
-TRANS(vffint_s_w, LSX, gen_vv, gen_helper_vffint_s_w)
-TRANS(vffint_d_l, LSX, gen_vv, gen_helper_vffint_d_l)
-TRANS(vffint_s_wu, LSX, gen_vv, gen_helper_vffint_s_wu)
-TRANS(vffint_d_lu, LSX, gen_vv, gen_helper_vffint_d_lu)
-TRANS(vffintl_d_w, LSX, gen_vv, gen_helper_vffintl_d_w)
-TRANS(vffinth_d_w, LSX, gen_vv, gen_helper_vffinth_d_w)
-TRANS(vffint_s_l, LSX, gen_vvv, gen_helper_vffint_s_l)
-
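-/*
- * Integer compares: tcg_gen_gvec_cmp() writes all-ones into each vd
- * element for which the condition holds and zero otherwise.
- */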
-static bool do_cmp(DisasContext *ctx, arg_vvv *a, MemOp mop, TCGCond cond)
-{
- uint32_t vd_ofs, vj_ofs, vk_ofs;
-
- CHECK_SXE;
-
- vd_ofs = vec_full_offset(a->vd);
- vj_ofs = vec_full_offset(a->vj);
- vk_ofs = vec_full_offset(a->vk);
-
- tcg_gen_gvec_cmp(cond, mop, vd_ofs, vj_ofs, vk_ofs, 16, ctx->vl/8);
- return true;
-}
-
-static void do_cmpi_vec(TCGCond cond,
- unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- tcg_gen_cmp_vec(cond, vece, t, a, tcg_constant_vec_matching(t, vece, imm));
-}
-
-static void gen_vseqi_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- do_cmpi_vec(TCG_COND_EQ, vece, t, a, imm);
-}
-
-static void gen_vslei_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- do_cmpi_vec(TCG_COND_LE, vece, t, a, imm);
-}
-
-static void gen_vslti_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- do_cmpi_vec(TCG_COND_LT, vece, t, a, imm);
-}
-
-static void gen_vslei_u_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- do_cmpi_vec(TCG_COND_LEU, vece, t, a, imm);
-}
-
-static void gen_vslti_u_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
-{
- do_cmpi_vec(TCG_COND_LTU, vece, t, a, imm);
-}
-
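-/*
- * Compare against an immediate: expand via cmp_vec against a constant
- * vector, with per-element-size out-of-line helpers as fallback.
- */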
-#define DO_CMPI_S(NAME) \
-static bool do_## NAME ##_s(DisasContext *ctx, arg_vv_i *a, MemOp mop) \
-{ \
- uint32_t vd_ofs, vj_ofs; \
- \
- CHECK_SXE; \
- \
- static const TCGOpcode vecop_list[] = { \
- INDEX_op_cmp_vec, 0 \
- }; \
- static const GVecGen2i op[4] = { \
- { \
- .fniv = gen_## NAME ##_s_vec, \
- .fnoi = gen_helper_## NAME ##_b, \
- .opt_opc = vecop_list, \
- .vece = MO_8 \
- }, \
- { \
- .fniv = gen_## NAME ##_s_vec, \
- .fnoi = gen_helper_## NAME ##_h, \
- .opt_opc = vecop_list, \
- .vece = MO_16 \
- }, \
- { \
- .fniv = gen_## NAME ##_s_vec, \
- .fnoi = gen_helper_## NAME ##_w, \
- .opt_opc = vecop_list, \
- .vece = MO_32 \
- }, \
- { \
- .fniv = gen_## NAME ##_s_vec, \
- .fnoi = gen_helper_## NAME ##_d, \
- .opt_opc = vecop_list, \
- .vece = MO_64 \
- } \
- }; \
- \
- vd_ofs = vec_full_offset(a->vd); \
- vj_ofs = vec_full_offset(a->vj); \
- \
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, &op[mop]); \
- \
- return true; \
-}
-
-DO_CMPI_S(vseqi)
-DO_CMPI_S(vslei)
-DO_CMPI_S(vslti)
-
-#define DO_CMPI_U(NAME) \
-static bool do_## NAME ##_u(DisasContext *ctx, arg_vv_i *a, MemOp mop) \
-{ \
- uint32_t vd_ofs, vj_ofs; \
- \
- CHECK_SXE; \
- \
- static const TCGOpcode vecop_list[] = { \
- INDEX_op_cmp_vec, 0 \
- }; \
- static const GVecGen2i op[4] = { \
- { \
- .fniv = gen_## NAME ##_u_vec, \
- .fnoi = gen_helper_## NAME ##_bu, \
- .opt_opc = vecop_list, \
- .vece = MO_8 \
- }, \
- { \
- .fniv = gen_## NAME ##_u_vec, \
- .fnoi = gen_helper_## NAME ##_hu, \
- .opt_opc = vecop_list, \
- .vece = MO_16 \
- }, \
- { \
- .fniv = gen_## NAME ##_u_vec, \
- .fnoi = gen_helper_## NAME ##_wu, \
- .opt_opc = vecop_list, \
- .vece = MO_32 \
- }, \
- { \
- .fniv = gen_## NAME ##_u_vec, \
- .fnoi = gen_helper_## NAME ##_du, \
- .opt_opc = vecop_list, \
- .vece = MO_64 \
- } \
- }; \
- \
- vd_ofs = vec_full_offset(a->vd); \
- vj_ofs = vec_full_offset(a->vj); \
- \
- tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, &op[mop]); \
- \
- return true; \
-}
-
-DO_CMPI_U(vslei)
-DO_CMPI_U(vslti)
-
-TRANS(vseq_b, LSX, do_cmp, MO_8, TCG_COND_EQ)
-TRANS(vseq_h, LSX, do_cmp, MO_16, TCG_COND_EQ)
-TRANS(vseq_w, LSX, do_cmp, MO_32, TCG_COND_EQ)
-TRANS(vseq_d, LSX, do_cmp, MO_64, TCG_COND_EQ)
-TRANS(vseqi_b, LSX, do_vseqi_s, MO_8)
-TRANS(vseqi_h, LSX, do_vseqi_s, MO_16)
-TRANS(vseqi_w, LSX, do_vseqi_s, MO_32)
-TRANS(vseqi_d, LSX, do_vseqi_s, MO_64)
-
-TRANS(vsle_b, LSX, do_cmp, MO_8, TCG_COND_LE)
-TRANS(vsle_h, LSX, do_cmp, MO_16, TCG_COND_LE)
-TRANS(vsle_w, LSX, do_cmp, MO_32, TCG_COND_LE)
-TRANS(vsle_d, LSX, do_cmp, MO_64, TCG_COND_LE)
-TRANS(vslei_b, LSX, do_vslei_s, MO_8)
-TRANS(vslei_h, LSX, do_vslei_s, MO_16)
-TRANS(vslei_w, LSX, do_vslei_s, MO_32)
-TRANS(vslei_d, LSX, do_vslei_s, MO_64)
-TRANS(vsle_bu, LSX, do_cmp, MO_8, TCG_COND_LEU)
-TRANS(vsle_hu, LSX, do_cmp, MO_16, TCG_COND_LEU)
-TRANS(vsle_wu, LSX, do_cmp, MO_32, TCG_COND_LEU)
-TRANS(vsle_du, LSX, do_cmp, MO_64, TCG_COND_LEU)
-TRANS(vslei_bu, LSX, do_vslei_u, MO_8)
-TRANS(vslei_hu, LSX, do_vslei_u, MO_16)
-TRANS(vslei_wu, LSX, do_vslei_u, MO_32)
-TRANS(vslei_du, LSX, do_vslei_u, MO_64)
-
-TRANS(vslt_b, LSX, do_cmp, MO_8, TCG_COND_LT)
-TRANS(vslt_h, LSX, do_cmp, MO_16, TCG_COND_LT)
-TRANS(vslt_w, LSX, do_cmp, MO_32, TCG_COND_LT)
-TRANS(vslt_d, LSX, do_cmp, MO_64, TCG_COND_LT)
-TRANS(vslti_b, LSX, do_vslti_s, MO_8)
-TRANS(vslti_h, LSX, do_vslti_s, MO_16)
-TRANS(vslti_w, LSX, do_vslti_s, MO_32)
-TRANS(vslti_d, LSX, do_vslti_s, MO_64)
-TRANS(vslt_bu, LSX, do_cmp, MO_8, TCG_COND_LTU)
-TRANS(vslt_hu, LSX, do_cmp, MO_16, TCG_COND_LTU)
-TRANS(vslt_wu, LSX, do_cmp, MO_32, TCG_COND_LTU)
-TRANS(vslt_du, LSX, do_cmp, MO_64, TCG_COND_LTU)
-TRANS(vslti_bu, LSX, do_vslti_u, MO_8)
-TRANS(vslti_hu, LSX, do_vslti_u, MO_16)
-TRANS(vslti_wu, LSX, do_vslti_u, MO_32)
-TRANS(vslti_du, LSX, do_vslti_u, MO_64)
-
-static bool trans_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a)
-{
- uint32_t flags;
- void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
- TCGv_i32 vd = tcg_constant_i32(a->vd);
- TCGv_i32 vj = tcg_constant_i32(a->vj);
- TCGv_i32 vk = tcg_constant_i32(a->vk);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- fn = (a->fcond & 1 ? gen_helper_vfcmp_s_s : gen_helper_vfcmp_c_s);
- flags = get_fcmp_flags(a->fcond >> 1);
- fn(cpu_env, vd, vj, vk, tcg_constant_i32(flags));
-
- return true;
-}
-
-static bool trans_vfcmp_cond_d(DisasContext *ctx, arg_vvv_fcond *a)
-{
- uint32_t flags;
- void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
- TCGv_i32 vd = tcg_constant_i32(a->vd);
- TCGv_i32 vj = tcg_constant_i32(a->vj);
- TCGv_i32 vk = tcg_constant_i32(a->vk);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- fn = (a->fcond & 1 ? gen_helper_vfcmp_s_d : gen_helper_vfcmp_c_d);
- flags = get_fcmp_flags(a->fcond >> 1);
- fn(cpu_env, vd, vj, vk, tcg_constant_i32(flags));
-
- return true;
-}
-
-static bool trans_vbitsel_v(DisasContext *ctx, arg_vvvv *a)
-{
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
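- /* bitsel(d, a, b, c) is (b & a) | (c & ~a): vd = (vk & va) | (vj & ~va). */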
- tcg_gen_gvec_bitsel(MO_64, vec_full_offset(a->vd), vec_full_offset(a->va),
- vec_full_offset(a->vk), vec_full_offset(a->vj),
- 16, ctx->vl/8);
- return true;
-}
-
-static void gen_vbitseli(unsigned vece, TCGv_vec a, TCGv_vec b, int64_t imm)
-{
- tcg_gen_bitsel_vec(vece, a, a, tcg_constant_vec_matching(a, vece, imm), b);
-}
-
-static bool trans_vbitseli_b(DisasContext *ctx, arg_vv_i *a)
-{
- static const GVecGen2i op = {
- .fniv = gen_vbitseli,
- .fnoi = gen_helper_vbitseli_b,
- .vece = MO_8,
- .load_dest = true
- };
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- tcg_gen_gvec_2i(vec_full_offset(a->vd), vec_full_offset(a->vj),
- 16, ctx->vl/8, a->imm, &op);
- return true;
-}
-
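-/*
- * vseteqz.v/vsetnez.v: OR the two 64-bit halves of vj and set flag
- * register cd from a compare of the 128-bit value against zero.
- */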
-#define VSET(NAME, COND) \
-static bool trans_## NAME (DisasContext *ctx, arg_cv *a) \
-{ \
- TCGv_i64 t1, al, ah; \
- \
- if (!avail_LSX(ctx)) { \
- return false; \
- } \
- \
- CHECK_SXE; \
- \
- al = tcg_temp_new_i64(); \
- ah = tcg_temp_new_i64(); \
- t1 = tcg_temp_new_i64(); \
- \
- get_vreg64(ah, a->vj, 1); \
- get_vreg64(al, a->vj, 0); \
- tcg_gen_or_i64(t1, al, ah); \
- tcg_gen_setcondi_i64(COND, t1, t1, 0); \
- tcg_gen_st8_tl(t1, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); \
- \
- return true; \
-}
-
-VSET(vseteqz_v, TCG_COND_EQ)
-VSET(vsetnez_v, TCG_COND_NE)
-
-TRANS(vsetanyeqz_b, LSX, gen_cv, gen_helper_vsetanyeqz_b)
-TRANS(vsetanyeqz_h, LSX, gen_cv, gen_helper_vsetanyeqz_h)
-TRANS(vsetanyeqz_w, LSX, gen_cv, gen_helper_vsetanyeqz_w)
-TRANS(vsetanyeqz_d, LSX, gen_cv, gen_helper_vsetanyeqz_d)
-TRANS(vsetallnez_b, LSX, gen_cv, gen_helper_vsetallnez_b)
-TRANS(vsetallnez_h, LSX, gen_cv, gen_helper_vsetallnez_h)
-TRANS(vsetallnez_w, LSX, gen_cv, gen_helper_vsetallnez_w)
-TRANS(vsetallnez_d, LSX, gen_cv, gen_helper_vsetallnez_d)
-
-static bool trans_vinsgr2vr_b(DisasContext *ctx, arg_vr_i *a)
-{
- TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_st8_i64(src, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vd].vreg.B(a->imm)));
- return true;
-}
-
-static bool trans_vinsgr2vr_h(DisasContext *ctx, arg_vr_i *a)
-{
- TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_st16_i64(src, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vd].vreg.H(a->imm)));
- return true;
-}
-
-static bool trans_vinsgr2vr_w(DisasContext *ctx, arg_vr_i *a)
-{
- TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_st32_i64(src, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vd].vreg.W(a->imm)));
- return true;
-}
-
-static bool trans_vinsgr2vr_d(DisasContext *ctx, arg_vr_i *a)
-{
- TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_st_i64(src, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vd].vreg.D(a->imm)));
- return true;
-}
-
-static bool trans_vpickve2gr_b(DisasContext *ctx, arg_rv_i *a)
-{
- TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_ld8s_i64(dst, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vj].vreg.B(a->imm)));
- return true;
-}
-
-static bool trans_vpickve2gr_h(DisasContext *ctx, arg_rv_i *a)
-{
- TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_ld16s_i64(dst, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vj].vreg.H(a->imm)));
- return true;
-}
-
-static bool trans_vpickve2gr_w(DisasContext *ctx, arg_rv_i *a)
-{
- TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_ld32s_i64(dst, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vj].vreg.W(a->imm)));
- return true;
-}
-
-static bool trans_vpickve2gr_d(DisasContext *ctx, arg_rv_i *a)
-{
- TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_ld_i64(dst, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vj].vreg.D(a->imm)));
- return true;
-}
-
-static bool trans_vpickve2gr_bu(DisasContext *ctx, arg_rv_i *a)
-{
- TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_ld8u_i64(dst, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vj].vreg.B(a->imm)));
- return true;
-}
-
-static bool trans_vpickve2gr_hu(DisasContext *ctx, arg_rv_i *a)
-{
- TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_ld16u_i64(dst, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vj].vreg.H(a->imm)));
- return true;
-}
-
-static bool trans_vpickve2gr_wu(DisasContext *ctx, arg_rv_i *a)
-{
- TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_ld32u_i64(dst, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vj].vreg.W(a->imm)));
- return true;
-}
-
-static bool trans_vpickve2gr_du(DisasContext *ctx, arg_rv_i *a)
-{
- TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_ld_i64(dst, cpu_env,
- offsetof(CPULoongArchState, fpr[a->vj].vreg.D(a->imm)));
- return true;
-}
-
-static bool gvec_dup(DisasContext *ctx, arg_vr *a, MemOp mop)
-{
- TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- tcg_gen_gvec_dup_i64(mop, vec_full_offset(a->vd),
- 16, ctx->vl/8, src);
- return true;
-}
-
-TRANS(vreplgr2vr_b, LSX, gvec_dup, MO_8)
-TRANS(vreplgr2vr_h, LSX, gvec_dup, MO_16)
-TRANS(vreplgr2vr_w, LSX, gvec_dup, MO_32)
-TRANS(vreplgr2vr_d, LSX, gvec_dup, MO_64)
-
-static bool trans_vreplvei_b(DisasContext *ctx, arg_vv_i *a)
-{
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_gvec_dup_mem(MO_8, vec_full_offset(a->vd),
- offsetof(CPULoongArchState,
- fpr[a->vj].vreg.B((a->imm))),
- 16, ctx->vl/8);
- return true;
-}
-
-static bool trans_vreplvei_h(DisasContext *ctx, arg_vv_i *a)
-{
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_gvec_dup_mem(MO_16, vec_full_offset(a->vd),
- offsetof(CPULoongArchState,
- fpr[a->vj].vreg.H((a->imm))),
- 16, ctx->vl/8);
- return true;
-}
-static bool trans_vreplvei_w(DisasContext *ctx, arg_vv_i *a)
-{
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_gvec_dup_mem(MO_32, vec_full_offset(a->vd),
- offsetof(CPULoongArchState,
- fpr[a->vj].vreg.W((a->imm))),
- 16, ctx->vl/8);
- return true;
-}
-static bool trans_vreplvei_d(DisasContext *ctx, arg_vv_i *a)
-{
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
- tcg_gen_gvec_dup_mem(MO_64, vec_full_offset(a->vd),
- offsetof(CPULoongArchState,
- fpr[a->vj].vreg.D((a->imm))),
- 16, ctx->vl/8);
- return true;
-}
-
-static bool gen_vreplve(DisasContext *ctx, arg_vvr *a, int vece, int bit,
- void (*func)(TCGv_i64, TCGv_ptr, tcg_target_long))
-{
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_ptr t1 = tcg_temp_new_ptr();
- TCGv_i64 t2 = tcg_temp_new_i64();
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
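- /*
- * Compute the byte offset of element rk % (LSX_LEN / bit), adjust it
- * for big-endian hosts, then load that element and broadcast it to
- * every lane of vd.
- */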
- tcg_gen_andi_i64(t0, gpr_src(ctx, a->rk, EXT_NONE), (LSX_LEN / bit) - 1);
- tcg_gen_shli_i64(t0, t0, vece);
- if (HOST_BIG_ENDIAN) {
- tcg_gen_xori_i64(t0, t0, vece << ((LSX_LEN / bit) - 1));
- }
-
- tcg_gen_trunc_i64_ptr(t1, t0);
- tcg_gen_add_ptr(t1, t1, cpu_env);
- func(t2, t1, vec_full_offset(a->vj));
- tcg_gen_gvec_dup_i64(vece, vec_full_offset(a->vd), 16, ctx->vl/8, t2);
-
- return true;
-}
-
-TRANS(vreplve_b, LSX, gen_vreplve, MO_8, 8, tcg_gen_ld8u_i64)
-TRANS(vreplve_h, LSX, gen_vreplve, MO_16, 16, tcg_gen_ld16u_i64)
-TRANS(vreplve_w, LSX, gen_vreplve, MO_32, 32, tcg_gen_ld32u_i64)
-TRANS(vreplve_d, LSX, gen_vreplve, MO_64, 64, tcg_gen_ld_i64)
-
-static bool trans_vbsll_v(DisasContext *ctx, arg_vv_i *a)
-{
- int ofs;
- TCGv_i64 desthigh, destlow, high, low;
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- desthigh = tcg_temp_new_i64();
- destlow = tcg_temp_new_i64();
- high = tcg_temp_new_i64();
- low = tcg_temp_new_i64();
-
- get_vreg64(low, a->vj, 0);
-
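- /* Shift the 128-bit value in vj left by imm[3:0] bytes. */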
- ofs = ((a->imm) & 0xf) * 8;
- if (ofs < 64) {
- get_vreg64(high, a->vj, 1);
- tcg_gen_extract2_i64(desthigh, low, high, 64 - ofs);
- tcg_gen_shli_i64(destlow, low, ofs);
- } else {
- tcg_gen_shli_i64(desthigh, low, ofs - 64);
- destlow = tcg_constant_i64(0);
- }
-
- set_vreg64(desthigh, a->vd, 1);
- set_vreg64(destlow, a->vd, 0);
-
- return true;
-}
-
-static bool trans_vbsrl_v(DisasContext *ctx, arg_vv_i *a)
-{
- TCGv_i64 desthigh, destlow, high, low;
- int ofs;
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- desthigh = tcg_temp_new_i64();
- destlow = tcg_temp_new_i64();
- high = tcg_temp_new_i64();
- low = tcg_temp_new_i64();
-
- get_vreg64(high, a->vj, 1);
-
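- /* Shift the 128-bit value in vj right by imm[3:0] bytes. */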
- ofs = ((a->imm) & 0xf) * 8;
- if (ofs < 64) {
- get_vreg64(low, a->vj, 0);
- tcg_gen_extract2_i64(destlow, low, high, ofs);
- tcg_gen_shri_i64(desthigh, high, ofs);
- } else {
- tcg_gen_shri_i64(destlow, high, ofs - 64);
- desthigh = tcg_constant_i64(0);
- }
-
- set_vreg64(desthigh, a->vd, 1);
- set_vreg64(destlow, a->vd, 0);
-
- return true;
-}
-
-TRANS(vpackev_b, LSX, gen_vvv, gen_helper_vpackev_b)
-TRANS(vpackev_h, LSX, gen_vvv, gen_helper_vpackev_h)
-TRANS(vpackev_w, LSX, gen_vvv, gen_helper_vpackev_w)
-TRANS(vpackev_d, LSX, gen_vvv, gen_helper_vpackev_d)
-TRANS(vpackod_b, LSX, gen_vvv, gen_helper_vpackod_b)
-TRANS(vpackod_h, LSX, gen_vvv, gen_helper_vpackod_h)
-TRANS(vpackod_w, LSX, gen_vvv, gen_helper_vpackod_w)
-TRANS(vpackod_d, LSX, gen_vvv, gen_helper_vpackod_d)
-
-TRANS(vpickev_b, LSX, gen_vvv, gen_helper_vpickev_b)
-TRANS(vpickev_h, LSX, gen_vvv, gen_helper_vpickev_h)
-TRANS(vpickev_w, LSX, gen_vvv, gen_helper_vpickev_w)
-TRANS(vpickev_d, LSX, gen_vvv, gen_helper_vpickev_d)
-TRANS(vpickod_b, LSX, gen_vvv, gen_helper_vpickod_b)
-TRANS(vpickod_h, LSX, gen_vvv, gen_helper_vpickod_h)
-TRANS(vpickod_w, LSX, gen_vvv, gen_helper_vpickod_w)
-TRANS(vpickod_d, LSX, gen_vvv, gen_helper_vpickod_d)
-
-TRANS(vilvl_b, LSX, gen_vvv, gen_helper_vilvl_b)
-TRANS(vilvl_h, LSX, gen_vvv, gen_helper_vilvl_h)
-TRANS(vilvl_w, LSX, gen_vvv, gen_helper_vilvl_w)
-TRANS(vilvl_d, LSX, gen_vvv, gen_helper_vilvl_d)
-TRANS(vilvh_b, LSX, gen_vvv, gen_helper_vilvh_b)
-TRANS(vilvh_h, LSX, gen_vvv, gen_helper_vilvh_h)
-TRANS(vilvh_w, LSX, gen_vvv, gen_helper_vilvh_w)
-TRANS(vilvh_d, LSX, gen_vvv, gen_helper_vilvh_d)
-
-TRANS(vshuf_b, LSX, gen_vvvv, gen_helper_vshuf_b)
-TRANS(vshuf_h, LSX, gen_vvv, gen_helper_vshuf_h)
-TRANS(vshuf_w, LSX, gen_vvv, gen_helper_vshuf_w)
-TRANS(vshuf_d, LSX, gen_vvv, gen_helper_vshuf_d)
-TRANS(vshuf4i_b, LSX, gen_vv_i, gen_helper_vshuf4i_b)
-TRANS(vshuf4i_h, LSX, gen_vv_i, gen_helper_vshuf4i_h)
-TRANS(vshuf4i_w, LSX, gen_vv_i, gen_helper_vshuf4i_w)
-TRANS(vshuf4i_d, LSX, gen_vv_i, gen_helper_vshuf4i_d)
-
-TRANS(vpermi_w, LSX, gen_vv_i, gen_helper_vpermi_w)
-
-TRANS(vextrins_b, LSX, gen_vv_i, gen_helper_vextrins_b)
-TRANS(vextrins_h, LSX, gen_vv_i, gen_helper_vextrins_h)
-TRANS(vextrins_w, LSX, gen_vv_i, gen_helper_vextrins_w)
-TRANS(vextrins_d, LSX, gen_vv_i, gen_helper_vextrins_d)
-
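-/*
- * 128-bit loads and stores go through a single i128 memory access,
- * split into / recombined from the two 64-bit halves of the register.
- */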
-static bool trans_vld(DisasContext *ctx, arg_vr_i *a)
-{
- TCGv addr;
- TCGv_i64 rl, rh;
- TCGv_i128 val;
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- addr = gpr_src(ctx, a->rj, EXT_NONE);
- val = tcg_temp_new_i128();
- rl = tcg_temp_new_i64();
- rh = tcg_temp_new_i64();
-
- addr = make_address_i(ctx, addr, a->imm);
-
- tcg_gen_qemu_ld_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
- tcg_gen_extr_i128_i64(rl, rh, val);
- set_vreg64(rh, a->vd, 1);
- set_vreg64(rl, a->vd, 0);
-
- return true;
-}
-
-static bool trans_vst(DisasContext *ctx, arg_vr_i *a)
-{
- TCGv addr;
- TCGv_i128 val;
- TCGv_i64 ah, al;
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- addr = gpr_src(ctx, a->rj, EXT_NONE);
- val = tcg_temp_new_i128();
- ah = tcg_temp_new_i64();
- al = tcg_temp_new_i64();
-
- addr = make_address_i(ctx, addr, a->imm);
-
- get_vreg64(ah, a->vd, 1);
- get_vreg64(al, a->vd, 0);
- tcg_gen_concat_i64_i128(val, al, ah);
- tcg_gen_qemu_st_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
-
- return true;
-}
-
-static bool trans_vldx(DisasContext *ctx, arg_vrr *a)
-{
- TCGv addr, src1, src2;
- TCGv_i64 rl, rh;
- TCGv_i128 val;
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- src1 = gpr_src(ctx, a->rj, EXT_NONE);
- src2 = gpr_src(ctx, a->rk, EXT_NONE);
- val = tcg_temp_new_i128();
- rl = tcg_temp_new_i64();
- rh = tcg_temp_new_i64();
-
- addr = make_address_x(ctx, src1, src2);
- tcg_gen_qemu_ld_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
- tcg_gen_extr_i128_i64(rl, rh, val);
- set_vreg64(rh, a->vd, 1);
- set_vreg64(rl, a->vd, 0);
-
- return true;
-}
-
-static bool trans_vstx(DisasContext *ctx, arg_vrr *a)
-{
- TCGv addr, src1, src2;
- TCGv_i64 ah, al;
- TCGv_i128 val;
-
- if (!avail_LSX(ctx)) {
- return false;
- }
-
- CHECK_SXE;
-
- src1 = gpr_src(ctx, a->rj, EXT_NONE);
- src2 = gpr_src(ctx, a->rk, EXT_NONE);
- val = tcg_temp_new_i128();
- ah = tcg_temp_new_i64();
- al = tcg_temp_new_i64();
-
- addr = make_address_x(ctx, src1, src2);
- get_vreg64(ah, a->vd, 1);
- get_vreg64(al, a->vd, 0);
- tcg_gen_concat_i64_i128(val, al, ah);
- tcg_gen_qemu_st_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
-
- return true;
-}
-
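-/* vldrepl: load one element from memory and broadcast it to all lanes. */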
-#define VLDREPL(NAME, MO) \
-static bool trans_## NAME (DisasContext *ctx, arg_vr_i *a) \
-{ \
- TCGv addr; \
- TCGv_i64 val; \
- \
- if (!avail_LSX(ctx)) { \
- return false; \
- } \
- \
- CHECK_SXE; \
- \
- addr = gpr_src(ctx, a->rj, EXT_NONE); \
- val = tcg_temp_new_i64(); \
- \
- addr = make_address_i(ctx, addr, a->imm); \
- \
- tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, MO); \
- tcg_gen_gvec_dup_i64(MO, vec_full_offset(a->vd), 16, ctx->vl/8, val); \
- \
- return true; \
-}
-
-VLDREPL(vldrepl_b, MO_8)
-VLDREPL(vldrepl_h, MO_16)
-VLDREPL(vldrepl_w, MO_32)
-VLDREPL(vldrepl_d, MO_64)
-
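-/* vstelm: store the single element selected by imm2 to memory. */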
-#define VSTELM(NAME, MO, E) \
-static bool trans_## NAME (DisasContext *ctx, arg_vr_ii *a) \
-{ \
- TCGv addr; \
- TCGv_i64 val; \
- \
- if (!avail_LSX(ctx)) { \
- return false; \
- } \
- \
- CHECK_SXE; \
- \
- addr = gpr_src(ctx, a->rj, EXT_NONE); \
- val = tcg_temp_new_i64(); \
- \
- addr = make_address_i(ctx, addr, a->imm); \
- \
- tcg_gen_ld_i64(val, cpu_env, \
- offsetof(CPULoongArchState, fpr[a->vd].vreg.E(a->imm2))); \
- tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, MO); \
- \
- return true; \
-}
-
-VSTELM(vstelm_b, MO_8, B)
-VSTELM(vstelm_h, MO_16, H)
-VSTELM(vstelm_w, MO_32, W)
-VSTELM(vstelm_d, MO_64, D)
--- /dev/null
+++ b/target/loongarch/insn_trans/trans_vec.c.inc
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * LoongArch vector translate functions
+ * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
+ */
+
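+/*
+ * In system mode the vector unit is gated by the SXE bit of CSR.EUEN,
+ * mirrored into the TB flags; while it is clear, a vector instruction
+ * must raise the SXD exception instead of being translated.
+ */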
+#ifndef CONFIG_USER_ONLY
+#define CHECK_SXE do { \
+ if ((ctx->base.tb->flags & HW_FLAGS_EUEN_SXE) == 0) { \
+ generate_exception(ctx, EXCCODE_SXD); \
+ return true; \
+ } \
+} while (0)
+#else
+#define CHECK_SXE
+#endif
+
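+/*
+ * Helpers that defer an instruction to an out-of-line C helper: the
+ * register numbers are passed as i32 constants and decoded inside the
+ * helper itself.
+ */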
+static bool gen_vvvv(DisasContext *ctx, arg_vvvv *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32,
+ TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 vk = tcg_constant_i32(a->vk);
+ TCGv_i32 va = tcg_constant_i32(a->va);
+
+ CHECK_SXE;
+ func(cpu_env, vd, vj, vk, va);
+ return true;
+}
+
+static bool gen_vvv(DisasContext *ctx, arg_vvv *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 vk = tcg_constant_i32(a->vk);
+
+ CHECK_SXE;
+
+ func(cpu_env, vd, vj, vk);
+ return true;
+}
+
+static bool gen_vv(DisasContext *ctx, arg_vv *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+
+ CHECK_SXE;
+ func(cpu_env, vd, vj);
+ return true;
+}
+
+static bool gen_vv_i(DisasContext *ctx, arg_vv_i *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 imm = tcg_constant_i32(a->imm);
+
+ CHECK_SXE;
+ func(cpu_env, vd, vj, imm);
+ return true;
+}
+
+static bool gen_cv(DisasContext *ctx, arg_cv *a,
+ void (*func)(TCGv_ptr, TCGv_i32, TCGv_i32))
+{
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 cd = tcg_constant_i32(a->cd);
+
+ CHECK_SXE;
+ func(cpu_env, cd, vj);
+ return true;
+}
+
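+/*
+ * Helpers that expand an instruction inline through the generic gvec
+ * API.  oprsz is fixed at 16 bytes (one 128-bit LSX register); maxsz is
+ * vl / 8, and per the gvec contract any bytes between oprsz and maxsz
+ * are zeroed.
+ */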
+static bool gvec_vvv(DisasContext *ctx, arg_vvv *a, MemOp mop,
+ void (*func)(unsigned, uint32_t, uint32_t,
+ uint32_t, uint32_t, uint32_t))
+{
+ uint32_t vd_ofs, vj_ofs, vk_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+ vk_ofs = vec_full_offset(a->vk);
+
+ func(mop, vd_ofs, vj_ofs, vk_ofs, 16, ctx->vl/8);
+ return true;
+}
+
+static bool gvec_vv(DisasContext *ctx, arg_vv *a, MemOp mop,
+ void (*func)(unsigned, uint32_t, uint32_t,
+ uint32_t, uint32_t))
+{
+ uint32_t vd_ofs, vj_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+
+ func(mop, vd_ofs, vj_ofs, 16, ctx->vl/8);
+ return true;
+}
+
+static bool gvec_vv_i(DisasContext *ctx, arg_vv_i *a, MemOp mop,
+ void (*func)(unsigned, uint32_t, uint32_t,
+ int64_t, uint32_t, uint32_t))
+{
+ uint32_t vd_ofs, vj_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+
+    func(mop, vd_ofs, vj_ofs, a->imm, 16, ctx->vl/8);
+ return true;
+}
+
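+/* There is no gvec subi expander; negate the immediate and reuse addi. */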
+static bool gvec_subi(DisasContext *ctx, arg_vv_i *a, MemOp mop)
+{
+ uint32_t vd_ofs, vj_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+
+ tcg_gen_gvec_addi(mop, vd_ofs, vj_ofs, -a->imm, 16, ctx->vl/8);
+ return true;
+}
+
+TRANS(vadd_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_add)
+TRANS(vadd_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_add)
+TRANS(vadd_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_add)
+TRANS(vadd_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_add)
+
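+/*
+ * 128-bit add/sub: there is no 128-bit vector primitive here, so the
+ * operation is done on the two 64-bit halves with carry/borrow through
+ * the add2/sub2 i64 primitives.
+ */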
+#define VADDSUB_Q(NAME) \
+static bool trans_v## NAME ##_q(DisasContext *ctx, arg_vvv *a) \
+{ \
+ TCGv_i64 rh, rl, ah, al, bh, bl; \
+ \
+ if (!avail_LSX(ctx)) { \
+ return false; \
+ } \
+ \
+ CHECK_SXE; \
+ \
+ rh = tcg_temp_new_i64(); \
+ rl = tcg_temp_new_i64(); \
+ ah = tcg_temp_new_i64(); \
+ al = tcg_temp_new_i64(); \
+ bh = tcg_temp_new_i64(); \
+ bl = tcg_temp_new_i64(); \
+ \
+ get_vreg64(ah, a->vj, 1); \
+ get_vreg64(al, a->vj, 0); \
+ get_vreg64(bh, a->vk, 1); \
+ get_vreg64(bl, a->vk, 0); \
+ \
+ tcg_gen_## NAME ##2_i64(rl, rh, al, ah, bl, bh); \
+ \
+ set_vreg64(rh, a->vd, 1); \
+ set_vreg64(rl, a->vd, 0); \
+ \
+ return true; \
+}
+
+VADDSUB_Q(add)
+VADDSUB_Q(sub)
+
+TRANS(vsub_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_sub)
+TRANS(vsub_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_sub)
+TRANS(vsub_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_sub)
+TRANS(vsub_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_sub)
+
+TRANS(vaddi_bu, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_addi)
+TRANS(vaddi_hu, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_addi)
+TRANS(vaddi_wu, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_addi)
+TRANS(vaddi_du, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_addi)
+TRANS(vsubi_bu, LSX, gvec_subi, MO_8)
+TRANS(vsubi_hu, LSX, gvec_subi, MO_16)
+TRANS(vsubi_wu, LSX, gvec_subi, MO_32)
+TRANS(vsubi_du, LSX, gvec_subi, MO_64)
+
+TRANS(vneg_b, LSX, gvec_vv, MO_8, tcg_gen_gvec_neg)
+TRANS(vneg_h, LSX, gvec_vv, MO_16, tcg_gen_gvec_neg)
+TRANS(vneg_w, LSX, gvec_vv, MO_32, tcg_gen_gvec_neg)
+TRANS(vneg_d, LSX, gvec_vv, MO_64, tcg_gen_gvec_neg)
+
+TRANS(vsadd_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_ssadd)
+TRANS(vsadd_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_ssadd)
+TRANS(vsadd_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_ssadd)
+TRANS(vsadd_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_ssadd)
+TRANS(vsadd_bu, LSX, gvec_vvv, MO_8, tcg_gen_gvec_usadd)
+TRANS(vsadd_hu, LSX, gvec_vvv, MO_16, tcg_gen_gvec_usadd)
+TRANS(vsadd_wu, LSX, gvec_vvv, MO_32, tcg_gen_gvec_usadd)
+TRANS(vsadd_du, LSX, gvec_vvv, MO_64, tcg_gen_gvec_usadd)
+TRANS(vssub_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_sssub)
+TRANS(vssub_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_sssub)
+TRANS(vssub_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_sssub)
+TRANS(vssub_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_sssub)
+TRANS(vssub_bu, LSX, gvec_vvv, MO_8, tcg_gen_gvec_ussub)
+TRANS(vssub_hu, LSX, gvec_vvv, MO_16, tcg_gen_gvec_ussub)
+TRANS(vssub_wu, LSX, gvec_vvv, MO_32, tcg_gen_gvec_ussub)
+TRANS(vssub_du, LSX, gvec_vvv, MO_64, tcg_gen_gvec_ussub)
+
+TRANS(vhaddw_h_b, LSX, gen_vvv, gen_helper_vhaddw_h_b)
+TRANS(vhaddw_w_h, LSX, gen_vvv, gen_helper_vhaddw_w_h)
+TRANS(vhaddw_d_w, LSX, gen_vvv, gen_helper_vhaddw_d_w)
+TRANS(vhaddw_q_d, LSX, gen_vvv, gen_helper_vhaddw_q_d)
+TRANS(vhaddw_hu_bu, LSX, gen_vvv, gen_helper_vhaddw_hu_bu)
+TRANS(vhaddw_wu_hu, LSX, gen_vvv, gen_helper_vhaddw_wu_hu)
+TRANS(vhaddw_du_wu, LSX, gen_vvv, gen_helper_vhaddw_du_wu)
+TRANS(vhaddw_qu_du, LSX, gen_vvv, gen_helper_vhaddw_qu_du)
+TRANS(vhsubw_h_b, LSX, gen_vvv, gen_helper_vhsubw_h_b)
+TRANS(vhsubw_w_h, LSX, gen_vvv, gen_helper_vhsubw_w_h)
+TRANS(vhsubw_d_w, LSX, gen_vvv, gen_helper_vhsubw_d_w)
+TRANS(vhsubw_q_d, LSX, gen_vvv, gen_helper_vhsubw_q_d)
+TRANS(vhsubw_hu_bu, LSX, gen_vvv, gen_helper_vhsubw_hu_bu)
+TRANS(vhsubw_wu_hu, LSX, gen_vvv, gen_helper_vhsubw_wu_hu)
+TRANS(vhsubw_du_wu, LSX, gen_vvv, gen_helper_vhsubw_du_wu)
+TRANS(vhsubw_qu_du, LSX, gen_vvv, gen_helper_vhsubw_qu_du)
+
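+/*
+ * In the widening even/odd fniv callbacks the expander passes the
+ * *destination* element size as vece, so halfbits = 4 << vece (half of
+ * the 8 << vece element width) is the width of the narrower source
+ * elements being extended.
+ */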
+static void gen_vaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Sign-extend the even elements from a */
+ tcg_gen_shli_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t1, t1, halfbits);
+
+ /* Sign-extend the even elements from b */
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwev_s,
+ .fno = gen_helper_vaddwev_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwev_w_h,
+ .fniv = gen_vaddwev_s,
+ .fno = gen_helper_vaddwev_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwev_d_w,
+ .fniv = gen_vaddwev_s,
+ .fno = gen_helper_vaddwev_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwev_q_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwev_h_b, LSX, gvec_vvv, MO_8, do_vaddwev_s)
+TRANS(vaddwev_w_h, LSX, gvec_vvv, MO_16, do_vaddwev_s)
+TRANS(vaddwev_d_w, LSX, gvec_vvv, MO_32, do_vaddwev_s)
+TRANS(vaddwev_q_d, LSX, gvec_vvv, MO_64, do_vaddwev_s)
+
+static void gen_vaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void gen_vaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+    /* Sign-extend the odd elements from a and b */
+ tcg_gen_sari_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void do_vaddwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwod_s,
+ .fno = gen_helper_vaddwod_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwod_w_h,
+ .fniv = gen_vaddwod_s,
+ .fno = gen_helper_vaddwod_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwod_d_w,
+ .fniv = gen_vaddwod_s,
+ .fno = gen_helper_vaddwod_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwod_q_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwod_h_b, LSX, gvec_vvv, MO_8, do_vaddwod_s)
+TRANS(vaddwod_w_h, LSX, gvec_vvv, MO_16, do_vaddwod_s)
+TRANS(vaddwod_d_w, LSX, gvec_vvv, MO_32, do_vaddwod_s)
+TRANS(vaddwod_q_d, LSX, gvec_vvv, MO_64, do_vaddwod_s)
+
+static void gen_vsubwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Sign-extend the even elements from a */
+ tcg_gen_shli_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t1, t1, halfbits);
+
+ /* Sign-extend the even elements from b */
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+
+ tcg_gen_sub_vec(vece, t, t1, t2);
+}
+
+static void gen_vsubwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_sub_i32(t, t1, t2);
+}
+
+static void gen_vsubwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_sub_i64(t, t1, t2);
+}
+
+static void do_vsubwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsubwev_s,
+ .fno = gen_helper_vsubwev_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vsubwev_w_h,
+ .fniv = gen_vsubwev_s,
+ .fno = gen_helper_vsubwev_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vsubwev_d_w,
+ .fniv = gen_vsubwev_s,
+ .fno = gen_helper_vsubwev_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vsubwev_q_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsubwev_h_b, LSX, gvec_vvv, MO_8, do_vsubwev_s)
+TRANS(vsubwev_w_h, LSX, gvec_vvv, MO_16, do_vsubwev_s)
+TRANS(vsubwev_d_w, LSX, gvec_vvv, MO_32, do_vsubwev_s)
+TRANS(vsubwev_q_d, LSX, gvec_vvv, MO_64, do_vsubwev_s)
+
+static void gen_vsubwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+    /* Sign-extend the odd elements from a and b */
+ tcg_gen_sari_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+
+ tcg_gen_sub_vec(vece, t, t1, t2);
+}
+
+static void gen_vsubwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_sub_i32(t, t1, t2);
+}
+
+static void gen_vsubwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_sub_i64(t, t1, t2);
+}
+
+static void do_vsubwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsubwod_s,
+ .fno = gen_helper_vsubwod_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vsubwod_w_h,
+ .fniv = gen_vsubwod_s,
+ .fno = gen_helper_vsubwod_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vsubwod_d_w,
+ .fniv = gen_vsubwod_s,
+ .fno = gen_helper_vsubwod_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vsubwod_q_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsubwod_h_b, LSX, gvec_vvv, MO_8, do_vsubwod_s)
+TRANS(vsubwod_w_h, LSX, gvec_vvv, MO_16, do_vsubwod_s)
+TRANS(vsubwod_d_w, LSX, gvec_vvv, MO_32, do_vsubwod_s)
+TRANS(vsubwod_q_d, LSX, gvec_vvv, MO_64, do_vsubwod_s)
+
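+/*
+ * For the unsigned even-element forms no shifting is needed: masking
+ * each double-width lane with MAKE_64BIT_MASK(0, 4 << vece) keeps the
+ * even source element already zero-extended in place.
+ */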
+static void gen_vaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, t3);
+ tcg_gen_and_vec(vece, t2, b, t3);
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16u_i32(t2, b);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32u_i64(t2, b);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwev_u,
+ .fno = gen_helper_vaddwev_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwev_w_hu,
+ .fniv = gen_vaddwev_u,
+ .fno = gen_helper_vaddwev_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwev_d_wu,
+ .fniv = gen_vaddwev_u,
+ .fno = gen_helper_vaddwev_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwev_q_du,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwev_h_bu, LSX, gvec_vvv, MO_8, do_vaddwev_u)
+TRANS(vaddwev_w_hu, LSX, gvec_vvv, MO_16, do_vaddwev_u)
+TRANS(vaddwev_d_wu, LSX, gvec_vvv, MO_32, do_vaddwev_u)
+TRANS(vaddwev_q_du, LSX, gvec_vvv, MO_64, do_vaddwev_u)
+
+static void gen_vaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+    /* Zero-extend the odd elements from a and b */
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_shri_vec(vece, t2, b, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_shri_i32(t2, b, 16);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_shri_i64(t2, b, 32);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwod_u,
+ .fno = gen_helper_vaddwod_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwod_w_hu,
+ .fniv = gen_vaddwod_u,
+ .fno = gen_helper_vaddwod_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwod_d_wu,
+ .fniv = gen_vaddwod_u,
+ .fno = gen_helper_vaddwod_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwod_q_du,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwod_h_bu, LSX, gvec_vvv, MO_8, do_vaddwod_u)
+TRANS(vaddwod_w_hu, LSX, gvec_vvv, MO_16, do_vaddwod_u)
+TRANS(vaddwod_d_wu, LSX, gvec_vvv, MO_32, do_vaddwod_u)
+TRANS(vaddwod_q_du, LSX, gvec_vvv, MO_64, do_vaddwod_u)
+
+static void gen_vsubwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, t3);
+ tcg_gen_and_vec(vece, t2, b, t3);
+ tcg_gen_sub_vec(vece, t, t1, t2);
+}
+
+static void gen_vsubwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16u_i32(t2, b);
+ tcg_gen_sub_i32(t, t1, t2);
+}
+
+static void gen_vsubwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32u_i64(t2, b);
+ tcg_gen_sub_i64(t, t1, t2);
+}
+
+static void do_vsubwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsubwev_u,
+ .fno = gen_helper_vsubwev_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vsubwev_w_hu,
+ .fniv = gen_vsubwev_u,
+ .fno = gen_helper_vsubwev_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vsubwev_d_wu,
+ .fniv = gen_vsubwev_u,
+ .fno = gen_helper_vsubwev_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vsubwev_q_du,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsubwev_h_bu, LSX, gvec_vvv, MO_8, do_vsubwev_u)
+TRANS(vsubwev_w_hu, LSX, gvec_vvv, MO_16, do_vsubwev_u)
+TRANS(vsubwev_d_wu, LSX, gvec_vvv, MO_32, do_vsubwev_u)
+TRANS(vsubwev_q_du, LSX, gvec_vvv, MO_64, do_vsubwev_u)
+
+static void gen_vsubwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+    /* Zero-extend the odd elements from a and b */
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_shri_vec(vece, t2, b, halfbits);
+
+ tcg_gen_sub_vec(vece, t, t1, t2);
+}
+
+static void gen_vsubwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_shri_i32(t2, b, 16);
+ tcg_gen_sub_i32(t, t1, t2);
+}
+
+static void gen_vsubwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_shri_i64(t2, b, 32);
+ tcg_gen_sub_i64(t, t1, t2);
+}
+
+static void do_vsubwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsubwod_u,
+ .fno = gen_helper_vsubwod_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vsubwod_w_hu,
+ .fniv = gen_vsubwod_u,
+ .fno = gen_helper_vsubwod_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vsubwod_d_wu,
+ .fniv = gen_vsubwod_u,
+ .fno = gen_helper_vsubwod_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vsubwod_q_du,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsubwod_h_bu, LSX, gvec_vvv, MO_8, do_vsubwod_u)
+TRANS(vsubwod_w_hu, LSX, gvec_vvv, MO_16, do_vsubwod_u)
+TRANS(vsubwod_d_wu, LSX, gvec_vvv, MO_32, do_vsubwod_u)
+TRANS(vsubwod_q_du, LSX, gvec_vvv, MO_64, do_vsubwod_u)
+
+static void gen_vaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, halfbits));
+
+ /* Zero-extend the even elements from a */
+ tcg_gen_and_vec(vece, t1, a, t3);
+
+ /* Sign-extend the even elements from b */
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwev_u_s,
+ .fno = gen_helper_vaddwev_h_bu_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwev_w_hu_h,
+ .fniv = gen_vaddwev_u_s,
+ .fno = gen_helper_vaddwev_w_hu_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwev_d_wu_w,
+ .fniv = gen_vaddwev_u_s,
+ .fno = gen_helper_vaddwev_d_wu_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwev_q_du_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwev_h_bu_b, LSX, gvec_vvv, MO_8, do_vaddwev_u_s)
+TRANS(vaddwev_w_hu_h, LSX, gvec_vvv, MO_16, do_vaddwev_u_s)
+TRANS(vaddwev_d_wu_w, LSX, gvec_vvv, MO_32, do_vaddwev_u_s)
+TRANS(vaddwev_q_du_d, LSX, gvec_vvv, MO_64, do_vaddwev_u_s)
+
+static void gen_vaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ /* Zero-extend the odd elements from a */
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ /* Sign-extend the odd elements from b */
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void gen_vaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_add_i32(t, t1, t2);
+}
+
+static void gen_vaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_add_i64(t, t1, t2);
+}
+
+static void do_vaddwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vaddwod_u_s,
+ .fno = gen_helper_vaddwod_h_bu_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vaddwod_w_hu_h,
+ .fniv = gen_vaddwod_u_s,
+ .fno = gen_helper_vaddwod_w_hu_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vaddwod_d_wu_w,
+ .fniv = gen_vaddwod_u_s,
+ .fno = gen_helper_vaddwod_d_wu_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ {
+ .fno = gen_helper_vaddwod_q_du_d,
+ .vece = MO_128
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vaddwod_h_bu_b, LSX, gvec_vvv, MO_8, do_vaddwod_u_s)
+TRANS(vaddwod_w_hu_h, LSX, gvec_vvv, MO_16, do_vaddwod_u_s)
+TRANS(vaddwod_d_wu_w, LSX, gvec_vvv, MO_32, do_vaddwod_u_s)
+TRANS(vaddwod_q_du_d, LSX, gvec_vvv, MO_64, do_vaddwod_u_s)
+
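+/*
+ * Overflow-safe average: avg(a, b) = (a >> 1) + (b >> 1) + carry.
+ * The carry is (a & b) & 1 for the truncating vavg forms and
+ * (a | b) & 1 for the rounding vavgr forms; gen_round_vec selects
+ * and_vec or or_vec accordingly.
+ */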
+static void do_vavg(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ void (*gen_shr_vec)(unsigned, TCGv_vec,
+ TCGv_vec, int64_t),
+ void (*gen_round_vec)(unsigned, TCGv_vec,
+ TCGv_vec, TCGv_vec))
+{
+ TCGv_vec tmp = tcg_temp_new_vec_matching(t);
+ gen_round_vec(vece, tmp, a, b);
+ tcg_gen_and_vec(vece, tmp, tmp, tcg_constant_vec_matching(t, vece, 1));
+ gen_shr_vec(vece, a, a, 1);
+ gen_shr_vec(vece, b, b, 1);
+ tcg_gen_add_vec(vece, t, a, b);
+ tcg_gen_add_vec(vece, t, t, tmp);
+}
+
+static void gen_vavg_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_and_vec);
+}
+
+static void gen_vavg_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_and_vec);
+}
+
+static void gen_vavgr_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_sari_vec, tcg_gen_or_vec);
+}
+
+static void gen_vavgr_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vavg(vece, t, a, b, tcg_gen_shri_vec, tcg_gen_or_vec);
+}
+
+static void do_vavg_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vavg_s,
+ .fno = gen_helper_vavg_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavg_s,
+ .fno = gen_helper_vavg_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavg_s,
+ .fno = gen_helper_vavg_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vavg_s,
+ .fno = gen_helper_vavg_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+static void do_vavg_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vavg_u,
+ .fno = gen_helper_vavg_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavg_u,
+ .fno = gen_helper_vavg_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavg_u,
+ .fno = gen_helper_vavg_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vavg_u,
+ .fno = gen_helper_vavg_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vavg_b, LSX, gvec_vvv, MO_8, do_vavg_s)
+TRANS(vavg_h, LSX, gvec_vvv, MO_16, do_vavg_s)
+TRANS(vavg_w, LSX, gvec_vvv, MO_32, do_vavg_s)
+TRANS(vavg_d, LSX, gvec_vvv, MO_64, do_vavg_s)
+TRANS(vavg_bu, LSX, gvec_vvv, MO_8, do_vavg_u)
+TRANS(vavg_hu, LSX, gvec_vvv, MO_16, do_vavg_u)
+TRANS(vavg_wu, LSX, gvec_vvv, MO_32, do_vavg_u)
+TRANS(vavg_du, LSX, gvec_vvv, MO_64, do_vavg_u)
+
+static void do_vavgr_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vavgr_s,
+ .fno = gen_helper_vavgr_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavgr_s,
+ .fno = gen_helper_vavgr_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavgr_s,
+ .fno = gen_helper_vavgr_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vavgr_s,
+ .fno = gen_helper_vavgr_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+static void do_vavgr_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vavgr_u,
+ .fno = gen_helper_vavgr_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vavgr_u,
+ .fno = gen_helper_vavgr_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vavgr_u,
+ .fno = gen_helper_vavgr_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vavgr_u,
+ .fno = gen_helper_vavgr_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vavgr_b, LSX, gvec_vvv, MO_8, do_vavgr_s)
+TRANS(vavgr_h, LSX, gvec_vvv, MO_16, do_vavgr_s)
+TRANS(vavgr_w, LSX, gvec_vvv, MO_32, do_vavgr_s)
+TRANS(vavgr_d, LSX, gvec_vvv, MO_64, do_vavgr_s)
+TRANS(vavgr_bu, LSX, gvec_vvv, MO_8, do_vavgr_u)
+TRANS(vavgr_hu, LSX, gvec_vvv, MO_16, do_vavgr_u)
+TRANS(vavgr_wu, LSX, gvec_vvv, MO_32, do_vavgr_u)
+TRANS(vavgr_du, LSX, gvec_vvv, MO_64, do_vavgr_u)
+
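+/* Absolute difference: |a - b| = max(a, b) - min(a, b). */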
+static void gen_vabsd_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_smax_vec(vece, t, a, b);
+ tcg_gen_smin_vec(vece, a, a, b);
+ tcg_gen_sub_vec(vece, t, t, a);
+}
+
+static void do_vabsd_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_smax_vec, INDEX_op_smin_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vabsd_s,
+ .fno = gen_helper_vabsd_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vabsd_s,
+ .fno = gen_helper_vabsd_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vabsd_s,
+ .fno = gen_helper_vabsd_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vabsd_s,
+ .fno = gen_helper_vabsd_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+static void gen_vabsd_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ tcg_gen_umax_vec(vece, t, a, b);
+ tcg_gen_umin_vec(vece, a, a, b);
+ tcg_gen_sub_vec(vece, t, t, a);
+}
+
+static void do_vabsd_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_umax_vec, INDEX_op_umin_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vabsd_u,
+ .fno = gen_helper_vabsd_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vabsd_u,
+ .fno = gen_helper_vabsd_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vabsd_u,
+ .fno = gen_helper_vabsd_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vabsd_u,
+ .fno = gen_helper_vabsd_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vabsd_b, LSX, gvec_vvv, MO_8, do_vabsd_s)
+TRANS(vabsd_h, LSX, gvec_vvv, MO_16, do_vabsd_s)
+TRANS(vabsd_w, LSX, gvec_vvv, MO_32, do_vabsd_s)
+TRANS(vabsd_d, LSX, gvec_vvv, MO_64, do_vabsd_s)
+TRANS(vabsd_bu, LSX, gvec_vvv, MO_8, do_vabsd_u)
+TRANS(vabsd_hu, LSX, gvec_vvv, MO_16, do_vabsd_u)
+TRANS(vabsd_wu, LSX, gvec_vvv, MO_32, do_vabsd_u)
+TRANS(vabsd_du, LSX, gvec_vvv, MO_64, do_vabsd_u)
+
+static void gen_vadda(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+
+ tcg_gen_abs_vec(vece, t1, a);
+ tcg_gen_abs_vec(vece, t2, b);
+ tcg_gen_add_vec(vece, t, t1, t2);
+}
+
+static void do_vadda(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_abs_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vadda,
+ .fno = gen_helper_vadda_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vadda,
+ .fno = gen_helper_vadda_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vadda,
+ .fno = gen_helper_vadda_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vadda,
+ .fno = gen_helper_vadda_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vadda_b, LSX, gvec_vvv, MO_8, do_vadda)
+TRANS(vadda_h, LSX, gvec_vvv, MO_16, do_vadda)
+TRANS(vadda_w, LSX, gvec_vvv, MO_32, do_vadda)
+TRANS(vadda_d, LSX, gvec_vvv, MO_64, do_vadda)
+
+TRANS(vmax_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_smax)
+TRANS(vmax_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_smax)
+TRANS(vmax_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_smax)
+TRANS(vmax_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_smax)
+TRANS(vmax_bu, LSX, gvec_vvv, MO_8, tcg_gen_gvec_umax)
+TRANS(vmax_hu, LSX, gvec_vvv, MO_16, tcg_gen_gvec_umax)
+TRANS(vmax_wu, LSX, gvec_vvv, MO_32, tcg_gen_gvec_umax)
+TRANS(vmax_du, LSX, gvec_vvv, MO_64, tcg_gen_gvec_umax)
+
+TRANS(vmin_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_smin)
+TRANS(vmin_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_smin)
+TRANS(vmin_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_smin)
+TRANS(vmin_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_smin)
+TRANS(vmin_bu, LSX, gvec_vvv, MO_8, tcg_gen_gvec_umin)
+TRANS(vmin_hu, LSX, gvec_vvv, MO_16, tcg_gen_gvec_umin)
+TRANS(vmin_wu, LSX, gvec_vvv, MO_32, tcg_gen_gvec_umin)
+TRANS(vmin_du, LSX, gvec_vvv, MO_64, tcg_gen_gvec_umin)
+
+static void gen_vmini_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_smin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void gen_vmini_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_umin_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void gen_vmaxi_s(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_smax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void gen_vmaxi_u(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_umax_vec(vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void do_vmini_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vmini_s,
+ .fnoi = gen_helper_vmini_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmini_s,
+ .fnoi = gen_helper_vmini_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vmini_s,
+ .fnoi = gen_helper_vmini_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vmini_s,
+ .fnoi = gen_helper_vmini_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+static void do_vmini_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vmini_u,
+ .fnoi = gen_helper_vmini_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmini_u,
+ .fnoi = gen_helper_vmini_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vmini_u,
+ .fnoi = gen_helper_vmini_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vmini_u,
+ .fnoi = gen_helper_vmini_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vmini_b, LSX, gvec_vv_i, MO_8, do_vmini_s)
+TRANS(vmini_h, LSX, gvec_vv_i, MO_16, do_vmini_s)
+TRANS(vmini_w, LSX, gvec_vv_i, MO_32, do_vmini_s)
+TRANS(vmini_d, LSX, gvec_vv_i, MO_64, do_vmini_s)
+TRANS(vmini_bu, LSX, gvec_vv_i, MO_8, do_vmini_u)
+TRANS(vmini_hu, LSX, gvec_vv_i, MO_16, do_vmini_u)
+TRANS(vmini_wu, LSX, gvec_vv_i, MO_32, do_vmini_u)
+TRANS(vmini_du, LSX, gvec_vv_i, MO_64, do_vmini_u)
+
+static void do_vmaxi_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_smax_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vmaxi_s,
+ .fnoi = gen_helper_vmaxi_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmaxi_s,
+ .fnoi = gen_helper_vmaxi_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vmaxi_s,
+ .fnoi = gen_helper_vmaxi_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vmaxi_s,
+ .fnoi = gen_helper_vmaxi_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+static void do_vmaxi_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_umax_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vmaxi_u,
+ .fnoi = gen_helper_vmaxi_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmaxi_u,
+ .fnoi = gen_helper_vmaxi_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vmaxi_u,
+ .fnoi = gen_helper_vmaxi_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vmaxi_u,
+ .fnoi = gen_helper_vmaxi_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vmaxi_b, LSX, gvec_vv_i, MO_8, do_vmaxi_s)
+TRANS(vmaxi_h, LSX, gvec_vv_i, MO_16, do_vmaxi_s)
+TRANS(vmaxi_w, LSX, gvec_vv_i, MO_32, do_vmaxi_s)
+TRANS(vmaxi_d, LSX, gvec_vv_i, MO_64, do_vmaxi_s)
+TRANS(vmaxi_bu, LSX, gvec_vv_i, MO_8, do_vmaxi_u)
+TRANS(vmaxi_hu, LSX, gvec_vv_i, MO_16, do_vmaxi_u)
+TRANS(vmaxi_wu, LSX, gvec_vv_i, MO_32, do_vmaxi_u)
+TRANS(vmaxi_du, LSX, gvec_vv_i, MO_64, do_vmaxi_u)
+
+TRANS(vmul_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_mul)
+TRANS(vmul_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_mul)
+TRANS(vmul_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_mul)
+TRANS(vmul_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_mul)
+
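+/*
+ * High half of the product: compute the full double-width product with
+ * muls2/mulu2 and discard the low part.
+ */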
+static void gen_vmuh_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 discard = tcg_temp_new_i32();
+ tcg_gen_muls2_i32(discard, t, a, b);
+}
+
+static void gen_vmuh_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 discard = tcg_temp_new_i64();
+ tcg_gen_muls2_i64(discard, t, a, b);
+}
+
+static void do_vmuh_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen3 op[4] = {
+ {
+ .fno = gen_helper_vmuh_b,
+ .vece = MO_8
+ },
+ {
+ .fno = gen_helper_vmuh_h,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmuh_w,
+ .fno = gen_helper_vmuh_w,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmuh_d,
+ .fno = gen_helper_vmuh_d,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmuh_b, LSX, gvec_vvv, MO_8, do_vmuh_s)
+TRANS(vmuh_h, LSX, gvec_vvv, MO_16, do_vmuh_s)
+TRANS(vmuh_w, LSX, gvec_vvv, MO_32, do_vmuh_s)
+TRANS(vmuh_d, LSX, gvec_vvv, MO_64, do_vmuh_s)
+
+static void gen_vmuh_wu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 discard = tcg_temp_new_i32();
+ tcg_gen_mulu2_i32(discard, t, a, b);
+}
+
+static void gen_vmuh_du(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 discard = tcg_temp_new_i64();
+ tcg_gen_mulu2_i64(discard, t, a, b);
+}
+
+static void do_vmuh_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const GVecGen3 op[4] = {
+ {
+ .fno = gen_helper_vmuh_bu,
+ .vece = MO_8
+ },
+ {
+ .fno = gen_helper_vmuh_hu,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmuh_wu,
+ .fno = gen_helper_vmuh_wu,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmuh_du,
+ .fno = gen_helper_vmuh_du,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmuh_bu, LSX, gvec_vvv, MO_8, do_vmuh_u)
+TRANS(vmuh_hu, LSX, gvec_vvv, MO_16, do_vmuh_u)
+TRANS(vmuh_wu, LSX, gvec_vvv, MO_32, do_vmuh_u)
+TRANS(vmuh_du, LSX, gvec_vvv, MO_64, do_vmuh_u)
+
+static void gen_vmulwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ tcg_gen_shli_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t1, t1, halfbits);
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16s_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32s_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwev_s,
+ .fno = gen_helper_vmulwev_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwev_w_h,
+ .fniv = gen_vmulwev_s,
+ .fno = gen_helper_vmulwev_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwev_d_w,
+ .fniv = gen_vmulwev_s,
+ .fno = gen_helper_vmulwev_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwev_h_b, LSX, gvec_vvv, MO_8, do_vmulwev_s)
+TRANS(vmulwev_w_h, LSX, gvec_vvv, MO_16, do_vmulwev_s)
+TRANS(vmulwev_d_w, LSX, gvec_vvv, MO_32, do_vmulwev_s)
+
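+/*
+ * TCG provides mulsu2 (signed x unsigned) but not mulus2; since the
+ * product does not depend on operand order, swap the arguments.
+ */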
+static void tcg_gen_mulus2_i64(TCGv_i64 rl, TCGv_i64 rh,
+ TCGv_i64 arg1, TCGv_i64 arg2)
+{
+ tcg_gen_mulsu2_i64(rl, rh, arg2, arg1);
+}
+
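+/*
+ * Full 128-bit widening multiply: one 64-bit element is taken from vj
+ * and vk (even or odd lane chosen by idx1/idx2) and the double-width
+ * product fills both halves of vd.
+ */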
+#define VMUL_Q(NAME, FN, idx1, idx2) \
+static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \
+{ \
+ TCGv_i64 rh, rl, arg1, arg2; \
+ \
+ if (!avail_LSX(ctx)) { \
+ return false; \
+ } \
+                                                          \
+    CHECK_SXE;                                            \
+                                                          \
+    rh = tcg_temp_new_i64();                              \
+ rl = tcg_temp_new_i64(); \
+ arg1 = tcg_temp_new_i64(); \
+ arg2 = tcg_temp_new_i64(); \
+ \
+ get_vreg64(arg1, a->vj, idx1); \
+ get_vreg64(arg2, a->vk, idx2); \
+ \
+ tcg_gen_## FN ##_i64(rl, rh, arg1, arg2); \
+ \
+ set_vreg64(rh, a->vd, 1); \
+ set_vreg64(rl, a->vd, 0); \
+ \
+ return true; \
+}
+
+VMUL_Q(vmulwev_q_d, muls2, 0, 0)
+VMUL_Q(vmulwod_q_d, muls2, 1, 1)
+VMUL_Q(vmulwev_q_du, mulu2, 0, 0)
+VMUL_Q(vmulwod_q_du, mulu2, 1, 1)
+VMUL_Q(vmulwev_q_du_d, mulus2, 0, 0)
+VMUL_Q(vmulwod_q_du_d, mulus2, 1, 1)
+
+static void gen_vmulwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ tcg_gen_sari_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_sari_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwod_s,
+ .fno = gen_helper_vmulwod_h_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwod_w_h,
+ .fniv = gen_vmulwod_s,
+ .fno = gen_helper_vmulwod_w_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwod_d_w,
+ .fniv = gen_vmulwod_s,
+ .fno = gen_helper_vmulwod_d_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwod_h_b, LSX, gvec_vvv, MO_8, do_vmulwod_s)
+TRANS(vmulwod_w_h, LSX, gvec_vvv, MO_16, do_vmulwod_s)
+TRANS(vmulwod_d_w, LSX, gvec_vvv, MO_32, do_vmulwod_s)
+
+static void gen_vmulwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, mask;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, mask);
+ tcg_gen_and_vec(vece, t2, b, mask);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16u_i32(t2, b);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32u_i64(t2, b);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwev_u,
+ .fno = gen_helper_vmulwev_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwev_w_hu,
+ .fniv = gen_vmulwev_u,
+ .fno = gen_helper_vmulwev_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwev_d_wu,
+ .fniv = gen_vmulwev_u,
+ .fno = gen_helper_vmulwev_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwev_h_bu, LSX, gvec_vvv, MO_8, do_vmulwev_u)
+TRANS(vmulwev_w_hu, LSX, gvec_vvv, MO_16, do_vmulwev_u)
+TRANS(vmulwev_d_wu, LSX, gvec_vvv, MO_32, do_vmulwev_u)
+
+static void gen_vmulwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_shri_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_shri_i32(t2, b, 16);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_shri_i64(t2, b, 32);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwod_u,
+ .fno = gen_helper_vmulwod_h_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwod_w_hu,
+ .fniv = gen_vmulwod_u,
+ .fno = gen_helper_vmulwod_w_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwod_d_wu,
+ .fniv = gen_vmulwod_u,
+ .fno = gen_helper_vmulwod_d_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwod_h_bu, LSX, gvec_vvv, MO_8, do_vmulwod_u)
+TRANS(vmulwod_w_hu, LSX, gvec_vvv, MO_16, do_vmulwod_u)
+TRANS(vmulwod_d_wu, LSX, gvec_vvv, MO_32, do_vmulwod_u)
+
+static void gen_vmulwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, mask;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, mask);
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_ext16u_i32(t1, a);
+ tcg_gen_ext16s_i32(t2, b);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_ext32u_i64(t1, a);
+ tcg_gen_ext32s_i64(t2, b);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwev_u_s,
+ .fno = gen_helper_vmulwev_h_bu_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwev_w_hu_h,
+ .fniv = gen_vmulwev_u_s,
+ .fno = gen_helper_vmulwev_w_hu_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwev_d_wu_w,
+ .fniv = gen_vmulwev_u_s,
+ .fno = gen_helper_vmulwev_d_wu_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwev_h_bu_b, LSX, gvec_vvv, MO_8, do_vmulwev_u_s)
+TRANS(vmulwev_w_hu_h, LSX, gvec_vvv, MO_16, do_vmulwev_u_s)
+TRANS(vmulwev_d_wu_w, LSX, gvec_vvv, MO_32, do_vmulwev_u_s)
+
+static void gen_vmulwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t, t1, t2);
+}
+
+static void gen_vmulwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1, t2;
+
+ t1 = tcg_temp_new_i32();
+ t2 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t1, a, 16);
+ tcg_gen_sari_i32(t2, b, 16);
+ tcg_gen_mul_i32(t, t1, t2);
+}
+
+static void gen_vmulwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1, t2;
+
+ t1 = tcg_temp_new_i64();
+ t2 = tcg_temp_new_i64();
+ tcg_gen_shri_i64(t1, a, 32);
+ tcg_gen_sari_i64(t2, b, 32);
+ tcg_gen_mul_i64(t, t1, t2);
+}
+
+static void do_vmulwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_mul_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmulwod_u_s,
+ .fno = gen_helper_vmulwod_h_bu_b,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmulwod_w_hu_h,
+ .fniv = gen_vmulwod_u_s,
+ .fno = gen_helper_vmulwod_w_hu_h,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmulwod_d_wu_w,
+ .fniv = gen_vmulwod_u_s,
+ .fno = gen_helper_vmulwod_d_wu_w,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmulwod_h_bu_b, LSX, gvec_vvv, MO_8, do_vmulwod_u_s)
+TRANS(vmulwod_w_hu_h, LSX, gvec_vvv, MO_16, do_vmulwod_u_s)
+TRANS(vmulwod_d_wu_w, LSX, gvec_vvv, MO_32, do_vmulwod_u_s)
+
+static void gen_vmadd(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1;
+
+ t1 = tcg_temp_new_vec_matching(t);
+ tcg_gen_mul_vec(vece, t1, a, b);
+ tcg_gen_add_vec(vece, t, t, t1);
+}
+
+static void gen_vmadd_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ tcg_gen_mul_i32(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmadd_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ tcg_gen_mul_i64(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmadd(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vmadd,
+ .fno = gen_helper_vmadd_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmadd,
+ .fno = gen_helper_vmadd_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmadd_w,
+ .fniv = gen_vmadd,
+ .fno = gen_helper_vmadd_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmadd_d,
+ .fniv = gen_vmadd,
+ .fno = gen_helper_vmadd_d,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmadd_b, LSX, gvec_vvv, MO_8, do_vmadd)
+TRANS(vmadd_h, LSX, gvec_vvv, MO_16, do_vmadd)
+TRANS(vmadd_w, LSX, gvec_vvv, MO_32, do_vmadd)
+TRANS(vmadd_d, LSX, gvec_vvv, MO_64, do_vmadd)
+
+static void gen_vmsub(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1;
+
+ t1 = tcg_temp_new_vec_matching(t);
+ tcg_gen_mul_vec(vece, t1, a, b);
+ tcg_gen_sub_vec(vece, t, t, t1);
+}
+
+static void gen_vmsub_w(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ tcg_gen_mul_i32(t1, a, b);
+ tcg_gen_sub_i32(t, t, t1);
+}
+
+static void gen_vmsub_d(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ tcg_gen_mul_i64(t1, a, b);
+ tcg_gen_sub_i64(t, t, t1);
+}
+
+static void do_vmsub(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, INDEX_op_sub_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vmsub,
+ .fno = gen_helper_vmsub_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vmsub,
+ .fno = gen_helper_vmsub_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmsub_w,
+ .fniv = gen_vmsub,
+ .fno = gen_helper_vmsub_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmsub_d,
+ .fniv = gen_vmsub,
+ .fno = gen_helper_vmsub_d,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmsub_b, LSX, gvec_vvv, MO_8, do_vmsub)
+TRANS(vmsub_h, LSX, gvec_vvv, MO_16, do_vmsub)
+TRANS(vmsub_w, LSX, gvec_vvv, MO_32, do_vmsub)
+TRANS(vmsub_d, LSX, gvec_vvv, MO_64, do_vmsub)
+
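+/*
+ * Widening multiply-accumulate, even signed elements: both sources are
+ * sign-extended in place, multiplied, and the product is added to the
+ * old destination (load_dest is set in do_vmaddwev_s below).
+ */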
+static void gen_vmaddwev_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_temp_new_vec_matching(t);
+ tcg_gen_shli_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t1, t1, halfbits);
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+ tcg_gen_mul_vec(vece, t3, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t3);
+}
+
+static void gen_vmaddwev_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwev_w_h(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwev_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwev_d_w(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwev_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwev_s,
+ .fno = gen_helper_vmaddwev_h_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwev_w_h,
+ .fniv = gen_vmaddwev_s,
+ .fno = gen_helper_vmaddwev_w_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwev_d_w,
+ .fniv = gen_vmaddwev_s,
+ .fno = gen_helper_vmaddwev_d_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwev_h_b, LSX, gvec_vvv, MO_8, do_vmaddwev_s)
+TRANS(vmaddwev_w_h, LSX, gvec_vvv, MO_16, do_vmaddwev_s)
+TRANS(vmaddwev_d_w, LSX, gvec_vvv, MO_32, do_vmaddwev_s)
+
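+/*
+ * 128-bit widening multiply-accumulate: one 64-bit element is taken
+ * from vj and vk (even or odd half, per idx1/idx2), multiplied into a
+ * 128-bit product, and added to the full 128-bit vd via add2.
+ */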
+#define VMADD_Q(NAME, FN, idx1, idx2) \
+static bool trans_## NAME (DisasContext *ctx, arg_vvv *a) \
+{ \
+ TCGv_i64 rh, rl, arg1, arg2, th, tl; \
+ \
+ if (!avail_LSX(ctx)) { \
+ return false; \
+ } \
+ \
+ CHECK_SXE; \
+ \
+ rh = tcg_temp_new_i64(); \
+ rl = tcg_temp_new_i64(); \
+ arg1 = tcg_temp_new_i64(); \
+ arg2 = tcg_temp_new_i64(); \
+ th = tcg_temp_new_i64(); \
+ tl = tcg_temp_new_i64(); \
+ \
+ get_vreg64(arg1, a->vj, idx1); \
+ get_vreg64(arg2, a->vk, idx2); \
+ get_vreg64(rh, a->vd, 1); \
+ get_vreg64(rl, a->vd, 0); \
+ \
+ tcg_gen_## FN ##_i64(tl, th, arg1, arg2); \
+ tcg_gen_add2_i64(rl, rh, rl, rh, tl, th); \
+ \
+ set_vreg64(rh, a->vd, 1); \
+ set_vreg64(rl, a->vd, 0); \
+ \
+ return true; \
+}
+
+VMADD_Q(vmaddwev_q_d, muls2, 0, 0)
+VMADD_Q(vmaddwod_q_d, muls2, 1, 1)
+VMADD_Q(vmaddwev_q_du, mulu2, 0, 0)
+VMADD_Q(vmaddwod_q_du, mulu2, 1, 1)
+VMADD_Q(vmaddwev_q_du_d, mulus2, 0, 0)
+VMADD_Q(vmaddwod_q_du_d, mulus2, 1, 1)
+
+static void gen_vmaddwod_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_temp_new_vec_matching(t);
+ tcg_gen_sari_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t3, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t3);
+}
+
+static void gen_vmaddwod_w_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwod_w_h(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwod_d_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwod_d_w(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwod_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_sari_vec, INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwod_s,
+ .fno = gen_helper_vmaddwod_h_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwod_w_h,
+ .fniv = gen_vmaddwod_s,
+ .fno = gen_helper_vmaddwod_w_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwod_d_w,
+ .fniv = gen_vmaddwod_s,
+ .fno = gen_helper_vmaddwod_d_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwod_h_b, LSX, gvec_vvv, MO_8, do_vmaddwod_s)
+TRANS(vmaddwod_w_h, LSX, gvec_vvv, MO_16, do_vmaddwod_s)
+TRANS(vmaddwod_d_w, LSX, gvec_vvv, MO_32, do_vmaddwod_s)
+
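+/*
+ * Unsigned variant: the even elements of both sources are
+ * zero-extended by masking off the high half before the
+ * multiply-accumulate.
+ */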
+static void gen_vmaddwev_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, mask;
+
+ t1 = tcg_temp_new_vec_matching(t);
+ t2 = tcg_temp_new_vec_matching(b);
+ mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, mask);
+ tcg_gen_and_vec(vece, t2, b, mask);
+ tcg_gen_mul_vec(vece, t1, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t1);
+}
+
+static void gen_vmaddwev_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwev_w_hu(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwev_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwev_d_wu(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwev_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwev_u,
+ .fno = gen_helper_vmaddwev_h_bu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwev_w_hu,
+ .fniv = gen_vmaddwev_u,
+ .fno = gen_helper_vmaddwev_w_hu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwev_d_wu,
+ .fniv = gen_vmaddwev_u,
+ .fno = gen_helper_vmaddwev_d_wu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwev_h_bu, LSX, gvec_vvv, MO_8, do_vmaddwev_u)
+TRANS(vmaddwev_w_hu, LSX, gvec_vvv, MO_16, do_vmaddwev_u)
+TRANS(vmaddwev_d_wu, LSX, gvec_vvv, MO_32, do_vmaddwev_u)
+
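+/* Odd unsigned elements are zero-extended with a logical shift right. */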
+static void gen_vmaddwod_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_temp_new_vec_matching(t);
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_shri_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t3, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t3);
+}
+
+static void gen_vmaddwod_w_hu(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwod_w_hu(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwod_d_wu(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwod_d_wu(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwod_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwod_u,
+ .fno = gen_helper_vmaddwod_h_bu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwod_w_hu,
+ .fniv = gen_vmaddwod_u,
+ .fno = gen_helper_vmaddwod_w_hu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwod_d_wu,
+ .fniv = gen_vmaddwod_u,
+ .fno = gen_helper_vmaddwod_d_wu,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwod_h_bu, LSX, gvec_vvv, MO_8, do_vmaddwod_u)
+TRANS(vmaddwod_w_hu, LSX, gvec_vvv, MO_16, do_vmaddwod_u)
+TRANS(vmaddwod_d_wu, LSX, gvec_vvv, MO_32, do_vmaddwod_u)
+
+static void gen_vmaddwev_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, mask;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ mask = tcg_constant_vec_matching(t, vece, MAKE_64BIT_MASK(0, 4 << vece));
+ tcg_gen_and_vec(vece, t1, a, mask);
+ tcg_gen_shli_vec(vece, t2, b, halfbits);
+ tcg_gen_sari_vec(vece, t2, t2, halfbits);
+ tcg_gen_mul_vec(vece, t1, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t1);
+}
+
+static void gen_vmaddwev_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwev_w_hu_h(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwev_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwev_d_wu_w(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwev_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_sari_vec,
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwev_u_s,
+ .fno = gen_helper_vmaddwev_h_bu_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwev_w_hu_h,
+ .fniv = gen_vmaddwev_u_s,
+ .fno = gen_helper_vmaddwev_w_hu_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwev_d_wu_w,
+ .fniv = gen_vmaddwev_u_s,
+ .fno = gen_helper_vmaddwev_d_wu_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwev_h_bu_b, LSX, gvec_vvv, MO_8, do_vmaddwev_u_s)
+TRANS(vmaddwev_w_hu_h, LSX, gvec_vvv, MO_16, do_vmaddwev_u_s)
+TRANS(vmaddwev_d_wu_w, LSX, gvec_vvv, MO_32, do_vmaddwev_u_s)
+
+static void gen_vmaddwod_u_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, t2, t3;
+ int halfbits = 4 << vece;
+
+ t1 = tcg_temp_new_vec_matching(a);
+ t2 = tcg_temp_new_vec_matching(b);
+ t3 = tcg_temp_new_vec_matching(t);
+ tcg_gen_shri_vec(vece, t1, a, halfbits);
+ tcg_gen_sari_vec(vece, t2, b, halfbits);
+ tcg_gen_mul_vec(vece, t3, t1, t2);
+ tcg_gen_add_vec(vece, t, t, t3);
+}
+
+static void gen_vmaddwod_w_hu_h(TCGv_i32 t, TCGv_i32 a, TCGv_i32 b)
+{
+ TCGv_i32 t1;
+
+ t1 = tcg_temp_new_i32();
+ gen_vmulwod_w_hu_h(t1, a, b);
+ tcg_gen_add_i32(t, t, t1);
+}
+
+static void gen_vmaddwod_d_wu_w(TCGv_i64 t, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1;
+
+ t1 = tcg_temp_new_i64();
+ gen_vmulwod_d_wu_w(t1, a, b);
+ tcg_gen_add_i64(t, t, t1);
+}
+
+static void do_vmaddwod_u_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shri_vec, INDEX_op_sari_vec,
+ INDEX_op_mul_vec, INDEX_op_add_vec, 0
+ };
+ static const GVecGen3 op[3] = {
+ {
+ .fniv = gen_vmaddwod_u_s,
+ .fno = gen_helper_vmaddwod_h_bu_b,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fni4 = gen_vmaddwod_w_hu_h,
+ .fniv = gen_vmaddwod_u_s,
+ .fno = gen_helper_vmaddwod_w_hu_h,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fni8 = gen_vmaddwod_d_wu_w,
+ .fniv = gen_vmaddwod_u_s,
+ .fno = gen_helper_vmaddwod_d_wu_w,
+ .load_dest = true,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vmaddwod_h_bu_b, LSX, gvec_vvv, MO_8, do_vmaddwod_u_s)
+TRANS(vmaddwod_w_hu_h, LSX, gvec_vvv, MO_16, do_vmaddwod_u_s)
+TRANS(vmaddwod_d_wu_w, LSX, gvec_vvv, MO_32, do_vmaddwod_u_s)
+
+TRANS(vdiv_b, LSX, gen_vvv, gen_helper_vdiv_b)
+TRANS(vdiv_h, LSX, gen_vvv, gen_helper_vdiv_h)
+TRANS(vdiv_w, LSX, gen_vvv, gen_helper_vdiv_w)
+TRANS(vdiv_d, LSX, gen_vvv, gen_helper_vdiv_d)
+TRANS(vdiv_bu, LSX, gen_vvv, gen_helper_vdiv_bu)
+TRANS(vdiv_hu, LSX, gen_vvv, gen_helper_vdiv_hu)
+TRANS(vdiv_wu, LSX, gen_vvv, gen_helper_vdiv_wu)
+TRANS(vdiv_du, LSX, gen_vvv, gen_helper_vdiv_du)
+TRANS(vmod_b, LSX, gen_vvv, gen_helper_vmod_b)
+TRANS(vmod_h, LSX, gen_vvv, gen_helper_vmod_h)
+TRANS(vmod_w, LSX, gen_vvv, gen_helper_vmod_w)
+TRANS(vmod_d, LSX, gen_vvv, gen_helper_vmod_d)
+TRANS(vmod_bu, LSX, gen_vvv, gen_helper_vmod_bu)
+TRANS(vmod_hu, LSX, gen_vvv, gen_helper_vmod_hu)
+TRANS(vmod_wu, LSX, gen_vvv, gen_helper_vmod_wu)
+TRANS(vmod_du, LSX, gen_vvv, gen_helper_vmod_du)
+
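+/*
+ * Signed saturation clamps each element to [~max, max], where
+ * max = 2^imm - 1 and ~max = -2^imm.
+ */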
+static void gen_vsat_s(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max)
+{
+ TCGv_vec min;
+
+ min = tcg_temp_new_vec_matching(t);
+ tcg_gen_not_vec(vece, min, max);
+ tcg_gen_smax_vec(vece, t, a, min);
+ tcg_gen_smin_vec(vece, t, t, max);
+}
+
+static void do_vsat_s(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_smax_vec, INDEX_op_smin_vec, 0
+ };
+ static const GVecGen2s op[4] = {
+ {
+ .fniv = gen_vsat_s,
+ .fno = gen_helper_vsat_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vsat_s,
+ .fno = gen_helper_vsat_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vsat_s,
+ .fno = gen_helper_vsat_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vsat_s,
+ .fno = gen_helper_vsat_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2s(vd_ofs, vj_ofs, oprsz, maxsz,
+ tcg_constant_i64((1ll << imm) - 1), &op[vece]);
+}
+
+TRANS(vsat_b, LSX, gvec_vv_i, MO_8, do_vsat_s)
+TRANS(vsat_h, LSX, gvec_vv_i, MO_16, do_vsat_s)
+TRANS(vsat_w, LSX, gvec_vv_i, MO_32, do_vsat_s)
+TRANS(vsat_d, LSX, gvec_vv_i, MO_64, do_vsat_s)
+
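+/* Unsigned saturation is just a clamp against the upper bound. */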
+static void gen_vsat_u(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec max)
+{
+ tcg_gen_umin_vec(vece, t, a, max);
+}
+
+static void do_vsat_u(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ uint64_t max;
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_umin_vec, 0
+ };
+ static const GVecGen2s op[4] = {
+ {
+ .fniv = gen_vsat_u,
+ .fno = gen_helper_vsat_bu,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vsat_u,
+ .fno = gen_helper_vsat_hu,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vsat_u,
+ .fno = gen_helper_vsat_wu,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vsat_u,
+ .fno = gen_helper_vsat_du,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ max = (imm == 0x3f) ? UINT64_MAX : (1ull << (imm + 1)) - 1;
+ tcg_gen_gvec_2s(vd_ofs, vj_ofs, oprsz, maxsz,
+ tcg_constant_i64(max), &op[vece]);
+}
+
+TRANS(vsat_bu, LSX, gvec_vv_i, MO_8, do_vsat_u)
+TRANS(vsat_hu, LSX, gvec_vv_i, MO_16, do_vsat_u)
+TRANS(vsat_wu, LSX, gvec_vv_i, MO_32, do_vsat_u)
+TRANS(vsat_du, LSX, gvec_vv_i, MO_64, do_vsat_u)
+
+TRANS(vexth_h_b, LSX, gen_vv, gen_helper_vexth_h_b)
+TRANS(vexth_w_h, LSX, gen_vv, gen_helper_vexth_w_h)
+TRANS(vexth_d_w, LSX, gen_vv, gen_helper_vexth_d_w)
+TRANS(vexth_q_d, LSX, gen_vv, gen_helper_vexth_q_d)
+TRANS(vexth_hu_bu, LSX, gen_vv, gen_helper_vexth_hu_bu)
+TRANS(vexth_wu_hu, LSX, gen_vv, gen_helper_vexth_wu_hu)
+TRANS(vexth_du_wu, LSX, gen_vv, gen_helper_vexth_du_wu)
+TRANS(vexth_qu_du, LSX, gen_vv, gen_helper_vexth_qu_du)
+
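+/*
+ * vsigncov: per element, vd = -vk if vj < 0, 0 if vj == 0, vk
+ * otherwise, built from a negate and two cmpsel operations.
+ */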
+static void gen_vsigncov(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ TCGv_vec t1, zero;
+
+ t1 = tcg_temp_new_vec_matching(t);
+ zero = tcg_constant_vec_matching(t, vece, 0);
+
+ tcg_gen_neg_vec(vece, t1, b);
+ tcg_gen_cmpsel_vec(TCG_COND_LT, vece, t, a, zero, t1, b);
+ tcg_gen_cmpsel_vec(TCG_COND_EQ, vece, t, a, zero, zero, t);
+}
+
+static void do_vsigncov(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_neg_vec, INDEX_op_cmpsel_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vsigncov,
+ .fno = gen_helper_vsigncov_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vsigncov,
+ .fno = gen_helper_vsigncov_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vsigncov,
+ .fno = gen_helper_vsigncov_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vsigncov,
+ .fno = gen_helper_vsigncov_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vsigncov_b, LSX, gvec_vvv, MO_8, do_vsigncov)
+TRANS(vsigncov_h, LSX, gvec_vvv, MO_16, do_vsigncov)
+TRANS(vsigncov_w, LSX, gvec_vvv, MO_32, do_vsigncov)
+TRANS(vsigncov_d, LSX, gvec_vvv, MO_64, do_vsigncov)
+
+TRANS(vmskltz_b, LSX, gen_vv, gen_helper_vmskltz_b)
+TRANS(vmskltz_h, LSX, gen_vv, gen_helper_vmskltz_h)
+TRANS(vmskltz_w, LSX, gen_vv, gen_helper_vmskltz_w)
+TRANS(vmskltz_d, LSX, gen_vv, gen_helper_vmskltz_d)
+TRANS(vmskgez_b, LSX, gen_vv, gen_helper_vmskgez_b)
+TRANS(vmsknz_b, LSX, gen_vv, gen_helper_vmsknz_b)
+
+#define EXPAND_BYTE(bit) ((uint64_t)(bit ? 0xff : 0))
+
+static uint64_t vldi_get_value(DisasContext *ctx, uint32_t imm)
+{
+ int mode;
+ uint64_t data, t;
+
+ /*
+ * Bits [11:8] of imm select the mode; valid modes are 0-12,
+ * all other values are invalid.
+ */
+ mode = (imm >> 8) & 0xf;
+ t = imm & 0xff;
+ switch (mode) {
+ case 0:
+ /* data: {2{24'0, imm[7:0]}} */
+ data = (t << 32) | t;
+ break;
+ case 1:
+ /* data: {2{16'0, imm[7:0], 8'0}} */
+ data = (t << 40) | (t << 8);
+ break;
+ case 2:
+ /* data: {2{8'0, imm[7:0], 16'0}} */
+ data = (t << 48) | (t << 16);
+ break;
+ case 3:
+ /* data: {2{imm[7:0], 24'0}} */
+ data = (t << 56) | (t << 24);
+ break;
+ case 4:
+ /* data: {4{8'0, imm[7:0]}} */
+ data = (t << 48) | (t << 32) | (t << 16) | t;
+ break;
+ case 5:
+ /* data: {4{imm[7:0], 8'0}} */
+ data = (t << 56) | (t << 40) | (t << 24) | (t << 8);
+ break;
+ case 6:
+ /* data: {2{16'0, imm[7:0], 8'1}} */
+ data = (t << 40) | ((uint64_t)0xff << 32) | (t << 8) | 0xff;
+ break;
+ case 7:
+ /* data: {2{8'0, imm[7:0], 16'1}} */
+ data = (t << 48) | ((uint64_t)0xffff << 32) | (t << 16) | 0xffff;
+ break;
+ case 8:
+ /* data: {8{imm[7:0]}} */
+ data = (t << 56) | (t << 48) | (t << 40) | (t << 32) |
+ (t << 24) | (t << 16) | (t << 8) | t;
+ break;
+ case 9:
+ /* data: {{8{imm[7]}}, ..., {8{imm[0]}}} */
+ {
+ uint64_t b0, b1, b2, b3, b4, b5, b6, b7;
+ b0 = t & 0x1;
+ b1 = (t & 0x2) >> 1;
+ b2 = (t & 0x4) >> 2;
+ b3 = (t & 0x8) >> 3;
+ b4 = (t & 0x10) >> 4;
+ b5 = (t & 0x20) >> 5;
+ b6 = (t & 0x40) >> 6;
+ b7 = (t & 0x80) >> 7;
+ data = (EXPAND_BYTE(b7) << 56) |
+ (EXPAND_BYTE(b6) << 48) |
+ (EXPAND_BYTE(b5) << 40) |
+ (EXPAND_BYTE(b4) << 32) |
+ (EXPAND_BYTE(b3) << 24) |
+ (EXPAND_BYTE(b2) << 16) |
+ (EXPAND_BYTE(b1) << 8) |
+ EXPAND_BYTE(b0);
+ }
+ break;
+ case 10:
+ /* data: {2{imm[7], ~imm[6], {5{imm[6]}}, imm[5:0], 19'0}} */
+ {
+ uint64_t b6, b7;
+ uint64_t t0, t1;
+ b6 = (imm & 0x40) >> 6;
+ b7 = (imm & 0x80) >> 7;
+ t0 = (imm & 0x3f);
+ t1 = (b7 << 6) | ((1 - b6) << 5) | (uint64_t)(b6 ? 0x1f : 0);
+ data = (t1 << 57) | (t0 << 51) | (t1 << 25) | (t0 << 19);
+ }
+ break;
+ case 11:
+ /* data: {32'0, imm[7], ~imm[6], {5{imm[6]}}, imm[5:0], 19'0} */
+ {
+ uint64_t b6, b7;
+ uint64_t t0, t1;
+ b6 = (imm & 0x40) >> 6;
+ b7 = (imm & 0x80) >> 7;
+ t0 = (imm & 0x3f);
+ t1 = (b7 << 6) | ((1 - b6) << 5) | (b6 ? 0x1f : 0);
+ data = (t1 << 25) | (t0 << 19);
+ }
+ break;
+ case 12:
+ /* data: {imm[7], ~imm[6], {8{imm[6]}}, imm[5:0], 48'0} */
+ {
+ uint64_t b6, b7;
+ uint64_t t0, t1;
+ b6 = (imm & 0x40) >> 6;
+ b7 = (imm & 0x80) >> 7;
+ t0 = (imm & 0x3f);
+ t1 = (b7 << 9) | ((1 - b6) << 8) | (b6 ? 0xff : 0);
+ data = (t1 << 54) | (t0 << 48);
+ }
+ break;
+ default:
+ generate_exception(ctx, EXCCODE_INE);
+ g_assert_not_reached();
+ }
+ return data;
+}
+
+static bool trans_vldi(DisasContext *ctx, arg_vldi *a)
+{
+ int sel, vece;
+ uint64_t value;
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
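+ /*
+ * imm[12] selects the format: either the extended modes decoded by
+ * vldi_get_value(), or a sign-extended 10-bit immediate replicated
+ * at the element size given by imm[11:10].
+ */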
+ sel = (a->imm >> 12) & 0x1;
+
+ if (sel) {
+ value = vldi_get_value(ctx, a->imm);
+ vece = MO_64;
+ } else {
+ value = ((int32_t)(a->imm << 22)) >> 22;
+ vece = (a->imm >> 10) & 0x3;
+ }
+
+ tcg_gen_gvec_dup_i64(vece, vec_full_offset(a->vd), 16, ctx->vl/8,
+ tcg_constant_i64(value));
+ return true;
+}
+
+TRANS(vand_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_and)
+TRANS(vor_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_or)
+TRANS(vxor_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_xor)
+TRANS(vnor_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_nor)
+
+static bool trans_vandn_v(DisasContext *ctx, arg_vvv *a)
+{
+ uint32_t vd_ofs, vj_ofs, vk_ofs;
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+ vk_ofs = vec_full_offset(a->vk);
+
+ tcg_gen_gvec_andc(MO_64, vd_ofs, vk_ofs, vj_ofs, 16, ctx->vl/8);
+ return true;
+}
+
+TRANS(vorn_v, LSX, gvec_vvv, MO_64, tcg_gen_gvec_orc)
+TRANS(vandi_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_andi)
+TRANS(vori_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_ori)
+TRANS(vxori_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_xori)
+
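+/* vnori.b: vd = ~(vj | imm), with the immediate replicated per byte. */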
+static void gen_vnori(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ TCGv_vec t1;
+
+ t1 = tcg_constant_vec_matching(t, vece, imm);
+ tcg_gen_nor_vec(vece, t, a, t1);
+}
+
+static void gen_vnori_b(TCGv_i64 t, TCGv_i64 a, int64_t imm)
+{
+ tcg_gen_movi_i64(t, dup_const(MO_8, imm));
+ tcg_gen_nor_i64(t, a, t);
+}
+
+static void do_vnori_b(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_nor_vec, 0
+ };
+ static const GVecGen2i op = {
+ .fni8 = gen_vnori_b,
+ .fniv = gen_vnori,
+ .fnoi = gen_helper_vnori_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op);
+}
+
+TRANS(vnori_b, LSX, gvec_vv_i, MO_8, do_vnori_b)
+
+TRANS(vsll_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_shlv)
+TRANS(vsll_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_shlv)
+TRANS(vsll_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_shlv)
+TRANS(vsll_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_shlv)
+TRANS(vslli_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_shli)
+TRANS(vslli_h, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_shli)
+TRANS(vslli_w, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_shli)
+TRANS(vslli_d, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_shli)
+
+TRANS(vsrl_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_shrv)
+TRANS(vsrl_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_shrv)
+TRANS(vsrl_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_shrv)
+TRANS(vsrl_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_shrv)
+TRANS(vsrli_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_shri)
+TRANS(vsrli_h, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_shri)
+TRANS(vsrli_w, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_shri)
+TRANS(vsrli_d, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_shri)
+
+TRANS(vsra_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_sarv)
+TRANS(vsra_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_sarv)
+TRANS(vsra_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_sarv)
+TRANS(vsra_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_sarv)
+TRANS(vsrai_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_sari)
+TRANS(vsrai_h, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_sari)
+TRANS(vsrai_w, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_sari)
+TRANS(vsrai_d, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_sari)
+
+TRANS(vrotr_b, LSX, gvec_vvv, MO_8, tcg_gen_gvec_rotrv)
+TRANS(vrotr_h, LSX, gvec_vvv, MO_16, tcg_gen_gvec_rotrv)
+TRANS(vrotr_w, LSX, gvec_vvv, MO_32, tcg_gen_gvec_rotrv)
+TRANS(vrotr_d, LSX, gvec_vvv, MO_64, tcg_gen_gvec_rotrv)
+TRANS(vrotri_b, LSX, gvec_vv_i, MO_8, tcg_gen_gvec_rotri)
+TRANS(vrotri_h, LSX, gvec_vv_i, MO_16, tcg_gen_gvec_rotri)
+TRANS(vrotri_w, LSX, gvec_vv_i, MO_32, tcg_gen_gvec_rotri)
+TRANS(vrotri_d, LSX, gvec_vv_i, MO_64, tcg_gen_gvec_rotri)
+
+TRANS(vsllwil_h_b, LSX, gen_vv_i, gen_helper_vsllwil_h_b)
+TRANS(vsllwil_w_h, LSX, gen_vv_i, gen_helper_vsllwil_w_h)
+TRANS(vsllwil_d_w, LSX, gen_vv_i, gen_helper_vsllwil_d_w)
+TRANS(vextl_q_d, LSX, gen_vv, gen_helper_vextl_q_d)
+TRANS(vsllwil_hu_bu, LSX, gen_vv_i, gen_helper_vsllwil_hu_bu)
+TRANS(vsllwil_wu_hu, LSX, gen_vv_i, gen_helper_vsllwil_wu_hu)
+TRANS(vsllwil_du_wu, LSX, gen_vv_i, gen_helper_vsllwil_du_wu)
+TRANS(vextl_qu_du, LSX, gen_vv, gen_helper_vextl_qu_du)
+
+TRANS(vsrlr_b, LSX, gen_vvv, gen_helper_vsrlr_b)
+TRANS(vsrlr_h, LSX, gen_vvv, gen_helper_vsrlr_h)
+TRANS(vsrlr_w, LSX, gen_vvv, gen_helper_vsrlr_w)
+TRANS(vsrlr_d, LSX, gen_vvv, gen_helper_vsrlr_d)
+TRANS(vsrlri_b, LSX, gen_vv_i, gen_helper_vsrlri_b)
+TRANS(vsrlri_h, LSX, gen_vv_i, gen_helper_vsrlri_h)
+TRANS(vsrlri_w, LSX, gen_vv_i, gen_helper_vsrlri_w)
+TRANS(vsrlri_d, LSX, gen_vv_i, gen_helper_vsrlri_d)
+
+TRANS(vsrar_b, LSX, gen_vvv, gen_helper_vsrar_b)
+TRANS(vsrar_h, LSX, gen_vvv, gen_helper_vsrar_h)
+TRANS(vsrar_w, LSX, gen_vvv, gen_helper_vsrar_w)
+TRANS(vsrar_d, LSX, gen_vvv, gen_helper_vsrar_d)
+TRANS(vsrari_b, LSX, gen_vv_i, gen_helper_vsrari_b)
+TRANS(vsrari_h, LSX, gen_vv_i, gen_helper_vsrari_h)
+TRANS(vsrari_w, LSX, gen_vv_i, gen_helper_vsrari_w)
+TRANS(vsrari_d, LSX, gen_vv_i, gen_helper_vsrari_d)
+
+TRANS(vsrln_b_h, LSX, gen_vvv, gen_helper_vsrln_b_h)
+TRANS(vsrln_h_w, LSX, gen_vvv, gen_helper_vsrln_h_w)
+TRANS(vsrln_w_d, LSX, gen_vvv, gen_helper_vsrln_w_d)
+TRANS(vsran_b_h, LSX, gen_vvv, gen_helper_vsran_b_h)
+TRANS(vsran_h_w, LSX, gen_vvv, gen_helper_vsran_h_w)
+TRANS(vsran_w_d, LSX, gen_vvv, gen_helper_vsran_w_d)
+
+TRANS(vsrlni_b_h, LSX, gen_vv_i, gen_helper_vsrlni_b_h)
+TRANS(vsrlni_h_w, LSX, gen_vv_i, gen_helper_vsrlni_h_w)
+TRANS(vsrlni_w_d, LSX, gen_vv_i, gen_helper_vsrlni_w_d)
+TRANS(vsrlni_d_q, LSX, gen_vv_i, gen_helper_vsrlni_d_q)
+TRANS(vsrani_b_h, LSX, gen_vv_i, gen_helper_vsrani_b_h)
+TRANS(vsrani_h_w, LSX, gen_vv_i, gen_helper_vsrani_h_w)
+TRANS(vsrani_w_d, LSX, gen_vv_i, gen_helper_vsrani_w_d)
+TRANS(vsrani_d_q, LSX, gen_vv_i, gen_helper_vsrani_d_q)
+
+TRANS(vsrlrn_b_h, LSX, gen_vvv, gen_helper_vsrlrn_b_h)
+TRANS(vsrlrn_h_w, LSX, gen_vvv, gen_helper_vsrlrn_h_w)
+TRANS(vsrlrn_w_d, LSX, gen_vvv, gen_helper_vsrlrn_w_d)
+TRANS(vsrarn_b_h, LSX, gen_vvv, gen_helper_vsrarn_b_h)
+TRANS(vsrarn_h_w, LSX, gen_vvv, gen_helper_vsrarn_h_w)
+TRANS(vsrarn_w_d, LSX, gen_vvv, gen_helper_vsrarn_w_d)
+
+TRANS(vsrlrni_b_h, LSX, gen_vv_i, gen_helper_vsrlrni_b_h)
+TRANS(vsrlrni_h_w, LSX, gen_vv_i, gen_helper_vsrlrni_h_w)
+TRANS(vsrlrni_w_d, LSX, gen_vv_i, gen_helper_vsrlrni_w_d)
+TRANS(vsrlrni_d_q, LSX, gen_vv_i, gen_helper_vsrlrni_d_q)
+TRANS(vsrarni_b_h, LSX, gen_vv_i, gen_helper_vsrarni_b_h)
+TRANS(vsrarni_h_w, LSX, gen_vv_i, gen_helper_vsrarni_h_w)
+TRANS(vsrarni_w_d, LSX, gen_vv_i, gen_helper_vsrarni_w_d)
+TRANS(vsrarni_d_q, LSX, gen_vv_i, gen_helper_vsrarni_d_q)
+
+TRANS(vssrln_b_h, LSX, gen_vvv, gen_helper_vssrln_b_h)
+TRANS(vssrln_h_w, LSX, gen_vvv, gen_helper_vssrln_h_w)
+TRANS(vssrln_w_d, LSX, gen_vvv, gen_helper_vssrln_w_d)
+TRANS(vssran_b_h, LSX, gen_vvv, gen_helper_vssran_b_h)
+TRANS(vssran_h_w, LSX, gen_vvv, gen_helper_vssran_h_w)
+TRANS(vssran_w_d, LSX, gen_vvv, gen_helper_vssran_w_d)
+TRANS(vssrln_bu_h, LSX, gen_vvv, gen_helper_vssrln_bu_h)
+TRANS(vssrln_hu_w, LSX, gen_vvv, gen_helper_vssrln_hu_w)
+TRANS(vssrln_wu_d, LSX, gen_vvv, gen_helper_vssrln_wu_d)
+TRANS(vssran_bu_h, LSX, gen_vvv, gen_helper_vssran_bu_h)
+TRANS(vssran_hu_w, LSX, gen_vvv, gen_helper_vssran_hu_w)
+TRANS(vssran_wu_d, LSX, gen_vvv, gen_helper_vssran_wu_d)
+
+TRANS(vssrlni_b_h, LSX, gen_vv_i, gen_helper_vssrlni_b_h)
+TRANS(vssrlni_h_w, LSX, gen_vv_i, gen_helper_vssrlni_h_w)
+TRANS(vssrlni_w_d, LSX, gen_vv_i, gen_helper_vssrlni_w_d)
+TRANS(vssrlni_d_q, LSX, gen_vv_i, gen_helper_vssrlni_d_q)
+TRANS(vssrani_b_h, LSX, gen_vv_i, gen_helper_vssrani_b_h)
+TRANS(vssrani_h_w, LSX, gen_vv_i, gen_helper_vssrani_h_w)
+TRANS(vssrani_w_d, LSX, gen_vv_i, gen_helper_vssrani_w_d)
+TRANS(vssrani_d_q, LSX, gen_vv_i, gen_helper_vssrani_d_q)
+TRANS(vssrlni_bu_h, LSX, gen_vv_i, gen_helper_vssrlni_bu_h)
+TRANS(vssrlni_hu_w, LSX, gen_vv_i, gen_helper_vssrlni_hu_w)
+TRANS(vssrlni_wu_d, LSX, gen_vv_i, gen_helper_vssrlni_wu_d)
+TRANS(vssrlni_du_q, LSX, gen_vv_i, gen_helper_vssrlni_du_q)
+TRANS(vssrani_bu_h, LSX, gen_vv_i, gen_helper_vssrani_bu_h)
+TRANS(vssrani_hu_w, LSX, gen_vv_i, gen_helper_vssrani_hu_w)
+TRANS(vssrani_wu_d, LSX, gen_vv_i, gen_helper_vssrani_wu_d)
+TRANS(vssrani_du_q, LSX, gen_vv_i, gen_helper_vssrani_du_q)
+
+TRANS(vssrlrn_b_h, LSX, gen_vvv, gen_helper_vssrlrn_b_h)
+TRANS(vssrlrn_h_w, LSX, gen_vvv, gen_helper_vssrlrn_h_w)
+TRANS(vssrlrn_w_d, LSX, gen_vvv, gen_helper_vssrlrn_w_d)
+TRANS(vssrarn_b_h, LSX, gen_vvv, gen_helper_vssrarn_b_h)
+TRANS(vssrarn_h_w, LSX, gen_vvv, gen_helper_vssrarn_h_w)
+TRANS(vssrarn_w_d, LSX, gen_vvv, gen_helper_vssrarn_w_d)
+TRANS(vssrlrn_bu_h, LSX, gen_vvv, gen_helper_vssrlrn_bu_h)
+TRANS(vssrlrn_hu_w, LSX, gen_vvv, gen_helper_vssrlrn_hu_w)
+TRANS(vssrlrn_wu_d, LSX, gen_vvv, gen_helper_vssrlrn_wu_d)
+TRANS(vssrarn_bu_h, LSX, gen_vvv, gen_helper_vssrarn_bu_h)
+TRANS(vssrarn_hu_w, LSX, gen_vvv, gen_helper_vssrarn_hu_w)
+TRANS(vssrarn_wu_d, LSX, gen_vvv, gen_helper_vssrarn_wu_d)
+
+TRANS(vssrlrni_b_h, LSX, gen_vv_i, gen_helper_vssrlrni_b_h)
+TRANS(vssrlrni_h_w, LSX, gen_vv_i, gen_helper_vssrlrni_h_w)
+TRANS(vssrlrni_w_d, LSX, gen_vv_i, gen_helper_vssrlrni_w_d)
+TRANS(vssrlrni_d_q, LSX, gen_vv_i, gen_helper_vssrlrni_d_q)
+TRANS(vssrarni_b_h, LSX, gen_vv_i, gen_helper_vssrarni_b_h)
+TRANS(vssrarni_h_w, LSX, gen_vv_i, gen_helper_vssrarni_h_w)
+TRANS(vssrarni_w_d, LSX, gen_vv_i, gen_helper_vssrarni_w_d)
+TRANS(vssrarni_d_q, LSX, gen_vv_i, gen_helper_vssrarni_d_q)
+TRANS(vssrlrni_bu_h, LSX, gen_vv_i, gen_helper_vssrlrni_bu_h)
+TRANS(vssrlrni_hu_w, LSX, gen_vv_i, gen_helper_vssrlrni_hu_w)
+TRANS(vssrlrni_wu_d, LSX, gen_vv_i, gen_helper_vssrlrni_wu_d)
+TRANS(vssrlrni_du_q, LSX, gen_vv_i, gen_helper_vssrlrni_du_q)
+TRANS(vssrarni_bu_h, LSX, gen_vv_i, gen_helper_vssrarni_bu_h)
+TRANS(vssrarni_hu_w, LSX, gen_vv_i, gen_helper_vssrarni_hu_w)
+TRANS(vssrarni_wu_d, LSX, gen_vv_i, gen_helper_vssrarni_wu_d)
+TRANS(vssrarni_du_q, LSX, gen_vv_i, gen_helper_vssrarni_du_q)
+
+TRANS(vclo_b, LSX, gen_vv, gen_helper_vclo_b)
+TRANS(vclo_h, LSX, gen_vv, gen_helper_vclo_h)
+TRANS(vclo_w, LSX, gen_vv, gen_helper_vclo_w)
+TRANS(vclo_d, LSX, gen_vv, gen_helper_vclo_d)
+TRANS(vclz_b, LSX, gen_vv, gen_helper_vclz_b)
+TRANS(vclz_h, LSX, gen_vv, gen_helper_vclz_h)
+TRANS(vclz_w, LSX, gen_vv, gen_helper_vclz_w)
+TRANS(vclz_d, LSX, gen_vv, gen_helper_vclz_d)
+
+TRANS(vpcnt_b, LSX, gen_vv, gen_helper_vpcnt_b)
+TRANS(vpcnt_h, LSX, gen_vv, gen_helper_vpcnt_h)
+TRANS(vpcnt_w, LSX, gen_vv, gen_helper_vpcnt_w)
+TRANS(vpcnt_d, LSX, gen_vv, gen_helper_vpcnt_d)
+
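+/*
+ * Common helper for the bit clear/set/reverse group: build the mask
+ * 1 << (b % width) per element, then combine with func (andc, or, xor).
+ */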
+static void do_vbit(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b,
+ void (*func)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
+{
+ TCGv_vec mask, lsh, t1, one;
+
+ lsh = tcg_temp_new_vec_matching(t);
+ t1 = tcg_temp_new_vec_matching(t);
+ mask = tcg_constant_vec_matching(t, vece, (8 << vece) - 1);
+ one = tcg_constant_vec_matching(t, vece, 1);
+
+ tcg_gen_and_vec(vece, lsh, b, mask);
+ tcg_gen_shlv_vec(vece, t1, one, lsh);
+ func(vece, t, a, t1);
+}
+
+static void gen_vbitclr(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vbit(vece, t, a, b, tcg_gen_andc_vec);
+}
+
+static void gen_vbitset(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vbit(vece, t, a, b, tcg_gen_or_vec);
+}
+
+static void gen_vbitrev(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b)
+{
+ do_vbit(vece, t, a, b, tcg_gen_xor_vec);
+}
+
+static void do_vbitclr(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shlv_vec, INDEX_op_andc_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vbitclr,
+ .fno = gen_helper_vbitclr_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitclr,
+ .fno = gen_helper_vbitclr_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitclr,
+ .fno = gen_helper_vbitclr_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitclr,
+ .fno = gen_helper_vbitclr_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vbitclr_b, LSX, gvec_vvv, MO_8, do_vbitclr)
+TRANS(vbitclr_h, LSX, gvec_vvv, MO_16, do_vbitclr)
+TRANS(vbitclr_w, LSX, gvec_vvv, MO_32, do_vbitclr)
+TRANS(vbitclr_d, LSX, gvec_vvv, MO_64, do_vbitclr)
+
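+/* Immediate variant: the bit position is known at translation time. */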
+static void do_vbiti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm,
+ void (*func)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
+{
+ int lsh;
+ TCGv_vec t1, one;
+
+ lsh = imm & ((8 << vece) - 1);
+ t1 = tcg_temp_new_vec_matching(t);
+ one = tcg_constant_vec_matching(t, vece, 1);
+
+ tcg_gen_shli_vec(vece, t1, one, lsh);
+ func(vece, t, a, t1);
+}
+
+static void gen_vbitclri(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_vbiti(vece, t, a, imm, tcg_gen_andc_vec);
+}
+
+static void gen_vbitseti(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_vbiti(vece, t, a, imm, tcg_gen_or_vec);
+}
+
+static void gen_vbitrevi(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_vbiti(vece, t, a, imm, tcg_gen_xor_vec);
+}
+
+static void do_vbitclri(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, INDEX_op_andc_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vbitclri,
+ .fnoi = gen_helper_vbitclri_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitclri,
+ .fnoi = gen_helper_vbitclri_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitclri,
+ .fnoi = gen_helper_vbitclri_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitclri,
+ .fnoi = gen_helper_vbitclri_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vbitclri_b, LSX, gvec_vv_i, MO_8, do_vbitclri)
+TRANS(vbitclri_h, LSX, gvec_vv_i, MO_16, do_vbitclri)
+TRANS(vbitclri_w, LSX, gvec_vv_i, MO_32, do_vbitclri)
+TRANS(vbitclri_d, LSX, gvec_vv_i, MO_64, do_vbitclri)
+
+static void do_vbitset(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shlv_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vbitset,
+ .fno = gen_helper_vbitset_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitset,
+ .fno = gen_helper_vbitset_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitset,
+ .fno = gen_helper_vbitset_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitset,
+ .fno = gen_helper_vbitset_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vbitset_b, LSX, gvec_vvv, MO_8, do_vbitset)
+TRANS(vbitset_h, LSX, gvec_vvv, MO_16, do_vbitset)
+TRANS(vbitset_w, LSX, gvec_vvv, MO_32, do_vbitset)
+TRANS(vbitset_d, LSX, gvec_vvv, MO_64, do_vbitset)
+
+static void do_vbitseti(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vbitseti,
+ .fnoi = gen_helper_vbitseti_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitseti,
+ .fnoi = gen_helper_vbitseti_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitseti,
+ .fnoi = gen_helper_vbitseti_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitseti,
+ .fnoi = gen_helper_vbitseti_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vbitseti_b, LSX, gvec_vv_i, MO_8, do_vbitseti)
+TRANS(vbitseti_h, LSX, gvec_vv_i, MO_16, do_vbitseti)
+TRANS(vbitseti_w, LSX, gvec_vv_i, MO_32, do_vbitseti)
+TRANS(vbitseti_d, LSX, gvec_vv_i, MO_64, do_vbitseti)
+
+static void do_vbitrev(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ uint32_t vk_ofs, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shlv_vec, 0
+ };
+ static const GVecGen3 op[4] = {
+ {
+ .fniv = gen_vbitrev,
+ .fno = gen_helper_vbitrev_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitrev,
+ .fno = gen_helper_vbitrev_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitrev,
+ .fno = gen_helper_vbitrev_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitrev,
+ .fno = gen_helper_vbitrev_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_3(vd_ofs, vj_ofs, vk_ofs, oprsz, maxsz, &op[vece]);
+}
+
+TRANS(vbitrev_b, LSX, gvec_vvv, MO_8, do_vbitrev)
+TRANS(vbitrev_h, LSX, gvec_vvv, MO_16, do_vbitrev)
+TRANS(vbitrev_w, LSX, gvec_vvv, MO_32, do_vbitrev)
+TRANS(vbitrev_d, LSX, gvec_vvv, MO_64, do_vbitrev)
+
+static void do_vbitrevi(unsigned vece, uint32_t vd_ofs, uint32_t vj_ofs,
+ int64_t imm, uint32_t oprsz, uint32_t maxsz)
+{
+ static const TCGOpcode vecop_list[] = {
+ INDEX_op_shli_vec, 0
+ };
+ static const GVecGen2i op[4] = {
+ {
+ .fniv = gen_vbitrevi,
+ .fnoi = gen_helper_vbitrevi_b,
+ .opt_opc = vecop_list,
+ .vece = MO_8
+ },
+ {
+ .fniv = gen_vbitrevi,
+ .fnoi = gen_helper_vbitrevi_h,
+ .opt_opc = vecop_list,
+ .vece = MO_16
+ },
+ {
+ .fniv = gen_vbitrevi,
+ .fnoi = gen_helper_vbitrevi_w,
+ .opt_opc = vecop_list,
+ .vece = MO_32
+ },
+ {
+ .fniv = gen_vbitrevi,
+ .fnoi = gen_helper_vbitrevi_d,
+ .opt_opc = vecop_list,
+ .vece = MO_64
+ },
+ };
+
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, oprsz, maxsz, imm, &op[vece]);
+}
+
+TRANS(vbitrevi_b, LSX, gvec_vv_i, MO_8, do_vbitrevi)
+TRANS(vbitrevi_h, LSX, gvec_vv_i, MO_16, do_vbitrevi)
+TRANS(vbitrevi_w, LSX, gvec_vv_i, MO_32, do_vbitrevi)
+TRANS(vbitrevi_d, LSX, gvec_vv_i, MO_64, do_vbitrevi)
+
+TRANS(vfrstp_b, LSX, gen_vvv, gen_helper_vfrstp_b)
+TRANS(vfrstp_h, LSX, gen_vvv, gen_helper_vfrstp_h)
+TRANS(vfrstpi_b, LSX, gen_vv_i, gen_helper_vfrstpi_b)
+TRANS(vfrstpi_h, LSX, gen_vv_i, gen_helper_vfrstpi_h)
+
+TRANS(vfadd_s, LSX, gen_vvv, gen_helper_vfadd_s)
+TRANS(vfadd_d, LSX, gen_vvv, gen_helper_vfadd_d)
+TRANS(vfsub_s, LSX, gen_vvv, gen_helper_vfsub_s)
+TRANS(vfsub_d, LSX, gen_vvv, gen_helper_vfsub_d)
+TRANS(vfmul_s, LSX, gen_vvv, gen_helper_vfmul_s)
+TRANS(vfmul_d, LSX, gen_vvv, gen_helper_vfmul_d)
+TRANS(vfdiv_s, LSX, gen_vvv, gen_helper_vfdiv_s)
+TRANS(vfdiv_d, LSX, gen_vvv, gen_helper_vfdiv_d)
+
+TRANS(vfmadd_s, LSX, gen_vvvv, gen_helper_vfmadd_s)
+TRANS(vfmadd_d, LSX, gen_vvvv, gen_helper_vfmadd_d)
+TRANS(vfmsub_s, LSX, gen_vvvv, gen_helper_vfmsub_s)
+TRANS(vfmsub_d, LSX, gen_vvvv, gen_helper_vfmsub_d)
+TRANS(vfnmadd_s, LSX, gen_vvvv, gen_helper_vfnmadd_s)
+TRANS(vfnmadd_d, LSX, gen_vvvv, gen_helper_vfnmadd_d)
+TRANS(vfnmsub_s, LSX, gen_vvvv, gen_helper_vfnmsub_s)
+TRANS(vfnmsub_d, LSX, gen_vvvv, gen_helper_vfnmsub_d)
+
+TRANS(vfmax_s, LSX, gen_vvv, gen_helper_vfmax_s)
+TRANS(vfmax_d, LSX, gen_vvv, gen_helper_vfmax_d)
+TRANS(vfmin_s, LSX, gen_vvv, gen_helper_vfmin_s)
+TRANS(vfmin_d, LSX, gen_vvv, gen_helper_vfmin_d)
+
+TRANS(vfmaxa_s, LSX, gen_vvv, gen_helper_vfmaxa_s)
+TRANS(vfmaxa_d, LSX, gen_vvv, gen_helper_vfmaxa_d)
+TRANS(vfmina_s, LSX, gen_vvv, gen_helper_vfmina_s)
+TRANS(vfmina_d, LSX, gen_vvv, gen_helper_vfmina_d)
+
+TRANS(vflogb_s, LSX, gen_vv, gen_helper_vflogb_s)
+TRANS(vflogb_d, LSX, gen_vv, gen_helper_vflogb_d)
+
+TRANS(vfclass_s, LSX, gen_vv, gen_helper_vfclass_s)
+TRANS(vfclass_d, LSX, gen_vv, gen_helper_vfclass_d)
+
+TRANS(vfsqrt_s, LSX, gen_vv, gen_helper_vfsqrt_s)
+TRANS(vfsqrt_d, LSX, gen_vv, gen_helper_vfsqrt_d)
+TRANS(vfrecip_s, LSX, gen_vv, gen_helper_vfrecip_s)
+TRANS(vfrecip_d, LSX, gen_vv, gen_helper_vfrecip_d)
+TRANS(vfrsqrt_s, LSX, gen_vv, gen_helper_vfrsqrt_s)
+TRANS(vfrsqrt_d, LSX, gen_vv, gen_helper_vfrsqrt_d)
+
+TRANS(vfcvtl_s_h, LSX, gen_vv, gen_helper_vfcvtl_s_h)
+TRANS(vfcvth_s_h, LSX, gen_vv, gen_helper_vfcvth_s_h)
+TRANS(vfcvtl_d_s, LSX, gen_vv, gen_helper_vfcvtl_d_s)
+TRANS(vfcvth_d_s, LSX, gen_vv, gen_helper_vfcvth_d_s)
+TRANS(vfcvt_h_s, LSX, gen_vvv, gen_helper_vfcvt_h_s)
+TRANS(vfcvt_s_d, LSX, gen_vvv, gen_helper_vfcvt_s_d)
+
+TRANS(vfrintrne_s, LSX, gen_vv, gen_helper_vfrintrne_s)
+TRANS(vfrintrne_d, LSX, gen_vv, gen_helper_vfrintrne_d)
+TRANS(vfrintrz_s, LSX, gen_vv, gen_helper_vfrintrz_s)
+TRANS(vfrintrz_d, LSX, gen_vv, gen_helper_vfrintrz_d)
+TRANS(vfrintrp_s, LSX, gen_vv, gen_helper_vfrintrp_s)
+TRANS(vfrintrp_d, LSX, gen_vv, gen_helper_vfrintrp_d)
+TRANS(vfrintrm_s, LSX, gen_vv, gen_helper_vfrintrm_s)
+TRANS(vfrintrm_d, LSX, gen_vv, gen_helper_vfrintrm_d)
+TRANS(vfrint_s, LSX, gen_vv, gen_helper_vfrint_s)
+TRANS(vfrint_d, LSX, gen_vv, gen_helper_vfrint_d)
+
+TRANS(vftintrne_w_s, LSX, gen_vv, gen_helper_vftintrne_w_s)
+TRANS(vftintrne_l_d, LSX, gen_vv, gen_helper_vftintrne_l_d)
+TRANS(vftintrz_w_s, LSX, gen_vv, gen_helper_vftintrz_w_s)
+TRANS(vftintrz_l_d, LSX, gen_vv, gen_helper_vftintrz_l_d)
+TRANS(vftintrp_w_s, LSX, gen_vv, gen_helper_vftintrp_w_s)
+TRANS(vftintrp_l_d, LSX, gen_vv, gen_helper_vftintrp_l_d)
+TRANS(vftintrm_w_s, LSX, gen_vv, gen_helper_vftintrm_w_s)
+TRANS(vftintrm_l_d, LSX, gen_vv, gen_helper_vftintrm_l_d)
+TRANS(vftint_w_s, LSX, gen_vv, gen_helper_vftint_w_s)
+TRANS(vftint_l_d, LSX, gen_vv, gen_helper_vftint_l_d)
+TRANS(vftintrz_wu_s, LSX, gen_vv, gen_helper_vftintrz_wu_s)
+TRANS(vftintrz_lu_d, LSX, gen_vv, gen_helper_vftintrz_lu_d)
+TRANS(vftint_wu_s, LSX, gen_vv, gen_helper_vftint_wu_s)
+TRANS(vftint_lu_d, LSX, gen_vv, gen_helper_vftint_lu_d)
+TRANS(vftintrne_w_d, LSX, gen_vvv, gen_helper_vftintrne_w_d)
+TRANS(vftintrz_w_d, LSX, gen_vvv, gen_helper_vftintrz_w_d)
+TRANS(vftintrp_w_d, LSX, gen_vvv, gen_helper_vftintrp_w_d)
+TRANS(vftintrm_w_d, LSX, gen_vvv, gen_helper_vftintrm_w_d)
+TRANS(vftint_w_d, LSX, gen_vvv, gen_helper_vftint_w_d)
+TRANS(vftintrnel_l_s, LSX, gen_vv, gen_helper_vftintrnel_l_s)
+TRANS(vftintrneh_l_s, LSX, gen_vv, gen_helper_vftintrneh_l_s)
+TRANS(vftintrzl_l_s, LSX, gen_vv, gen_helper_vftintrzl_l_s)
+TRANS(vftintrzh_l_s, LSX, gen_vv, gen_helper_vftintrzh_l_s)
+TRANS(vftintrpl_l_s, LSX, gen_vv, gen_helper_vftintrpl_l_s)
+TRANS(vftintrph_l_s, LSX, gen_vv, gen_helper_vftintrph_l_s)
+TRANS(vftintrml_l_s, LSX, gen_vv, gen_helper_vftintrml_l_s)
+TRANS(vftintrmh_l_s, LSX, gen_vv, gen_helper_vftintrmh_l_s)
+TRANS(vftintl_l_s, LSX, gen_vv, gen_helper_vftintl_l_s)
+TRANS(vftinth_l_s, LSX, gen_vv, gen_helper_vftinth_l_s)
+
+TRANS(vffint_s_w, LSX, gen_vv, gen_helper_vffint_s_w)
+TRANS(vffint_d_l, LSX, gen_vv, gen_helper_vffint_d_l)
+TRANS(vffint_s_wu, LSX, gen_vv, gen_helper_vffint_s_wu)
+TRANS(vffint_d_lu, LSX, gen_vv, gen_helper_vffint_d_lu)
+TRANS(vffintl_d_w, LSX, gen_vv, gen_helper_vffintl_d_w)
+TRANS(vffinth_d_w, LSX, gen_vv, gen_helper_vffinth_d_w)
+TRANS(vffint_s_l, LSX, gen_vvv, gen_helper_vffint_s_l)
+
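+/* Integer compares set each element of vd to all ones or all zeros. */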
+static bool do_cmp(DisasContext *ctx, arg_vvv *a, MemOp mop, TCGCond cond)
+{
+ uint32_t vd_ofs, vj_ofs, vk_ofs;
+
+ CHECK_SXE;
+
+ vd_ofs = vec_full_offset(a->vd);
+ vj_ofs = vec_full_offset(a->vj);
+ vk_ofs = vec_full_offset(a->vk);
+
+ tcg_gen_gvec_cmp(cond, mop, vd_ofs, vj_ofs, vk_ofs, 16, ctx->vl/8);
+ return true;
+}
+
+static void do_cmpi_vec(TCGCond cond,
+ unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ tcg_gen_cmp_vec(cond, vece, t, a, tcg_constant_vec_matching(t, vece, imm));
+}
+
+static void gen_vseqi_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_EQ, vece, t, a, imm);
+}
+
+static void gen_vslei_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_LE, vece, t, a, imm);
+}
+
+static void gen_vslti_s_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_LT, vece, t, a, imm);
+}
+
+static void gen_vslei_u_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_LEU, vece, t, a, imm);
+}
+
+static void gen_vslti_u_vec(unsigned vece, TCGv_vec t, TCGv_vec a, int64_t imm)
+{
+ do_cmpi_vec(TCG_COND_LTU, vece, t, a, imm);
+}
+
+#define DO_CMPI_S(NAME) \
+static bool do_## NAME ##_s(DisasContext *ctx, arg_vv_i *a, MemOp mop) \
+{ \
+ uint32_t vd_ofs, vj_ofs; \
+ \
+ CHECK_SXE; \
+ \
+ static const TCGOpcode vecop_list[] = { \
+ INDEX_op_cmp_vec, 0 \
+ }; \
+ static const GVecGen2i op[4] = { \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_b, \
+ .opt_opc = vecop_list, \
+ .vece = MO_8 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_h, \
+ .opt_opc = vecop_list, \
+ .vece = MO_16 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_w, \
+ .opt_opc = vecop_list, \
+ .vece = MO_32 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_s_vec, \
+ .fnoi = gen_helper_## NAME ##_d, \
+ .opt_opc = vecop_list, \
+ .vece = MO_64 \
+ } \
+ }; \
+ \
+ vd_ofs = vec_full_offset(a->vd); \
+ vj_ofs = vec_full_offset(a->vj); \
+ \
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, &op[mop]); \
+ \
+ return true; \
+}
+
+DO_CMPI_S(vseqi)
+DO_CMPI_S(vslei)
+DO_CMPI_S(vslti)
+
+#define DO_CMPI_U(NAME) \
+static bool do_## NAME ##_u(DisasContext *ctx, arg_vv_i *a, MemOp mop) \
+{ \
+ uint32_t vd_ofs, vj_ofs; \
+ \
+ CHECK_SXE; \
+ \
+ static const TCGOpcode vecop_list[] = { \
+ INDEX_op_cmp_vec, 0 \
+ }; \
+ static const GVecGen2i op[4] = { \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_bu, \
+ .opt_opc = vecop_list, \
+ .vece = MO_8 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_hu, \
+ .opt_opc = vecop_list, \
+ .vece = MO_16 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_wu, \
+ .opt_opc = vecop_list, \
+ .vece = MO_32 \
+ }, \
+ { \
+ .fniv = gen_## NAME ##_u_vec, \
+ .fnoi = gen_helper_## NAME ##_du, \
+ .opt_opc = vecop_list, \
+ .vece = MO_64 \
+ } \
+ }; \
+ \
+ vd_ofs = vec_full_offset(a->vd); \
+ vj_ofs = vec_full_offset(a->vj); \
+ \
+ tcg_gen_gvec_2i(vd_ofs, vj_ofs, 16, ctx->vl/8, a->imm, &op[mop]); \
+ \
+ return true; \
+}
+
+DO_CMPI_U(vslei)
+DO_CMPI_U(vslti)
+
+TRANS(vseq_b, LSX, do_cmp, MO_8, TCG_COND_EQ)
+TRANS(vseq_h, LSX, do_cmp, MO_16, TCG_COND_EQ)
+TRANS(vseq_w, LSX, do_cmp, MO_32, TCG_COND_EQ)
+TRANS(vseq_d, LSX, do_cmp, MO_64, TCG_COND_EQ)
+TRANS(vseqi_b, LSX, do_vseqi_s, MO_8)
+TRANS(vseqi_h, LSX, do_vseqi_s, MO_16)
+TRANS(vseqi_w, LSX, do_vseqi_s, MO_32)
+TRANS(vseqi_d, LSX, do_vseqi_s, MO_64)
+
+TRANS(vsle_b, LSX, do_cmp, MO_8, TCG_COND_LE)
+TRANS(vsle_h, LSX, do_cmp, MO_16, TCG_COND_LE)
+TRANS(vsle_w, LSX, do_cmp, MO_32, TCG_COND_LE)
+TRANS(vsle_d, LSX, do_cmp, MO_64, TCG_COND_LE)
+TRANS(vslei_b, LSX, do_vslei_s, MO_8)
+TRANS(vslei_h, LSX, do_vslei_s, MO_16)
+TRANS(vslei_w, LSX, do_vslei_s, MO_32)
+TRANS(vslei_d, LSX, do_vslei_s, MO_64)
+TRANS(vsle_bu, LSX, do_cmp, MO_8, TCG_COND_LEU)
+TRANS(vsle_hu, LSX, do_cmp, MO_16, TCG_COND_LEU)
+TRANS(vsle_wu, LSX, do_cmp, MO_32, TCG_COND_LEU)
+TRANS(vsle_du, LSX, do_cmp, MO_64, TCG_COND_LEU)
+TRANS(vslei_bu, LSX, do_vslei_u, MO_8)
+TRANS(vslei_hu, LSX, do_vslei_u, MO_16)
+TRANS(vslei_wu, LSX, do_vslei_u, MO_32)
+TRANS(vslei_du, LSX, do_vslei_u, MO_64)
+
+TRANS(vslt_b, LSX, do_cmp, MO_8, TCG_COND_LT)
+TRANS(vslt_h, LSX, do_cmp, MO_16, TCG_COND_LT)
+TRANS(vslt_w, LSX, do_cmp, MO_32, TCG_COND_LT)
+TRANS(vslt_d, LSX, do_cmp, MO_64, TCG_COND_LT)
+TRANS(vslti_b, LSX, do_vslti_s, MO_8)
+TRANS(vslti_h, LSX, do_vslti_s, MO_16)
+TRANS(vslti_w, LSX, do_vslti_s, MO_32)
+TRANS(vslti_d, LSX, do_vslti_s, MO_64)
+TRANS(vslt_bu, LSX, do_cmp, MO_8, TCG_COND_LTU)
+TRANS(vslt_hu, LSX, do_cmp, MO_16, TCG_COND_LTU)
+TRANS(vslt_wu, LSX, do_cmp, MO_32, TCG_COND_LTU)
+TRANS(vslt_du, LSX, do_cmp, MO_64, TCG_COND_LTU)
+TRANS(vslti_bu, LSX, do_vslti_u, MO_8)
+TRANS(vslti_hu, LSX, do_vslti_u, MO_16)
+TRANS(vslti_wu, LSX, do_vslti_u, MO_32)
+TRANS(vslti_du, LSX, do_vslti_u, MO_64)
+
+static bool trans_vfcmp_cond_s(DisasContext *ctx, arg_vvv_fcond *a)
+{
+ uint32_t flags;
+ void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 vk = tcg_constant_i32(a->vk);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ fn = (a->fcond & 1 ? gen_helper_vfcmp_s_s : gen_helper_vfcmp_c_s);
+ flags = get_fcmp_flags(a->fcond >> 1);
+ fn(cpu_env, vd, vj, vk, tcg_constant_i32(flags));
+
+ return true;
+}
+
+static bool trans_vfcmp_cond_d(DisasContext *ctx, arg_vvv_fcond *a)
+{
+ uint32_t flags;
+ void (*fn)(TCGv_env, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32);
+ TCGv_i32 vd = tcg_constant_i32(a->vd);
+ TCGv_i32 vj = tcg_constant_i32(a->vj);
+ TCGv_i32 vk = tcg_constant_i32(a->vk);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ fn = (a->fcond & 1 ? gen_helper_vfcmp_s_d : gen_helper_vfcmp_c_d);
+ flags = get_fcmp_flags(a->fcond >> 1);
+ fn(cpu_env, vd, vj, vk, tcg_constant_i32(flags));
+
+ return true;
+}
+
+static bool trans_vbitsel_v(DisasContext *ctx, arg_vvvv *a)
+{
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ tcg_gen_gvec_bitsel(MO_64, vec_full_offset(a->vd), vec_full_offset(a->va),
+ vec_full_offset(a->vk), vec_full_offset(a->vj),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static void gen_vbitseli(unsigned vece, TCGv_vec a, TCGv_vec b, int64_t imm)
+{
+ tcg_gen_bitsel_vec(vece, a, a, tcg_constant_vec_matching(a, vece, imm), b);
+}
+
+static bool trans_vbitseli_b(DisasContext *ctx, arg_vv_i *a)
+{
+ static const GVecGen2i op = {
+ .fniv = gen_vbitseli,
+ .fnoi = gen_helper_vbitseli_b,
+ .vece = MO_8,
+ .load_dest = true
+ };
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ tcg_gen_gvec_2i(vec_full_offset(a->vd), vec_full_offset(a->vj),
+ 16, ctx->vl/8, a->imm, &op);
+ return true;
+}
+
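+/*
+ * vseteqz.v / vsetnez.v: OR the two 64-bit halves of vj and set the
+ * condition flag cd from a compare of the result against zero.
+ */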
+#define VSET(NAME, COND) \
+static bool trans_## NAME (DisasContext *ctx, arg_cv *a) \
+{ \
+ TCGv_i64 t1, al, ah; \
+ \
+ if (!avail_LSX(ctx)) { \
+ return false; \
+ } \
+ \
+ CHECK_SXE; \
+ \
+ al = tcg_temp_new_i64(); \
+ ah = tcg_temp_new_i64(); \
+ t1 = tcg_temp_new_i64(); \
+ \
+ get_vreg64(ah, a->vj, 1); \
+ get_vreg64(al, a->vj, 0); \
+ \
+ tcg_gen_or_i64(t1, al, ah); \
+ tcg_gen_setcondi_i64(COND, t1, t1, 0); \
+ tcg_gen_st8_tl(t1, cpu_env, offsetof(CPULoongArchState, cf[a->cd & 0x7])); \
+ \
+ return true; \
+}
+
+VSET(vseteqz_v, TCG_COND_EQ)
+VSET(vsetnez_v, TCG_COND_NE)
+
+TRANS(vsetanyeqz_b, LSX, gen_cv, gen_helper_vsetanyeqz_b)
+TRANS(vsetanyeqz_h, LSX, gen_cv, gen_helper_vsetanyeqz_h)
+TRANS(vsetanyeqz_w, LSX, gen_cv, gen_helper_vsetanyeqz_w)
+TRANS(vsetanyeqz_d, LSX, gen_cv, gen_helper_vsetanyeqz_d)
+TRANS(vsetallnez_b, LSX, gen_cv, gen_helper_vsetallnez_b)
+TRANS(vsetallnez_h, LSX, gen_cv, gen_helper_vsetallnez_h)
+TRANS(vsetallnez_w, LSX, gen_cv, gen_helper_vsetallnez_w)
+TRANS(vsetallnez_d, LSX, gen_cv, gen_helper_vsetallnez_d)
+
+static bool trans_vinsgr2vr_b(DisasContext *ctx, arg_vr_i *a)
+{
+ TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_st8_i64(src, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.B(a->imm)));
+ return true;
+}
+
+static bool trans_vinsgr2vr_h(DisasContext *ctx, arg_vr_i *a)
+{
+ TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_st16_i64(src, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.H(a->imm)));
+ return true;
+}
+
+static bool trans_vinsgr2vr_w(DisasContext *ctx, arg_vr_i *a)
+{
+ TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_st32_i64(src, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.W(a->imm)));
+ return true;
+}
+
+static bool trans_vinsgr2vr_d(DisasContext *ctx, arg_vr_i *a)
+{
+ TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_st_i64(src, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.D(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_b(DisasContext *ctx, arg_rv_i *a)
+{
+ TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_ld8s_i64(dst, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.B(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_h(DisasContext *ctx, arg_rv_i *a)
+{
+ TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_ld16s_i64(dst, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.H(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_w(DisasContext *ctx, arg_rv_i *a)
+{
+ TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_ld32s_i64(dst, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.W(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_d(DisasContext *ctx, arg_rv_i *a)
+{
+ TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_ld_i64(dst, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.D(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_bu(DisasContext *ctx, arg_rv_i *a)
+{
+ TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_ld8u_i64(dst, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.B(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_hu(DisasContext *ctx, arg_rv_i *a)
+{
+ TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_ld16u_i64(dst, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.H(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_wu(DisasContext *ctx, arg_rv_i *a)
+{
+ TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_ld32u_i64(dst, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.W(a->imm)));
+ return true;
+}
+
+static bool trans_vpickve2gr_du(DisasContext *ctx, arg_rv_i *a)
+{
+ TCGv dst = gpr_dst(ctx, a->rd, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_ld_i64(dst, cpu_env,
+ offsetof(CPULoongArchState, fpr[a->vj].vreg.D(a->imm)));
+ return true;
+}
+
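+/*
+ * vreplgr2vr.{b,h,w,d} broadcast a GPR value into every element of vd;
+ * the vreplvei.{b,h,w,d} forms below instead broadcast the imm-selected
+ * element of vj.
+ */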
+static bool gvec_dup(DisasContext *ctx, arg_vr *a, MemOp mop)
+{
+ TCGv src = gpr_src(ctx, a->rj, EXT_NONE);
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ tcg_gen_gvec_dup_i64(mop, vec_full_offset(a->vd),
+ 16, ctx->vl/8, src);
+ return true;
+}
+
+TRANS(vreplgr2vr_b, LSX, gvec_dup, MO_8)
+TRANS(vreplgr2vr_h, LSX, gvec_dup, MO_16)
+TRANS(vreplgr2vr_w, LSX, gvec_dup, MO_32)
+TRANS(vreplgr2vr_d, LSX, gvec_dup, MO_64)
+
+static bool trans_vreplvei_b(DisasContext *ctx, arg_vv_i *a)
+{
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+    tcg_gen_gvec_dup_mem(MO_8, vec_full_offset(a->vd),
+ offsetof(CPULoongArchState,
+ fpr[a->vj].vreg.B((a->imm))),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static bool trans_vreplvei_h(DisasContext *ctx, arg_vv_i *a)
+{
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_gvec_dup_mem(MO_16, vec_full_offset(a->vd),
+ offsetof(CPULoongArchState,
+ fpr[a->vj].vreg.H((a->imm))),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static bool trans_vreplvei_w(DisasContext *ctx, arg_vv_i *a)
+{
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_gvec_dup_mem(MO_32, vec_full_offset(a->vd),
+ offsetof(CPULoongArchState,
+ fpr[a->vj].vreg.W((a->imm))),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static bool trans_vreplvei_d(DisasContext *ctx, arg_vv_i *a)
+{
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+ tcg_gen_gvec_dup_mem(MO_64, vec_full_offset(a->vd),
+ offsetof(CPULoongArchState,
+ fpr[a->vj].vreg.D((a->imm))),
+ 16, ctx->vl/8);
+ return true;
+}
+
+static bool gen_vreplve(DisasContext *ctx, arg_vvr *a, int vece, int bit,
+                        void (*func)(TCGv_i64, TCGv_ptr, tcg_target_long))
+{
+    TCGv_i64 t0, t2;
+    TCGv_ptr t1;
+
+    if (!avail_LSX(ctx)) {
+        return false;
+    }
+
+    CHECK_SXE;
+
+    t0 = tcg_temp_new_i64();
+    t1 = tcg_temp_new_ptr();
+    t2 = tcg_temp_new_i64();
+
+    /* Byte offset of element (rk % (LSX_LEN/bit)) within the register. */
+    tcg_gen_andi_i64(t0, gpr_src(ctx, a->rk, EXT_NONE), (LSX_LEN/bit) - 1);
+    tcg_gen_shli_i64(t0, t0, vece);
+    if (HOST_BIG_ENDIAN) {
+        tcg_gen_xori_i64(t0, t0, vece << ((LSX_LEN/bit) - 1));
+    }
+
+    /* Load the selected element and broadcast it to every lane of vd. */
+    tcg_gen_trunc_i64_ptr(t1, t0);
+    tcg_gen_add_ptr(t1, t1, cpu_env);
+    func(t2, t1, vec_full_offset(a->vj));
+    tcg_gen_gvec_dup_i64(vece, vec_full_offset(a->vd), 16, ctx->vl/8, t2);
+
+    return true;
+}
+
+TRANS(vreplve_b, LSX, gen_vreplve, MO_8, 8, tcg_gen_ld8u_i64)
+TRANS(vreplve_h, LSX, gen_vreplve, MO_16, 16, tcg_gen_ld16u_i64)
+TRANS(vreplve_w, LSX, gen_vreplve, MO_32, 32, tcg_gen_ld32u_i64)
+TRANS(vreplve_d, LSX, gen_vreplve, MO_64, 64, tcg_gen_ld_i64)
+
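+/*
+ * vbsll.v/vbsrl.v shift the full 128-bit register by imm bytes.  With
+ * the value held as two 64-bit halves, a shift of ofs < 64 bits needs
+ * an extract2 across the halves plus a shift of the remaining half;
+ * for ofs >= 64 one half is a shifted copy of the other and the rest
+ * is zero.
+ */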
+static bool trans_vbsll_v(DisasContext *ctx, arg_vv_i *a)
+{
+ int ofs;
+ TCGv_i64 desthigh, destlow, high, low;
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ desthigh = tcg_temp_new_i64();
+ destlow = tcg_temp_new_i64();
+ high = tcg_temp_new_i64();
+ low = tcg_temp_new_i64();
+
+ get_vreg64(low, a->vj, 0);
+
+ ofs = ((a->imm) & 0xf) * 8;
+ if (ofs < 64) {
+ get_vreg64(high, a->vj, 1);
+ tcg_gen_extract2_i64(desthigh, low, high, 64 - ofs);
+ tcg_gen_shli_i64(destlow, low, ofs);
+ } else {
+ tcg_gen_shli_i64(desthigh, low, ofs - 64);
+ destlow = tcg_constant_i64(0);
+ }
+
+ set_vreg64(desthigh, a->vd, 1);
+ set_vreg64(destlow, a->vd, 0);
+
+ return true;
+}
+
+static bool trans_vbsrl_v(DisasContext *ctx, arg_vv_i *a)
+{
+ TCGv_i64 desthigh, destlow, high, low;
+ int ofs;
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ desthigh = tcg_temp_new_i64();
+ destlow = tcg_temp_new_i64();
+ high = tcg_temp_new_i64();
+ low = tcg_temp_new_i64();
+
+ get_vreg64(high, a->vj, 1);
+
+ ofs = ((a->imm) & 0xf) * 8;
+ if (ofs < 64) {
+ get_vreg64(low, a->vj, 0);
+ tcg_gen_extract2_i64(destlow, low, high, ofs);
+ tcg_gen_shri_i64(desthigh, high, ofs);
+ } else {
+ tcg_gen_shri_i64(destlow, high, ofs - 64);
+ desthigh = tcg_constant_i64(0);
+ }
+
+ set_vreg64(desthigh, a->vd, 1);
+ set_vreg64(destlow, a->vd, 0);
+
+ return true;
+}
+
+TRANS(vpackev_b, LSX, gen_vvv, gen_helper_vpackev_b)
+TRANS(vpackev_h, LSX, gen_vvv, gen_helper_vpackev_h)
+TRANS(vpackev_w, LSX, gen_vvv, gen_helper_vpackev_w)
+TRANS(vpackev_d, LSX, gen_vvv, gen_helper_vpackev_d)
+TRANS(vpackod_b, LSX, gen_vvv, gen_helper_vpackod_b)
+TRANS(vpackod_h, LSX, gen_vvv, gen_helper_vpackod_h)
+TRANS(vpackod_w, LSX, gen_vvv, gen_helper_vpackod_w)
+TRANS(vpackod_d, LSX, gen_vvv, gen_helper_vpackod_d)
+
+TRANS(vpickev_b, LSX, gen_vvv, gen_helper_vpickev_b)
+TRANS(vpickev_h, LSX, gen_vvv, gen_helper_vpickev_h)
+TRANS(vpickev_w, LSX, gen_vvv, gen_helper_vpickev_w)
+TRANS(vpickev_d, LSX, gen_vvv, gen_helper_vpickev_d)
+TRANS(vpickod_b, LSX, gen_vvv, gen_helper_vpickod_b)
+TRANS(vpickod_h, LSX, gen_vvv, gen_helper_vpickod_h)
+TRANS(vpickod_w, LSX, gen_vvv, gen_helper_vpickod_w)
+TRANS(vpickod_d, LSX, gen_vvv, gen_helper_vpickod_d)
+
+TRANS(vilvl_b, LSX, gen_vvv, gen_helper_vilvl_b)
+TRANS(vilvl_h, LSX, gen_vvv, gen_helper_vilvl_h)
+TRANS(vilvl_w, LSX, gen_vvv, gen_helper_vilvl_w)
+TRANS(vilvl_d, LSX, gen_vvv, gen_helper_vilvl_d)
+TRANS(vilvh_b, LSX, gen_vvv, gen_helper_vilvh_b)
+TRANS(vilvh_h, LSX, gen_vvv, gen_helper_vilvh_h)
+TRANS(vilvh_w, LSX, gen_vvv, gen_helper_vilvh_w)
+TRANS(vilvh_d, LSX, gen_vvv, gen_helper_vilvh_d)
+
+TRANS(vshuf_b, LSX, gen_vvvv, gen_helper_vshuf_b)
+TRANS(vshuf_h, LSX, gen_vvv, gen_helper_vshuf_h)
+TRANS(vshuf_w, LSX, gen_vvv, gen_helper_vshuf_w)
+TRANS(vshuf_d, LSX, gen_vvv, gen_helper_vshuf_d)
+TRANS(vshuf4i_b, LSX, gen_vv_i, gen_helper_vshuf4i_b)
+TRANS(vshuf4i_h, LSX, gen_vv_i, gen_helper_vshuf4i_h)
+TRANS(vshuf4i_w, LSX, gen_vv_i, gen_helper_vshuf4i_w)
+TRANS(vshuf4i_d, LSX, gen_vv_i, gen_helper_vshuf4i_d)
+
+TRANS(vpermi_w, LSX, gen_vv_i, gen_helper_vpermi_w)
+
+TRANS(vextrins_b, LSX, gen_vv_i, gen_helper_vextrins_b)
+TRANS(vextrins_h, LSX, gen_vv_i, gen_helper_vextrins_h)
+TRANS(vextrins_w, LSX, gen_vv_i, gen_helper_vextrins_w)
+TRANS(vextrins_d, LSX, gen_vv_i, gen_helper_vextrins_d)
+
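+/*
+ * vld/vst (and the indexed vldx/vstx forms) move the whole vector with
+ * a single i128 memory access, split into or assembled from the two
+ * 64-bit halves of the register.
+ */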
+static bool trans_vld(DisasContext *ctx, arg_vr_i *a)
+{
+ TCGv addr;
+ TCGv_i64 rl, rh;
+ TCGv_i128 val;
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ addr = gpr_src(ctx, a->rj, EXT_NONE);
+ val = tcg_temp_new_i128();
+ rl = tcg_temp_new_i64();
+ rh = tcg_temp_new_i64();
+
+ addr = make_address_i(ctx, addr, a->imm);
+
+ tcg_gen_qemu_ld_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
+ tcg_gen_extr_i128_i64(rl, rh, val);
+ set_vreg64(rh, a->vd, 1);
+ set_vreg64(rl, a->vd, 0);
+
+ return true;
+}
+
+static bool trans_vst(DisasContext *ctx, arg_vr_i *a)
+{
+ TCGv addr;
+ TCGv_i128 val;
+ TCGv_i64 ah, al;
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ addr = gpr_src(ctx, a->rj, EXT_NONE);
+ val = tcg_temp_new_i128();
+ ah = tcg_temp_new_i64();
+ al = tcg_temp_new_i64();
+
+ addr = make_address_i(ctx, addr, a->imm);
+
+ get_vreg64(ah, a->vd, 1);
+ get_vreg64(al, a->vd, 0);
+ tcg_gen_concat_i64_i128(val, al, ah);
+ tcg_gen_qemu_st_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
+
+ return true;
+}
+
+static bool trans_vldx(DisasContext *ctx, arg_vrr *a)
+{
+ TCGv addr, src1, src2;
+ TCGv_i64 rl, rh;
+ TCGv_i128 val;
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ src1 = gpr_src(ctx, a->rj, EXT_NONE);
+ src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ val = tcg_temp_new_i128();
+ rl = tcg_temp_new_i64();
+ rh = tcg_temp_new_i64();
+
+ addr = make_address_x(ctx, src1, src2);
+ tcg_gen_qemu_ld_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
+ tcg_gen_extr_i128_i64(rl, rh, val);
+ set_vreg64(rh, a->vd, 1);
+ set_vreg64(rl, a->vd, 0);
+
+ return true;
+}
+
+static bool trans_vstx(DisasContext *ctx, arg_vrr *a)
+{
+ TCGv addr, src1, src2;
+ TCGv_i64 ah, al;
+ TCGv_i128 val;
+
+ if (!avail_LSX(ctx)) {
+ return false;
+ }
+
+ CHECK_SXE;
+
+ src1 = gpr_src(ctx, a->rj, EXT_NONE);
+ src2 = gpr_src(ctx, a->rk, EXT_NONE);
+ val = tcg_temp_new_i128();
+ ah = tcg_temp_new_i64();
+ al = tcg_temp_new_i64();
+
+ addr = make_address_x(ctx, src1, src2);
+ get_vreg64(ah, a->vd, 1);
+ get_vreg64(al, a->vd, 0);
+ tcg_gen_concat_i64_i128(val, al, ah);
+ tcg_gen_qemu_st_i128(val, addr, ctx->mem_idx, MO_128 | MO_TE);
+
+ return true;
+}
+
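+/*
+ * vldrepl.{b,h,w,d} load one element and replicate it into every lane
+ * of vd; vstelm.{b,h,w,d} store only the imm2-selected element.
+ */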
+#define VLDREPL(NAME, MO) \
+static bool trans_## NAME (DisasContext *ctx, arg_vr_i *a) \
+{ \
+ TCGv addr; \
+ TCGv_i64 val; \
+ \
+ if (!avail_LSX(ctx)) { \
+ return false; \
+ } \
+ \
+ CHECK_SXE; \
+ \
+ addr = gpr_src(ctx, a->rj, EXT_NONE); \
+ val = tcg_temp_new_i64(); \
+ \
+ addr = make_address_i(ctx, addr, a->imm); \
+ \
+ tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, MO); \
+ tcg_gen_gvec_dup_i64(MO, vec_full_offset(a->vd), 16, ctx->vl/8, val); \
+ \
+ return true; \
+}
+
+VLDREPL(vldrepl_b, MO_8)
+VLDREPL(vldrepl_h, MO_16)
+VLDREPL(vldrepl_w, MO_32)
+VLDREPL(vldrepl_d, MO_64)
+
+#define VSTELM(NAME, MO, E) \
+static bool trans_## NAME (DisasContext *ctx, arg_vr_ii *a) \
+{ \
+ TCGv addr; \
+ TCGv_i64 val; \
+ \
+ if (!avail_LSX(ctx)) { \
+ return false; \
+ } \
+ \
+ CHECK_SXE; \
+ \
+ addr = gpr_src(ctx, a->rj, EXT_NONE); \
+ val = tcg_temp_new_i64(); \
+ \
+ addr = make_address_i(ctx, addr, a->imm); \
+ \
+ tcg_gen_ld_i64(val, cpu_env, \
+ offsetof(CPULoongArchState, fpr[a->vd].vreg.E(a->imm2))); \
+ tcg_gen_qemu_st_i64(val, addr, ctx->mem_idx, MO); \
+ \
+ return true; \
+}
+
+VSTELM(vstelm_b, MO_8, B)
+VSTELM(vstelm_h, MO_16, H)
+VSTELM(vstelm_w, MO_32, W)
+VSTELM(vstelm_d, MO_64, D)
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * QEMU LoongArch LSX helper functions.
- *
- * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/exec-all.h"
-#include "exec/helper-proto.h"
-#include "fpu/softfloat.h"
-#include "internals.h"
-#include "tcg/tcg.h"
-
-#define DO_ADD(a, b) (a + b)
-#define DO_SUB(a, b) (a - b)
-
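-/*
- * Widening horizontal add/sub: each destination element combines the
- * odd source element (E2(2i + 1) of vj) with the even one (E2(2i) of
- * vk), widened to the destination element type first.
- */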
-#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- typedef __typeof(Vd->E1(0)) TD; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \
- } \
-}
-
-DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD)
-DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD)
-DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD)
-
-void HELPER(vhaddw_q_d)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t vk)
-{
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- VReg *Vk = &(env->fpr[vk].vreg);
-
- Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
-}
-
-DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB)
-DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB)
-DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB)
-
-void HELPER(vhsubw_q_d)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t vk)
-{
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- VReg *Vk = &(env->fpr[vk].vreg);
-
- Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
-}
-
-DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD)
-DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD)
-DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD)
-
-void HELPER(vhaddw_qu_du)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t vk)
-{
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- VReg *Vk = &(env->fpr[vk].vreg);
-
- Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
- int128_make64((uint64_t)Vk->D(0)));
-}
-
-DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB)
-DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB)
-DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB)
-
-void HELPER(vhsubw_qu_du)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t vk)
-{
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- VReg *Vk = &(env->fpr[vk].vreg);
-
- Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
- int128_make64((uint64_t)Vk->D(0)));
-}
-
-#define DO_EVEN(NAME, BIT, E1, E2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- typedef __typeof(Vd->E1(0)) TD; \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
-        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i));     \
- } \
-}
-
-#define DO_ODD(NAME, BIT, E1, E2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- typedef __typeof(Vd->E1(0)) TD; \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \
- } \
-}
-
-void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_add(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
-}
-
-DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
-DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
-DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)
-
-void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
-}
-
-DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
-DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
-DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)
-
-void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_sub(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
-}
-
-DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
-DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
-DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)
-
-void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
-}
-
-DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
-DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
-DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)
-
-void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
- int128_make64((uint64_t)Vk->D(0)));
-}
-
-DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
-DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
-DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)
-
-void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
- int128_make64((uint64_t)Vk->D(1)));
-}
-
-DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
-DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
-DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)
-
-void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(0)),
- int128_make64((uint64_t)Vk->D(0)));
-}
-
-DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
-DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
-DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)
-
-void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
- int128_make64((uint64_t)Vk->D(1)));
-}
-
-DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
-DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
-DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)
-
-#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- typedef __typeof(Vd->ES1(0)) TDS; \
- typedef __typeof(Vd->EU1(0)) TDU; \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
-        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i), (TDS)Vk->ES2(2 * i));     \
- } \
-}
-
-#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- typedef __typeof(Vd->ES1(0)) TDS; \
- typedef __typeof(Vd->EU1(0)) TDU; \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \
- } \
-}
-
-void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
- int128_makes64(Vk->D(0)));
-}
-
-DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
-DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
-DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)
-
-void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
-{
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
- int128_makes64(Vk->D(1)));
-}
-
-DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
-DO_ODD_U_S(vaddwod_w_hu_h, 32, W, UW, H, UH, DO_ADD)
-DO_ODD_U_S(vaddwod_d_wu_w, 64, D, UD, W, UW, DO_ADD)
-
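-/*
- * Averages computed without intermediate overflow: a/2 + b/2 plus a
- * correction for the two low bits.  DO_VAVG carries only when both
- * operands are odd (floor of the sum); DO_VAVGR adds the rounding bit
- * when either operand is odd.
- */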
-#define DO_VAVG(a, b) ((a >> 1) + (b >> 1) + (a & b & 1))
-#define DO_VAVGR(a, b) ((a >> 1) + (b >> 1) + ((a | b) & 1))
-
-#define DO_3OP(NAME, BIT, E, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)); \
- } \
-}
-
-DO_3OP(vavg_b, 8, B, DO_VAVG)
-DO_3OP(vavg_h, 16, H, DO_VAVG)
-DO_3OP(vavg_w, 32, W, DO_VAVG)
-DO_3OP(vavg_d, 64, D, DO_VAVG)
-DO_3OP(vavgr_b, 8, B, DO_VAVGR)
-DO_3OP(vavgr_h, 16, H, DO_VAVGR)
-DO_3OP(vavgr_w, 32, W, DO_VAVGR)
-DO_3OP(vavgr_d, 64, D, DO_VAVGR)
-DO_3OP(vavg_bu, 8, UB, DO_VAVG)
-DO_3OP(vavg_hu, 16, UH, DO_VAVG)
-DO_3OP(vavg_wu, 32, UW, DO_VAVG)
-DO_3OP(vavg_du, 64, UD, DO_VAVG)
-DO_3OP(vavgr_bu, 8, UB, DO_VAVGR)
-DO_3OP(vavgr_hu, 16, UH, DO_VAVGR)
-DO_3OP(vavgr_wu, 32, UW, DO_VAVGR)
-DO_3OP(vavgr_du, 64, UD, DO_VAVGR)
-
-#define DO_VABSD(a, b) ((a > b) ? (a - b) : (b - a))
-
-DO_3OP(vabsd_b, 8, B, DO_VABSD)
-DO_3OP(vabsd_h, 16, H, DO_VABSD)
-DO_3OP(vabsd_w, 32, W, DO_VABSD)
-DO_3OP(vabsd_d, 64, D, DO_VABSD)
-DO_3OP(vabsd_bu, 8, UB, DO_VABSD)
-DO_3OP(vabsd_hu, 16, UH, DO_VABSD)
-DO_3OP(vabsd_wu, 32, UW, DO_VABSD)
-DO_3OP(vabsd_du, 64, UD, DO_VABSD)
-
-#define DO_VABS(a) ((a < 0) ? (-a) : (a))
-
-#define DO_VADDA(NAME, BIT, E, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = DO_OP(Vj->E(i)) + DO_OP(Vk->E(i)); \
- } \
-}
-
-DO_VADDA(vadda_b, 8, B, DO_VABS)
-DO_VADDA(vadda_h, 16, H, DO_VABS)
-DO_VADDA(vadda_w, 32, W, DO_VABS)
-DO_VADDA(vadda_d, 64, D, DO_VABS)
-
-#define DO_MIN(a, b) (a < b ? a : b)
-#define DO_MAX(a, b) (a > b ? a : b)
-
-#define VMINMAXI(NAME, BIT, E, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- typedef __typeof(Vd->E(0)) TD; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \
- } \
-}
-
-VMINMAXI(vmini_b, 8, B, DO_MIN)
-VMINMAXI(vmini_h, 16, H, DO_MIN)
-VMINMAXI(vmini_w, 32, W, DO_MIN)
-VMINMAXI(vmini_d, 64, D, DO_MIN)
-VMINMAXI(vmaxi_b, 8, B, DO_MAX)
-VMINMAXI(vmaxi_h, 16, H, DO_MAX)
-VMINMAXI(vmaxi_w, 32, W, DO_MAX)
-VMINMAXI(vmaxi_d, 64, D, DO_MAX)
-VMINMAXI(vmini_bu, 8, UB, DO_MIN)
-VMINMAXI(vmini_hu, 16, UH, DO_MIN)
-VMINMAXI(vmini_wu, 32, UW, DO_MIN)
-VMINMAXI(vmini_du, 64, UD, DO_MIN)
-VMINMAXI(vmaxi_bu, 8, UB, DO_MAX)
-VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
-VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
-VMINMAXI(vmaxi_du, 64, UD, DO_MAX)
-
-#define DO_VMUH(NAME, BIT, E1, E2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- typedef __typeof(Vd->E1(0)) T; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT; \
- } \
-}
-
-void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t v)
-{
- uint64_t l, h1, h2;
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- muls64(&l, &h1, Vj->D(0), Vk->D(0));
- muls64(&l, &h2, Vj->D(1), Vk->D(1));
-
- Vd->D(0) = h1;
- Vd->D(1) = h2;
-}
-
-DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
-DO_VMUH(vmuh_h, 16, W, H, DO_MUH)
-DO_VMUH(vmuh_w, 32, D, W, DO_MUH)
-
-void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t v)
-{
- uint64_t l, h1, h2;
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
- VReg *Vk = (VReg *)vk;
-
- mulu64(&l, &h1, Vj->D(0), Vk->D(0));
- mulu64(&l, &h2, Vj->D(1), Vk->D(1));
-
- Vd->D(0) = h1;
- Vd->D(1) = h2;
-}
-
-DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
-DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH)
-DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH)
-
-#define DO_MUL(a, b) (a * b)
-
-DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL)
-DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL)
-DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL)
-
-DO_ODD(vmulwod_h_b, 16, H, B, DO_MUL)
-DO_ODD(vmulwod_w_h, 32, W, H, DO_MUL)
-DO_ODD(vmulwod_d_w, 64, D, W, DO_MUL)
-
-DO_EVEN(vmulwev_h_bu, 16, UH, UB, DO_MUL)
-DO_EVEN(vmulwev_w_hu, 32, UW, UH, DO_MUL)
-DO_EVEN(vmulwev_d_wu, 64, UD, UW, DO_MUL)
-
-DO_ODD(vmulwod_h_bu, 16, UH, UB, DO_MUL)
-DO_ODD(vmulwod_w_hu, 32, UW, UH, DO_MUL)
-DO_ODD(vmulwod_d_wu, 64, UD, UW, DO_MUL)
-
-DO_EVEN_U_S(vmulwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
-DO_EVEN_U_S(vmulwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
-DO_EVEN_U_S(vmulwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)
-
-DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
-DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
-DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)
-
-#define DO_MADD(a, b, c) (a + b * c)
-#define DO_MSUB(a, b, c) (a - b * c)
-
-#define VMADDSUB(NAME, BIT, E, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
-        Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i), Vk->E(i));    \
- } \
-}
-
-VMADDSUB(vmadd_b, 8, B, DO_MADD)
-VMADDSUB(vmadd_h, 16, H, DO_MADD)
-VMADDSUB(vmadd_w, 32, W, DO_MADD)
-VMADDSUB(vmadd_d, 64, D, DO_MADD)
-VMADDSUB(vmsub_b, 8, B, DO_MSUB)
-VMADDSUB(vmsub_h, 16, H, DO_MSUB)
-VMADDSUB(vmsub_w, 32, W, DO_MSUB)
-VMADDSUB(vmsub_d, 64, D, DO_MSUB)
-
-#define VMADDWEV(NAME, BIT, E1, E2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- typedef __typeof(Vd->E1(0)) TD; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
- } \
-}
-
-VMADDWEV(vmaddwev_h_b, 16, H, B, DO_MUL)
-VMADDWEV(vmaddwev_w_h, 32, W, H, DO_MUL)
-VMADDWEV(vmaddwev_d_w, 64, D, W, DO_MUL)
-VMADDWEV(vmaddwev_h_bu, 16, UH, UB, DO_MUL)
-VMADDWEV(vmaddwev_w_hu, 32, UW, UH, DO_MUL)
-VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL)
-
-#define VMADDWOD(NAME, BIT, E1, E2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- typedef __typeof(Vd->E1(0)) TD; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1), \
- (TD)Vk->E2(2 * i + 1)); \
- } \
-}
-
-VMADDWOD(vmaddwod_h_b, 16, H, B, DO_MUL)
-VMADDWOD(vmaddwod_w_h, 32, W, H, DO_MUL)
-VMADDWOD(vmaddwod_d_w, 64, D, W, DO_MUL)
-VMADDWOD(vmaddwod_h_bu, 16, UH, UB, DO_MUL)
-VMADDWOD(vmaddwod_w_hu, 32, UW, UH, DO_MUL)
-VMADDWOD(vmaddwod_d_wu, 64, UD, UW, DO_MUL)
-
-#define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- typedef __typeof(Vd->ES1(0)) TS1; \
- typedef __typeof(Vd->EU1(0)) TU1; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i), \
- (TS1)Vk->ES2(2 * i)); \
- } \
-}
-
-VMADDWEV_U_S(vmaddwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
-VMADDWEV_U_S(vmaddwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
-VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)
-
-#define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- typedef __typeof(Vd->ES1(0)) TS1; \
- typedef __typeof(Vd->EU1(0)) TU1; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1), \
- (TS1)Vk->ES2(2 * i + 1)); \
- } \
-}
-
-VMADDWOD_U_S(vmaddwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
-VMADDWOD_U_S(vmaddwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
-VMADDWOD_U_S(vmaddwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)
-
-#define DO_DIVU(N, M) (unlikely(M == 0) ? 0 : N / M)
-#define DO_REMU(N, M) (unlikely(M == 0) ? 0 : N % M)
-#define DO_DIV(N, M) (unlikely(M == 0) ? 0 :\
- unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
-#define DO_REM(N, M) (unlikely(M == 0) ? 0 :\
- unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)
-
-#define VDIV(NAME, BIT, E, DO_OP) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)); \
- } \
-}
-
-VDIV(vdiv_b, 8, B, DO_DIV)
-VDIV(vdiv_h, 16, H, DO_DIV)
-VDIV(vdiv_w, 32, W, DO_DIV)
-VDIV(vdiv_d, 64, D, DO_DIV)
-VDIV(vdiv_bu, 8, UB, DO_DIVU)
-VDIV(vdiv_hu, 16, UH, DO_DIVU)
-VDIV(vdiv_wu, 32, UW, DO_DIVU)
-VDIV(vdiv_du, 64, UD, DO_DIVU)
-VDIV(vmod_b, 8, B, DO_REM)
-VDIV(vmod_h, 16, H, DO_REM)
-VDIV(vmod_w, 32, W, DO_REM)
-VDIV(vmod_d, 64, D, DO_REM)
-VDIV(vmod_bu, 8, UB, DO_REMU)
-VDIV(vmod_hu, 16, UH, DO_REMU)
-VDIV(vmod_wu, 32, UW, DO_REMU)
-VDIV(vmod_du, 64, UD, DO_REMU)
-
-#define VSAT_S(NAME, BIT, E) \
-void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- typedef __typeof(Vd->E(0)) TD; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : \
-                   Vj->E(i) < (TD)~max ? (TD)~max : Vj->E(i);         \
- } \
-}
-
-VSAT_S(vsat_b, 8, B)
-VSAT_S(vsat_h, 16, H)
-VSAT_S(vsat_w, 32, W)
-VSAT_S(vsat_d, 64, D)
-
-#define VSAT_U(NAME, BIT, E) \
-void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- typedef __typeof(Vd->E(0)) TD; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : Vj->E(i); \
- } \
-}
-
-VSAT_U(vsat_bu, 8, UB)
-VSAT_U(vsat_hu, 16, UH)
-VSAT_U(vsat_wu, 32, UW)
-VSAT_U(vsat_du, 64, UD)
-
-#define VEXTH(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = Vj->E2(i + LSX_LEN/BIT); \
- } \
-}
-
-void HELPER(vexth_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- Vd->Q(0) = int128_makes64(Vj->D(1));
-}
-
-void HELPER(vexth_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- Vd->Q(0) = int128_make64((uint64_t)Vj->D(1));
-}
-
-VEXTH(vexth_h_b, 16, H, B)
-VEXTH(vexth_w_h, 32, W, H)
-VEXTH(vexth_d_w, 64, D, W)
-VEXTH(vexth_hu_bu, 16, UH, UB)
-VEXTH(vexth_wu_hu, 32, UW, UH)
-VEXTH(vexth_du_wu, 64, UD, UW)
-
-#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)
-
-DO_3OP(vsigncov_b, 8, B, DO_SIGNCOV)
-DO_3OP(vsigncov_h, 16, H, DO_SIGNCOV)
-DO_3OP(vsigncov_w, 32, W, DO_SIGNCOV)
-DO_3OP(vsigncov_d, 64, D, DO_SIGNCOV)
-
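-/*
- * vmskltz: collect the sign bit of every element into the low bits of
- * D(0).  The shift-and-OR cascade folds the per-element sign bits
- * (at positions 7, 15, ..., 63 for bytes) into the top byte, which is
- * then shifted down to form the mask.
- */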
-static uint64_t do_vmskltz_b(int64_t val)
-{
- uint64_t m = 0x8080808080808080ULL;
- uint64_t c = val & m;
- c |= c << 7;
- c |= c << 14;
- c |= c << 28;
- return c >> 56;
-}
-
-void HELPER(vmskltz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- uint16_t temp = 0;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- temp = do_vmskltz_b(Vj->D(0));
- temp |= (do_vmskltz_b(Vj->D(1)) << 8);
- Vd->D(0) = temp;
- Vd->D(1) = 0;
-}
-
-static uint64_t do_vmskltz_h(int64_t val)
-{
- uint64_t m = 0x8000800080008000ULL;
- uint64_t c = val & m;
- c |= c << 15;
- c |= c << 30;
- return c >> 60;
-}
-
-void HELPER(vmskltz_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- uint16_t temp = 0;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- temp = do_vmskltz_h(Vj->D(0));
- temp |= (do_vmskltz_h(Vj->D(1)) << 4);
- Vd->D(0) = temp;
- Vd->D(1) = 0;
-}
-
-static uint64_t do_vmskltz_w(int64_t val)
-{
- uint64_t m = 0x8000000080000000ULL;
- uint64_t c = val & m;
- c |= c << 31;
- return c >> 62;
-}
-
-void HELPER(vmskltz_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- uint16_t temp = 0;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- temp = do_vmskltz_w(Vj->D(0));
- temp |= (do_vmskltz_w(Vj->D(1)) << 2);
- Vd->D(0) = temp;
- Vd->D(1) = 0;
-}
-
-static uint64_t do_vmskltz_d(int64_t val)
-{
- return (uint64_t)val >> 63;
-}
-
-void HELPER(vmskltz_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- uint16_t temp = 0;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- temp = do_vmskltz_d(Vj->D(0));
- temp |= (do_vmskltz_d(Vj->D(1)) << 1);
- Vd->D(0) = temp;
- Vd->D(1) = 0;
-}
-
-void HELPER(vmskgez_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- uint16_t temp = 0;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- temp = do_vmskltz_b(Vj->D(0));
- temp |= (do_vmskltz_b(Vj->D(1)) << 8);
- Vd->D(0) = (uint16_t)(~temp);
- Vd->D(1) = 0;
-}
-
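-/*
- * Zero-byte detection: with m = 0x7f repeated, ((a & m) + m) | a | m
- * has bit 7 of a byte clear iff that byte is zero, so the complement
- * marks exactly the zero bytes; the folding then packs the marks into
- * eight bits.
- */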
-static uint64_t do_vmskez_b(uint64_t a)
-{
- uint64_t m = 0x7f7f7f7f7f7f7f7fULL;
- uint64_t c = ~(((a & m) + m) | a | m);
- c |= c << 7;
- c |= c << 14;
- c |= c << 28;
- return c >> 56;
-}
-
-void HELPER(vmsknz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- uint16_t temp = 0;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- temp = do_vmskez_b(Vj->D(0));
- temp |= (do_vmskez_b(Vj->D(1)) << 8);
- Vd->D(0) = (uint16_t)(~temp);
- Vd->D(1) = 0;
-}
-
-void HELPER(vnori_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
-{
- int i;
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
-
- for (i = 0; i < LSX_LEN/8; i++) {
- Vd->B(i) = ~(Vj->B(i) | (uint8_t)imm);
- }
-}
-
-#define VSLLWIL(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- typedef __typeof(temp.E1(0)) TD; \
- \
- temp.D(0) = 0; \
- temp.D(1) = 0; \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E1(i) = (TD)Vj->E2(i) << (imm % BIT); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vextl_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- Vd->Q(0) = int128_makes64(Vj->D(0));
-}
-
-void HELPER(vextl_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- Vd->Q(0) = int128_make64(Vj->D(0));
-}
-
-VSLLWIL(vsllwil_h_b, 16, H, B)
-VSLLWIL(vsllwil_w_h, 32, W, H)
-VSLLWIL(vsllwil_d_w, 64, D, W)
-VSLLWIL(vsllwil_hu_bu, 16, UH, UB)
-VSLLWIL(vsllwil_wu_hu, 32, UW, UH)
-VSLLWIL(vsllwil_du_wu, 64, UD, UW)
-
-#define do_vsrlr(E, T) \
-static T do_vsrlr_ ##E(T s1, int sh) \
-{ \
- if (sh == 0) { \
- return s1; \
- } else { \
- return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
- } \
-}
-
-do_vsrlr(B, uint8_t)
-do_vsrlr(H, uint16_t)
-do_vsrlr(W, uint32_t)
-do_vsrlr(D, uint64_t)
-
-#define VSRLR(NAME, BIT, T, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
- } \
-}
-
-VSRLR(vsrlr_b, 8, uint8_t, B)
-VSRLR(vsrlr_h, 16, uint16_t, H)
-VSRLR(vsrlr_w, 32, uint32_t, W)
-VSRLR(vsrlr_d, 64, uint64_t, D)
-
-#define VSRLRI(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), imm); \
- } \
-}
-
-VSRLRI(vsrlri_b, 8, B)
-VSRLRI(vsrlri_h, 16, H)
-VSRLRI(vsrlri_w, 32, W)
-VSRLRI(vsrlri_d, 64, D)
-
-#define do_vsrar(E, T) \
-static T do_vsrar_ ##E(T s1, int sh) \
-{ \
- if (sh == 0) { \
- return s1; \
- } else { \
- return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
- } \
-}
-
-do_vsrar(B, int8_t)
-do_vsrar(H, int16_t)
-do_vsrar(W, int32_t)
-do_vsrar(D, int64_t)
-
-#define VSRAR(NAME, BIT, T, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = do_vsrar_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
- } \
-}
-
-VSRAR(vsrar_b, 8, uint8_t, B)
-VSRAR(vsrar_h, 16, uint16_t, H)
-VSRAR(vsrar_w, 32, uint32_t, W)
-VSRAR(vsrar_d, 64, uint64_t, D)
-
-#define VSRARI(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = do_vsrar_ ## E(Vj->E(i), imm); \
- } \
-}
-
-VSRARI(vsrari_b, 8, B)
-VSRARI(vsrari_h, 16, H)
-VSRARI(vsrari_w, 32, W)
-VSRARI(vsrari_d, 64, D)
-
-#define R_SHIFT(a, b) (a >> b)
-
-#define VSRLN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
-        Vd->E1(i) = R_SHIFT((T)Vj->E2(i), ((T)Vk->E2(i)) % BIT);     \
- } \
- Vd->D(1) = 0; \
-}
-
-VSRLN(vsrln_b_h, 16, uint16_t, B, H)
-VSRLN(vsrln_h_w, 32, uint32_t, H, W)
-VSRLN(vsrln_w_d, 64, uint64_t, W, D)
-
-#define VSRAN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = R_SHIFT(Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSRAN(vsran_b_h, 16, uint16_t, B, H)
-VSRAN(vsran_h_w, 32, uint32_t, H, W)
-VSRAN(vsran_w_d, 64, uint64_t, W, D)
-
-#define VSRLNI(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i, max; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- temp.D(0) = 0; \
- temp.D(1) = 0; \
- max = LSX_LEN/BIT; \
- for (i = 0; i < max; i++) { \
- temp.E1(i) = R_SHIFT((T)Vj->E2(i), imm); \
- temp.E1(i + max) = R_SHIFT((T)Vd->E2(i), imm); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vsrlni_d_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- temp.D(0) = 0;
- temp.D(1) = 0;
- temp.D(0) = int128_getlo(int128_urshift(Vj->Q(0), imm % 128));
- temp.D(1) = int128_getlo(int128_urshift(Vd->Q(0), imm % 128));
- *Vd = temp;
-}
-
-VSRLNI(vsrlni_b_h, 16, uint16_t, B, H)
-VSRLNI(vsrlni_h_w, 32, uint32_t, H, W)
-VSRLNI(vsrlni_w_d, 64, uint64_t, W, D)
-
-#define VSRANI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i, max; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- temp.D(0) = 0; \
- temp.D(1) = 0; \
- max = LSX_LEN/BIT; \
- for (i = 0; i < max; i++) { \
- temp.E1(i) = R_SHIFT(Vj->E2(i), imm); \
- temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vsrani_d_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- temp.D(0) = 0;
- temp.D(1) = 0;
- temp.D(0) = int128_getlo(int128_rshift(Vj->Q(0), imm % 128));
- temp.D(1) = int128_getlo(int128_rshift(Vd->Q(0), imm % 128));
- *Vd = temp;
-}
-
-VSRANI(vsrani_b_h, 16, B, H)
-VSRANI(vsrani_h_w, 32, H, W)
-VSRANI(vsrani_w_d, 64, W, D)
-
-#define VSRLRN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = do_vsrlr_ ## E2(Vj->E2(i), ((T)Vk->E2(i))%BIT); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSRLRN(vsrlrn_b_h, 16, uint16_t, B, H)
-VSRLRN(vsrlrn_h_w, 32, uint32_t, H, W)
-VSRLRN(vsrlrn_w_d, 64, uint64_t, W, D)
-
-#define VSRARN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = do_vsrar_ ## E2(Vj->E2(i), ((T)Vk->E2(i))%BIT); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSRARN(vsrarn_b_h, 16, uint8_t, B, H)
-VSRARN(vsrarn_h_w, 32, uint16_t, H, W)
-VSRARN(vsrarn_w_d, 64, uint32_t, W, D)
-
-#define VSRLRNI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i, max; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- temp.D(0) = 0; \
- temp.D(1) = 0; \
- max = LSX_LEN/BIT; \
- for (i = 0; i < max; i++) { \
- temp.E1(i) = do_vsrlr_ ## E2(Vj->E2(i), imm); \
- temp.E1(i + max) = do_vsrlr_ ## E2(Vd->E2(i), imm); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vsrlrni_d_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- Int128 r1, r2;
-
- if (imm == 0) {
- temp.D(0) = int128_getlo(Vj->Q(0));
- temp.D(1) = int128_getlo(Vd->Q(0));
- } else {
- r1 = int128_and(int128_urshift(Vj->Q(0), (imm -1)), int128_one());
- r2 = int128_and(int128_urshift(Vd->Q(0), (imm -1)), int128_one());
-
- temp.D(0) = int128_getlo(int128_add(int128_urshift(Vj->Q(0), imm), r1));
- temp.D(1) = int128_getlo(int128_add(int128_urshift(Vd->Q(0), imm), r2));
- }
- *Vd = temp;
-}
-
-VSRLRNI(vsrlrni_b_h, 16, B, H)
-VSRLRNI(vsrlrni_h_w, 32, H, W)
-VSRLRNI(vsrlrni_w_d, 64, W, D)
-
-#define VSRARNI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i, max; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- temp.D(0) = 0; \
- temp.D(1) = 0; \
- max = LSX_LEN/BIT; \
- for (i = 0; i < max; i++) { \
- temp.E1(i) = do_vsrar_ ## E2(Vj->E2(i), imm); \
- temp.E1(i + max) = do_vsrar_ ## E2(Vd->E2(i), imm); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vsrarni_d_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- Int128 r1, r2;
-
- if (imm == 0) {
- temp.D(0) = int128_getlo(Vj->Q(0));
- temp.D(1) = int128_getlo(Vd->Q(0));
- } else {
- r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one());
- r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one());
-
- temp.D(0) = int128_getlo(int128_add(int128_rshift(Vj->Q(0), imm), r1));
- temp.D(1) = int128_getlo(int128_add(int128_rshift(Vd->Q(0), imm), r2));
- }
- *Vd = temp;
-}
-
-VSRARNI(vsrarni_b_h, 16, B, H)
-VSRARNI(vsrarni_h_w, 32, H, W)
-VSRARNI(vsrarni_w_d, 64, W, D)
-
-#define SSRLNS(NAME, T1, T2, T3) \
-static T1 do_ssrlns_ ## NAME(T2 e2, int sa, int sh) \
-{ \
- T1 shft_res; \
- if (sa == 0) { \
- shft_res = e2; \
- } else { \
- shft_res = (((T1)e2) >> sa); \
- } \
- T3 mask; \
- mask = (1ull << sh) -1; \
- if (shft_res > mask) { \
- return mask; \
- } else { \
- return shft_res; \
- } \
-}
-
-SSRLNS(B, uint16_t, int16_t, uint8_t)
-SSRLNS(H, uint32_t, int32_t, uint16_t)
-SSRLNS(W, uint64_t, int64_t, uint32_t)
-
-#define VSSRLN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
-        Vd->E1(i) = do_ssrlns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSSRLN(vssrln_b_h, 16, uint16_t, B, H)
-VSSRLN(vssrln_h_w, 32, uint32_t, H, W)
-VSSRLN(vssrln_w_d, 64, uint64_t, W, D)
-
-#define SSRANS(E, T1, T2) \
-static T1 do_ssrans_ ## E(T1 e2, int sa, int sh) \
-{ \
- T1 shft_res; \
- if (sa == 0) { \
- shft_res = e2; \
- } else { \
- shft_res = e2 >> sa; \
- } \
- T2 mask; \
- mask = (1ll << sh) -1; \
- if (shft_res > mask) { \
- return mask; \
- } else if (shft_res < -(mask +1)) { \
- return ~mask; \
- } else { \
- return shft_res; \
- } \
-}
-
-SSRANS(B, int16_t, int8_t)
-SSRANS(H, int32_t, int16_t)
-SSRANS(W, int64_t, int32_t)
-
-#define VSSRAN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = do_ssrans_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSSRAN(vssran_b_h, 16, uint16_t, B, H)
-VSSRAN(vssran_h_w, 32, uint32_t, H, W)
-VSSRAN(vssran_w_d, 64, uint64_t, W, D)
-
-#define SSRLNU(E, T1, T2, T3) \
-static T1 do_ssrlnu_ ## E(T3 e2, int sa, int sh) \
-{ \
- T1 shft_res; \
- if (sa == 0) { \
- shft_res = e2; \
- } else { \
- shft_res = (((T1)e2) >> sa); \
- } \
- T2 mask; \
- mask = (1ull << sh) -1; \
- if (shft_res > mask) { \
- return mask; \
- } else { \
- return shft_res; \
- } \
-}
-
-SSRLNU(B, uint16_t, uint8_t, int16_t)
-SSRLNU(H, uint32_t, uint16_t, int32_t)
-SSRLNU(W, uint64_t, uint32_t, int64_t)
-
-#define VSSRLNU(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSSRLNU(vssrln_bu_h, 16, uint16_t, B, H)
-VSSRLNU(vssrln_hu_w, 32, uint32_t, H, W)
-VSSRLNU(vssrln_wu_d, 64, uint64_t, W, D)
-
-#define SSRANU(E, T1, T2, T3) \
-static T1 do_ssranu_ ## E(T3 e2, int sa, int sh) \
-{ \
- T1 shft_res; \
- if (sa == 0) { \
- shft_res = e2; \
- } else { \
- shft_res = e2 >> sa; \
- } \
- if (e2 < 0) { \
- shft_res = 0; \
- } \
- T2 mask; \
- mask = (1ull << sh) -1; \
- if (shft_res > mask) { \
- return mask; \
- } else { \
- return shft_res; \
- } \
-}
-
-SSRANU(B, uint16_t, uint8_t, int16_t)
-SSRANU(H, uint32_t, uint16_t, int32_t)
-SSRANU(W, uint64_t, uint32_t, int64_t)
-
-#define VSSRANU(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = do_ssranu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSSRANU(vssran_bu_h, 16, uint16_t, B, H)
-VSSRANU(vssran_hu_w, 32, uint32_t, H, W)
-VSSRANU(vssran_wu_d, 64, uint64_t, W, D)
-
-#define VSSRLNI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E1(i) = do_ssrlns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
- temp.E1(i + LSX_LEN/BIT) = do_ssrlns_ ## E1(Vd->E2(i), imm, BIT/2 -1);\
- } \
- *Vd = temp; \
-}
-
-void HELPER(vssrlni_d_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- Int128 shft_res1, shft_res2, mask;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- if (imm == 0) {
- shft_res1 = Vj->Q(0);
- shft_res2 = Vd->Q(0);
- } else {
- shft_res1 = int128_urshift(Vj->Q(0), imm);
- shft_res2 = int128_urshift(Vd->Q(0), imm);
- }
- mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
-
-    if (int128_ult(mask, shft_res1)) {
-        Vd->D(0) = int128_getlo(mask);
-    } else {
-        Vd->D(0) = int128_getlo(shft_res1);
-    }
-
-    if (int128_ult(mask, shft_res2)) {
-        Vd->D(1) = int128_getlo(mask);
-    } else {
-        Vd->D(1) = int128_getlo(shft_res2);
-    }
-}
-
-VSSRLNI(vssrlni_b_h, 16, B, H)
-VSSRLNI(vssrlni_h_w, 32, H, W)
-VSSRLNI(vssrlni_w_d, 64, W, D)
-
-#define VSSRANI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E1(i) = do_ssrans_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
- temp.E1(i + LSX_LEN/BIT) = do_ssrans_ ## E1(Vd->E2(i), imm, BIT/2 -1); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vssrani_d_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- Int128 shft_res1, shft_res2, mask, min;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- if (imm == 0) {
- shft_res1 = Vj->Q(0);
- shft_res2 = Vd->Q(0);
- } else {
- shft_res1 = int128_rshift(Vj->Q(0), imm);
- shft_res2 = int128_rshift(Vd->Q(0), imm);
- }
- mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
- min = int128_lshift(int128_one(), 63);
-
- if (int128_gt(shft_res1, mask)) {
- Vd->D(0) = int128_getlo(mask);
- } else if (int128_lt(shft_res1, int128_neg(min))) {
- Vd->D(0) = int128_getlo(min);
- } else {
- Vd->D(0) = int128_getlo(shft_res1);
- }
-
- if (int128_gt(shft_res2, mask)) {
- Vd->D(1) = int128_getlo(mask);
- } else if (int128_lt(shft_res2, int128_neg(min))) {
- Vd->D(1) = int128_getlo(min);
- } else {
- Vd->D(1) = int128_getlo(shft_res2);
- }
-}
-
-VSSRANI(vssrani_b_h, 16, B, H)
-VSSRANI(vssrani_h_w, 32, H, W)
-VSSRANI(vssrani_w_d, 64, W, D)
-
-#define VSSRLNUI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), imm, BIT/2); \
- temp.E1(i + LSX_LEN/BIT) = do_ssrlnu_ ## E1(Vd->E2(i), imm, BIT/2); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vssrlni_du_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- Int128 shft_res1, shft_res2, mask;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- if (imm == 0) {
- shft_res1 = Vj->Q(0);
- shft_res2 = Vd->Q(0);
- } else {
- shft_res1 = int128_urshift(Vj->Q(0), imm);
- shft_res2 = int128_urshift(Vd->Q(0), imm);
- }
- mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());
-
-    if (int128_ult(mask, shft_res1)) {
-        Vd->D(0) = int128_getlo(mask);
-    } else {
-        Vd->D(0) = int128_getlo(shft_res1);
-    }
-
-    if (int128_ult(mask, shft_res2)) {
-        Vd->D(1) = int128_getlo(mask);
-    } else {
-        Vd->D(1) = int128_getlo(shft_res2);
-    }
-}
-
-VSSRLNUI(vssrlni_bu_h, 16, B, H)
-VSSRLNUI(vssrlni_hu_w, 32, H, W)
-VSSRLNUI(vssrlni_wu_d, 64, W, D)
-
-#define VSSRANUI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E1(i) = do_ssranu_ ## E1(Vj->E2(i), imm, BIT/2); \
- temp.E1(i + LSX_LEN/BIT) = do_ssranu_ ## E1(Vd->E2(i), imm, BIT/2); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vssrani_du_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- Int128 shft_res1, shft_res2, mask;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- if (imm == 0) {
- shft_res1 = Vj->Q(0);
- shft_res2 = Vd->Q(0);
- } else {
- shft_res1 = int128_rshift(Vj->Q(0), imm);
- shft_res2 = int128_rshift(Vd->Q(0), imm);
- }
-
- if (int128_lt(Vj->Q(0), int128_zero())) {
- shft_res1 = int128_zero();
- }
-
- if (int128_lt(Vd->Q(0), int128_zero())) {
- shft_res2 = int128_zero();
- }
-
- mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());
-
-    if (int128_ult(mask, shft_res1)) {
-        Vd->D(0) = int128_getlo(mask);
-    } else {
-        Vd->D(0) = int128_getlo(shft_res1);
-    }
-
-    if (int128_ult(mask, shft_res2)) {
-        Vd->D(1) = int128_getlo(mask);
-    } else {
-        Vd->D(1) = int128_getlo(shft_res2);
-    }
-}
-
-VSSRANUI(vssrani_bu_h, 16, B, H)
-VSSRANUI(vssrani_hu_w, 32, H, W)
-VSSRANUI(vssrani_wu_d, 64, W, D)
-
-#define SSRLRNS(E1, E2, T1, T2, T3) \
-static T1 do_ssrlrns_ ## E1(T2 e2, int sa, int sh) \
-{ \
- T1 shft_res; \
- \
- shft_res = do_vsrlr_ ## E2(e2, sa); \
- T1 mask; \
- mask = (1ull << sh) -1; \
- if (shft_res > mask) { \
- return mask; \
- } else { \
- return shft_res; \
- } \
-}
-
-SSRLRNS(B, H, uint16_t, int16_t, uint8_t)
-SSRLRNS(H, W, uint32_t, int32_t, uint16_t)
-SSRLRNS(W, D, uint64_t, int64_t, uint32_t)
-
-#define VSSRLRN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSSRLRN(vssrlrn_b_h, 16, uint16_t, B, H)
-VSSRLRN(vssrlrn_h_w, 32, uint32_t, H, W)
-VSSRLRN(vssrlrn_w_d, 64, uint64_t, W, D)
-
-#define SSRARNS(E1, E2, T1, T2) \
-static T1 do_ssrarns_ ## E1(T1 e2, int sa, int sh) \
-{ \
- T1 shft_res; \
- \
- shft_res = do_vsrar_ ## E2(e2, sa); \
- T2 mask; \
- mask = (1ll << sh) -1; \
- if (shft_res > mask) { \
- return mask; \
- } else if (shft_res < -(mask +1)) { \
- return ~mask; \
- } else { \
- return shft_res; \
- } \
-}
-
-SSRARNS(B, H, int16_t, int8_t)
-SSRARNS(H, W, int32_t, int16_t)
-SSRARNS(W, D, int64_t, int32_t)
-
-#define VSSRARN(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = do_ssrarns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSSRARN(vssrarn_b_h, 16, uint16_t, B, H)
-VSSRARN(vssrarn_h_w, 32, uint32_t, H, W)
-VSSRARN(vssrarn_w_d, 64, uint64_t, W, D)
-
-#define SSRLRNU(E1, E2, T1, T2, T3) \
-static T1 do_ssrlrnu_ ## E1(T3 e2, int sa, int sh) \
-{ \
- T1 shft_res; \
- \
- shft_res = do_vsrlr_ ## E2(e2, sa); \
- \
- T2 mask; \
- mask = (1ull << sh) -1; \
- if (shft_res > mask) { \
- return mask; \
- } else { \
- return shft_res; \
- } \
-}
-
-SSRLRNU(B, H, uint16_t, uint8_t, int16_t)
-SSRLRNU(H, W, uint32_t, uint16_t, int32_t)
-SSRLRNU(W, D, uint64_t, uint32_t, int64_t)
-
-#define VSSRLRNU(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSSRLRNU(vssrlrn_bu_h, 16, uint16_t, B, H)
-VSSRLRNU(vssrlrn_hu_w, 32, uint32_t, H, W)
-VSSRLRNU(vssrlrn_wu_d, 64, uint64_t, W, D)
-
-#define SSRARNU(E1, E2, T1, T2, T3) \
-static T1 do_ssrarnu_ ## E1(T3 e2, int sa, int sh) \
-{ \
- T1 shft_res; \
- \
- if (e2 < 0) { \
- shft_res = 0; \
- } else { \
- shft_res = do_vsrar_ ## E2(e2, sa); \
- } \
- T2 mask; \
- mask = (1ull << sh) -1; \
- if (shft_res > mask) { \
- return mask; \
- } else { \
- return shft_res; \
- } \
-}
-
-SSRARNU(B, H, uint16_t, uint8_t, int16_t)
-SSRARNU(H, W, uint32_t, uint16_t, int32_t)
-SSRARNU(W, D, uint64_t, uint32_t, int64_t)
-
-#define VSSRARNU(NAME, BIT, T, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
- } \
- Vd->D(1) = 0; \
-}
-
-VSSRARNU(vssrarn_bu_h, 16, uint16_t, B, H)
-VSSRARNU(vssrarn_hu_w, 32, uint32_t, H, W)
-VSSRARNU(vssrarn_wu_d, 64, uint64_t, W, D)
-
-#define VSSRLRNI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
- temp.E1(i + LSX_LEN/BIT) = do_ssrlrns_ ## E1(Vd->E2(i), imm, BIT/2 -1);\
- } \
- *Vd = temp; \
-}
-
-#define VSSRLRNI_Q(NAME, sh) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- Int128 shft_res1, shft_res2, mask, r1, r2; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- if (imm == 0) { \
- shft_res1 = Vj->Q(0); \
- shft_res2 = Vd->Q(0); \
- } else { \
- r1 = int128_and(int128_urshift(Vj->Q(0), (imm -1)), int128_one()); \
- r2 = int128_and(int128_urshift(Vd->Q(0), (imm -1)), int128_one()); \
- \
- shft_res1 = (int128_add(int128_urshift(Vj->Q(0), imm), r1)); \
- shft_res2 = (int128_add(int128_urshift(Vd->Q(0), imm), r2)); \
- } \
- \
- mask = int128_sub(int128_lshift(int128_one(), sh), int128_one()); \
- \
-    if (int128_ult(mask, shft_res1)) {                                    \
-        Vd->D(0) = int128_getlo(mask);                                    \
-    } else {                                                              \
-        Vd->D(0) = int128_getlo(shft_res1);                               \
-    }                                                                     \
-                                                                          \
-    if (int128_ult(mask, shft_res2)) {                                    \
-        Vd->D(1) = int128_getlo(mask);                                    \
-    } else {                                                              \
-        Vd->D(1) = int128_getlo(shft_res2);                               \
-    }                                                                     \
-}
-
-VSSRLRNI(vssrlrni_b_h, 16, B, H)
-VSSRLRNI(vssrlrni_h_w, 32, H, W)
-VSSRLRNI(vssrlrni_w_d, 64, W, D)
-VSSRLRNI_Q(vssrlrni_d_q, 63)
-
-#define VSSRARNI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E1(i) = do_ssrarns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
- temp.E1(i + LSX_LEN/BIT) = do_ssrarns_ ## E1(Vd->E2(i), imm, BIT/2 -1); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vssrarni_d_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- if (imm == 0) {
- shft_res1 = Vj->Q(0);
- shft_res2 = Vd->Q(0);
- } else {
- r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one());
- r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one());
-
- shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
- shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
- }
-
- mask1 = int128_sub(int128_lshift(int128_one(), 63), int128_one());
- mask2 = int128_lshift(int128_one(), 63);
-
- if (int128_gt(shft_res1, mask1)) {
- Vd->D(0) = int128_getlo(mask1);
- } else if (int128_lt(shft_res1, int128_neg(mask2))) {
- Vd->D(0) = int128_getlo(mask2);
- } else {
- Vd->D(0) = int128_getlo(shft_res1);
- }
-
- if (int128_gt(shft_res2, mask1)) {
- Vd->D(1) = int128_getlo(mask1);
- } else if (int128_lt(shft_res2, int128_neg(mask2))) {
- Vd->D(1) = int128_getlo(mask2);
- } else {
- Vd->D(1) = int128_getlo(shft_res2);
- }
-}
-
-VSSRARNI(vssrarni_b_h, 16, B, H)
-VSSRARNI(vssrarni_h_w, 32, H, W)
-VSSRARNI(vssrarni_w_d, 64, W, D)
-
-#define VSSRLRNUI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), imm, BIT/2); \
- temp.E1(i + LSX_LEN/BIT) = do_ssrlrnu_ ## E1(Vd->E2(i), imm, BIT/2); \
- } \
- *Vd = temp; \
-}
-
-VSSRLRNUI(vssrlrni_bu_h, 16, B, H)
-VSSRLRNUI(vssrlrni_hu_w, 32, H, W)
-VSSRLRNUI(vssrlrni_wu_d, 64, W, D)
-VSSRLRNI_Q(vssrlrni_du_q, 64)
-
-#define VSSRARNUI(NAME, BIT, E1, E2) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), imm, BIT/2); \
- temp.E1(i + LSX_LEN/BIT) = do_ssrarnu_ ## E1(Vd->E2(i), imm, BIT/2); \
- } \
- *Vd = temp; \
-}
-
-void HELPER(vssrarni_du_q)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- if (imm == 0) {
- shft_res1 = Vj->Q(0);
- shft_res2 = Vd->Q(0);
- } else {
- r1 = int128_and(int128_rshift(Vj->Q(0), (imm -1)), int128_one());
- r2 = int128_and(int128_rshift(Vd->Q(0), (imm -1)), int128_one());
-
- shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
- shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
- }
-
- if (int128_lt(Vj->Q(0), int128_zero())) {
- shft_res1 = int128_zero();
- }
- if (int128_lt(Vd->Q(0), int128_zero())) {
- shft_res2 = int128_zero();
- }
-
- mask1 = int128_sub(int128_lshift(int128_one(), 64), int128_one());
- mask2 = int128_lshift(int128_one(), 64);
-
- if (int128_gt(shft_res1, mask1)) {
- Vd->D(0) = int128_getlo(mask1);
- } else if (int128_lt(shft_res1, int128_neg(mask2))) {
- Vd->D(0) = int128_getlo(mask2);
- } else {
- Vd->D(0) = int128_getlo(shft_res1);
- }
-
- if (int128_gt(shft_res2, mask1)) {
- Vd->D(1) = int128_getlo(mask1);
- } else if (int128_lt(shft_res2, int128_neg(mask2))) {
- Vd->D(1) = int128_getlo(mask2);
- } else {
- Vd->D(1) = int128_getlo(shft_res2);
- }
-}
-
-VSSRARNUI(vssrarni_bu_h, 16, B, H)
-VSSRARNUI(vssrarni_hu_w, 32, H, W)
-VSSRARNUI(vssrarni_wu_d, 64, W, D)
-
-#define DO_2OP(NAME, BIT, E, DO_OP) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) \
- { \
- Vd->E(i) = DO_OP(Vj->E(i)); \
- } \
-}
-
-#define DO_CLO_B(N) (clz32(~N & 0xff) - 24)
-#define DO_CLO_H(N) (clz32(~N & 0xffff) - 16)
-#define DO_CLO_W(N) (clz32(~N))
-#define DO_CLO_D(N) (clz64(~N))
-#define DO_CLZ_B(N) (clz32(N) - 24)
-#define DO_CLZ_H(N) (clz32(N) - 16)
-#define DO_CLZ_W(N) (clz32(N))
-#define DO_CLZ_D(N) (clz64(N))
-
-DO_2OP(vclo_b, 8, UB, DO_CLO_B)
-DO_2OP(vclo_h, 16, UH, DO_CLO_H)
-DO_2OP(vclo_w, 32, UW, DO_CLO_W)
-DO_2OP(vclo_d, 64, UD, DO_CLO_D)
-DO_2OP(vclz_b, 8, UB, DO_CLZ_B)
-DO_2OP(vclz_h, 16, UH, DO_CLZ_H)
-DO_2OP(vclz_w, 32, UW, DO_CLZ_W)
-DO_2OP(vclz_d, 64, UD, DO_CLZ_D)
-
-#define VPCNT(NAME, BIT, E, FN) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) \
- { \
- Vd->E(i) = FN(Vj->E(i)); \
- } \
-}
-
-VPCNT(vpcnt_b, 8, UB, ctpop8)
-VPCNT(vpcnt_h, 16, UH, ctpop16)
-VPCNT(vpcnt_w, 32, UW, ctpop32)
-VPCNT(vpcnt_d, 64, UD, ctpop64)
-
-#define DO_BITCLR(a, bit) (a & ~(1ull << bit))
-#define DO_BITSET(a, bit) (a | 1ull << bit)
-#define DO_BITREV(a, bit) (a ^ (1ull << bit))
-
-#define DO_BIT(NAME, BIT, E, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- VReg *Vk = (VReg *)vk; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)%BIT); \
- } \
-}
-
-DO_BIT(vbitclr_b, 8, UB, DO_BITCLR)
-DO_BIT(vbitclr_h, 16, UH, DO_BITCLR)
-DO_BIT(vbitclr_w, 32, UW, DO_BITCLR)
-DO_BIT(vbitclr_d, 64, UD, DO_BITCLR)
-DO_BIT(vbitset_b, 8, UB, DO_BITSET)
-DO_BIT(vbitset_h, 16, UH, DO_BITSET)
-DO_BIT(vbitset_w, 32, UW, DO_BITSET)
-DO_BIT(vbitset_d, 64, UD, DO_BITSET)
-DO_BIT(vbitrev_b, 8, UB, DO_BITREV)
-DO_BIT(vbitrev_h, 16, UH, DO_BITREV)
-DO_BIT(vbitrev_w, 32, UW, DO_BITREV)
-DO_BIT(vbitrev_d, 64, UD, DO_BITREV)
-
-#define DO_BITI(NAME, BIT, E, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = DO_OP(Vj->E(i), imm); \
- } \
-}
-
-DO_BITI(vbitclri_b, 8, UB, DO_BITCLR)
-DO_BITI(vbitclri_h, 16, UH, DO_BITCLR)
-DO_BITI(vbitclri_w, 32, UW, DO_BITCLR)
-DO_BITI(vbitclri_d, 64, UD, DO_BITCLR)
-DO_BITI(vbitseti_b, 8, UB, DO_BITSET)
-DO_BITI(vbitseti_h, 16, UH, DO_BITSET)
-DO_BITI(vbitseti_w, 32, UW, DO_BITSET)
-DO_BITI(vbitseti_d, 64, UD, DO_BITSET)
-DO_BITI(vbitrevi_b, 8, UB, DO_BITREV)
-DO_BITI(vbitrevi_h, 16, UH, DO_BITREV)
-DO_BITI(vbitrevi_w, 32, UW, DO_BITREV)
-DO_BITI(vbitrevi_d, 64, UD, DO_BITREV)
-
-#define VFRSTP(NAME, BIT, MASK, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i, m; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- if (Vj->E(i) < 0) { \
- break; \
- } \
- } \
- m = Vk->E(0) & MASK; \
- Vd->E(m) = i; \
-}
-
-VFRSTP(vfrstp_b, 8, 0xf, B)
-VFRSTP(vfrstp_h, 16, 0x7, H)
-
-#define VFRSTPI(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i, m; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- if (Vj->E(i) < 0) { \
- break; \
- } \
- } \
- m = imm % (LSX_LEN/BIT); \
- Vd->E(m) = i; \
-}
-
-VFRSTPI(vfrstpi_b, 8, B)
-VFRSTPI(vfrstpi_h, 16, H)
-
-static void vec_update_fcsr0_mask(CPULoongArchState *env,
- uintptr_t pc, int mask)
-{
- int flags = get_float_exception_flags(&env->fp_status);
-
- set_float_exception_flags(0, &env->fp_status);
-
- flags &= ~mask;
-
- if (flags) {
- flags = ieee_ex_to_loongarch(flags);
- UPDATE_FP_CAUSE(env->fcsr0, flags);
- }
-
- if (GET_FP_ENABLES(env->fcsr0) & flags) {
- do_raise_exception(env, EXCCODE_FPE, pc);
- } else {
- UPDATE_FP_FLAGS(env->fcsr0, flags);
- }
-}
-
-static void vec_update_fcsr0(CPULoongArchState *env, uintptr_t pc)
-{
- vec_update_fcsr0_mask(env, pc, 0);
-}
-
-static inline void vec_clear_cause(CPULoongArchState *env)
-{
- SET_FP_CAUSE(env->fcsr0, 0);
-}
-
-#define DO_3OP_F(NAME, BIT, E, FN) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- vec_clear_cause(env); \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
- vec_update_fcsr0(env, GETPC()); \
- } \
-}
-
-DO_3OP_F(vfadd_s, 32, UW, float32_add)
-DO_3OP_F(vfadd_d, 64, UD, float64_add)
-DO_3OP_F(vfsub_s, 32, UW, float32_sub)
-DO_3OP_F(vfsub_d, 64, UD, float64_sub)
-DO_3OP_F(vfmul_s, 32, UW, float32_mul)
-DO_3OP_F(vfmul_d, 64, UD, float64_mul)
-DO_3OP_F(vfdiv_s, 32, UW, float32_div)
-DO_3OP_F(vfdiv_d, 64, UD, float64_div)
-DO_3OP_F(vfmax_s, 32, UW, float32_maxnum)
-DO_3OP_F(vfmax_d, 64, UD, float64_maxnum)
-DO_3OP_F(vfmin_s, 32, UW, float32_minnum)
-DO_3OP_F(vfmin_d, 64, UD, float64_minnum)
-DO_3OP_F(vfmaxa_s, 32, UW, float32_maxnummag)
-DO_3OP_F(vfmaxa_d, 64, UD, float64_maxnummag)
-DO_3OP_F(vfmina_s, 32, UW, float32_minnummag)
-DO_3OP_F(vfmina_d, 64, UD, float64_minnummag)
-
-#define DO_4OP_F(NAME, BIT, E, FN, flags) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk, uint32_t va) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- VReg *Va = &(env->fpr[va].vreg); \
- \
- vec_clear_cause(env); \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = FN(Vj->E(i), Vk->E(i), Va->E(i), flags, &env->fp_status); \
- vec_update_fcsr0(env, GETPC()); \
- } \
-}
-
-DO_4OP_F(vfmadd_s, 32, UW, float32_muladd, 0)
-DO_4OP_F(vfmadd_d, 64, UD, float64_muladd, 0)
-DO_4OP_F(vfmsub_s, 32, UW, float32_muladd, float_muladd_negate_c)
-DO_4OP_F(vfmsub_d, 64, UD, float64_muladd, float_muladd_negate_c)
-DO_4OP_F(vfnmadd_s, 32, UW, float32_muladd, float_muladd_negate_result)
-DO_4OP_F(vfnmadd_d, 64, UD, float64_muladd, float_muladd_negate_result)
-DO_4OP_F(vfnmsub_s, 32, UW, float32_muladd,
- float_muladd_negate_c | float_muladd_negate_result)
-DO_4OP_F(vfnmsub_d, 64, UD, float64_muladd,
- float_muladd_negate_c | float_muladd_negate_result)
-
-#define DO_2OP_F(NAME, BIT, E, FN) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- vec_clear_cause(env); \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = FN(env, Vj->E(i)); \
- } \
-}
-
-#define FLOGB(BIT, T) \
-static T do_flogb_## BIT(CPULoongArchState *env, T fj) \
-{ \
- T fp, fd; \
- float_status *status = &env->fp_status; \
- FloatRoundMode old_mode = get_float_rounding_mode(status); \
- \
- set_float_rounding_mode(float_round_down, status); \
- fp = float ## BIT ##_log2(fj, status); \
- fd = float ## BIT ##_round_to_int(fp, status); \
- set_float_rounding_mode(old_mode, status); \
- vec_update_fcsr0_mask(env, GETPC(), float_flag_inexact); \
- return fd; \
-}
-
-FLOGB(32, uint32_t)
-FLOGB(64, uint64_t)
-
-#define FCLASS(NAME, BIT, E, FN) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = FN(env, Vj->E(i)); \
- } \
-}
-
-FCLASS(vfclass_s, 32, UW, helper_fclass_s)
-FCLASS(vfclass_d, 64, UD, helper_fclass_d)
-
-#define FSQRT(BIT, T) \
-static T do_fsqrt_## BIT(CPULoongArchState *env, T fj) \
-{ \
- T fd; \
- fd = float ## BIT ##_sqrt(fj, &env->fp_status); \
- vec_update_fcsr0(env, GETPC()); \
- return fd; \
-}
-
-FSQRT(32, uint32_t)
-FSQRT(64, uint64_t)
-
-#define FRECIP(BIT, T) \
-static T do_frecip_## BIT(CPULoongArchState *env, T fj) \
-{ \
- T fd; \
- fd = float ## BIT ##_div(float ## BIT ##_one, fj, &env->fp_status); \
- vec_update_fcsr0(env, GETPC()); \
- return fd; \
-}
-
-FRECIP(32, uint32_t)
-FRECIP(64, uint64_t)
-
-#define FRSQRT(BIT, T) \
-static T do_frsqrt_## BIT(CPULoongArchState *env, T fj) \
-{ \
- T fd, fp; \
- fp = float ## BIT ##_sqrt(fj, &env->fp_status); \
- fd = float ## BIT ##_div(float ## BIT ##_one, fp, &env->fp_status); \
- vec_update_fcsr0(env, GETPC()); \
- return fd; \
-}
-
-FRSQRT(32, uint32_t)
-FRSQRT(64, uint64_t)
-
-DO_2OP_F(vflogb_s, 32, UW, do_flogb_32)
-DO_2OP_F(vflogb_d, 64, UD, do_flogb_64)
-DO_2OP_F(vfsqrt_s, 32, UW, do_fsqrt_32)
-DO_2OP_F(vfsqrt_d, 64, UD, do_fsqrt_64)
-DO_2OP_F(vfrecip_s, 32, UW, do_frecip_32)
-DO_2OP_F(vfrecip_d, 64, UD, do_frecip_64)
-DO_2OP_F(vfrsqrt_s, 32, UW, do_frsqrt_32)
-DO_2OP_F(vfrsqrt_d, 64, UD, do_frsqrt_64)
-
-static uint32_t float16_cvt_float32(uint16_t h, float_status *status)
-{
- return float16_to_float32(h, true, status);
-}
-static uint64_t float32_cvt_float64(uint32_t s, float_status *status)
-{
- return float32_to_float64(s, status);
-}
-
-static uint16_t float32_cvt_float16(uint32_t s, float_status *status)
-{
- return float32_to_float16(s, true, status);
-}
-static uint32_t float64_cvt_float32(uint64_t d, float_status *status)
-{
- return float64_to_float32(d, status);
-}
-
-void HELPER(vfcvtl_s_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- int i;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- vec_clear_cause(env);
- for (i = 0; i < LSX_LEN/32; i++) {
- temp.UW(i) = float16_cvt_float32(Vj->UH(i), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
- *Vd = temp;
-}
-
-void HELPER(vfcvtl_d_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- int i;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- vec_clear_cause(env);
- for (i = 0; i < LSX_LEN/64; i++) {
- temp.UD(i) = float32_cvt_float64(Vj->UW(i), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
- *Vd = temp;
-}
-
-void HELPER(vfcvth_s_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- int i;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- vec_clear_cause(env);
- for (i = 0; i < LSX_LEN/32; i++) {
- temp.UW(i) = float16_cvt_float32(Vj->UH(i + 4), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
- *Vd = temp;
-}
-
-void HELPER(vfcvth_d_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- int i;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- vec_clear_cause(env);
- for (i = 0; i < LSX_LEN/64; i++) {
- temp.UD(i) = float32_cvt_float64(Vj->UW(i + 2), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
- *Vd = temp;
-}
-
-void HELPER(vfcvt_h_s)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t vk)
-{
- int i;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- VReg *Vk = &(env->fpr[vk].vreg);
-
- vec_clear_cause(env);
- for(i = 0; i < LSX_LEN/32; i++) {
- temp.UH(i + 4) = float32_cvt_float16(Vj->UW(i), &env->fp_status);
- temp.UH(i) = float32_cvt_float16(Vk->UW(i), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
- *Vd = temp;
-}
-
-void HELPER(vfcvt_s_d)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t vk)
-{
- int i;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- VReg *Vk = &(env->fpr[vk].vreg);
-
- vec_clear_cause(env);
- for(i = 0; i < LSX_LEN/64; i++) {
- temp.UW(i + 2) = float64_cvt_float32(Vj->UD(i), &env->fp_status);
- temp.UW(i) = float64_cvt_float32(Vk->UD(i), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
- *Vd = temp;
-}
-
-void HELPER(vfrint_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- int i;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- vec_clear_cause(env);
- for (i = 0; i < 4; i++) {
- Vd->W(i) = float32_round_to_int(Vj->UW(i), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
-}
-
-void HELPER(vfrint_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- int i;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- vec_clear_cause(env);
- for (i = 0; i < 2; i++) {
- Vd->D(i) = float64_round_to_int(Vj->UD(i), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
-}
-
-#define FCVT_2OP(NAME, BIT, E, MODE) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
-{ \
- int i; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- vec_clear_cause(env); \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
- set_float_rounding_mode(MODE, &env->fp_status); \
- Vd->E(i) = float## BIT ## _round_to_int(Vj->E(i), &env->fp_status); \
- set_float_rounding_mode(old_mode, &env->fp_status); \
- vec_update_fcsr0(env, GETPC()); \
- } \
-}
-
-FCVT_2OP(vfrintrne_s, 32, UW, float_round_nearest_even)
-FCVT_2OP(vfrintrne_d, 64, UD, float_round_nearest_even)
-FCVT_2OP(vfrintrz_s, 32, UW, float_round_to_zero)
-FCVT_2OP(vfrintrz_d, 64, UD, float_round_to_zero)
-FCVT_2OP(vfrintrp_s, 32, UW, float_round_up)
-FCVT_2OP(vfrintrp_d, 64, UD, float_round_up)
-FCVT_2OP(vfrintrm_s, 32, UW, float_round_down)
-FCVT_2OP(vfrintrm_d, 64, UD, float_round_down)
-
-#define FTINT(NAME, FMT1, FMT2, T1, T2, MODE) \
-static T2 do_ftint ## NAME(CPULoongArchState *env, T1 fj) \
-{ \
- T2 fd; \
- FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
- \
- set_float_rounding_mode(MODE, &env->fp_status); \
- fd = do_## FMT1 ##_to_## FMT2(env, fj); \
- set_float_rounding_mode(old_mode, &env->fp_status); \
- return fd; \
-}
-
-#define DO_FTINT(FMT1, FMT2, T1, T2) \
-static T2 do_## FMT1 ##_to_## FMT2(CPULoongArchState *env, T1 fj) \
-{ \
- T2 fd; \
- \
- fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
- if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { \
- if (FMT1 ##_is_any_nan(fj)) { \
- fd = 0; \
- } \
- } \
- vec_update_fcsr0(env, GETPC()); \
- return fd; \
-}
-
-DO_FTINT(float32, int32, uint32_t, uint32_t)
-DO_FTINT(float64, int64, uint64_t, uint64_t)
-DO_FTINT(float32, uint32, uint32_t, uint32_t)
-DO_FTINT(float64, uint64, uint64_t, uint64_t)
-DO_FTINT(float64, int32, uint64_t, uint32_t)
-DO_FTINT(float32, int64, uint32_t, uint64_t)
-
-FTINT(rne_w_s, float32, int32, uint32_t, uint32_t, float_round_nearest_even)
-FTINT(rne_l_d, float64, int64, uint64_t, uint64_t, float_round_nearest_even)
-FTINT(rp_w_s, float32, int32, uint32_t, uint32_t, float_round_up)
-FTINT(rp_l_d, float64, int64, uint64_t, uint64_t, float_round_up)
-FTINT(rz_w_s, float32, int32, uint32_t, uint32_t, float_round_to_zero)
-FTINT(rz_l_d, float64, int64, uint64_t, uint64_t, float_round_to_zero)
-FTINT(rm_w_s, float32, int32, uint32_t, uint32_t, float_round_down)
-FTINT(rm_l_d, float64, int64, uint64_t, uint64_t, float_round_down)
-
-DO_2OP_F(vftintrne_w_s, 32, UW, do_ftintrne_w_s)
-DO_2OP_F(vftintrne_l_d, 64, UD, do_ftintrne_l_d)
-DO_2OP_F(vftintrp_w_s, 32, UW, do_ftintrp_w_s)
-DO_2OP_F(vftintrp_l_d, 64, UD, do_ftintrp_l_d)
-DO_2OP_F(vftintrz_w_s, 32, UW, do_ftintrz_w_s)
-DO_2OP_F(vftintrz_l_d, 64, UD, do_ftintrz_l_d)
-DO_2OP_F(vftintrm_w_s, 32, UW, do_ftintrm_w_s)
-DO_2OP_F(vftintrm_l_d, 64, UD, do_ftintrm_l_d)
-DO_2OP_F(vftint_w_s, 32, UW, do_float32_to_int32)
-DO_2OP_F(vftint_l_d, 64, UD, do_float64_to_int64)
-
-FTINT(rz_wu_s, float32, uint32, uint32_t, uint32_t, float_round_to_zero)
-FTINT(rz_lu_d, float64, uint64, uint64_t, uint64_t, float_round_to_zero)
-
-DO_2OP_F(vftintrz_wu_s, 32, UW, do_ftintrz_wu_s)
-DO_2OP_F(vftintrz_lu_d, 64, UD, do_ftintrz_lu_d)
-DO_2OP_F(vftint_wu_s, 32, UW, do_float32_to_uint32)
-DO_2OP_F(vftint_lu_d, 64, UD, do_float64_to_uint64)
-
-FTINT(rm_w_d, float64, int32, uint64_t, uint32_t, float_round_down)
-FTINT(rp_w_d, float64, int32, uint64_t, uint32_t, float_round_up)
-FTINT(rz_w_d, float64, int32, uint64_t, uint32_t, float_round_to_zero)
-FTINT(rne_w_d, float64, int32, uint64_t, uint32_t, float_round_nearest_even)
-
-#define FTINT_W_D(NAME, FN) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- vec_clear_cause(env); \
- for (i = 0; i < 2; i++) { \
- temp.W(i + 2) = FN(env, Vj->UD(i)); \
- temp.W(i) = FN(env, Vk->UD(i)); \
- } \
- *Vd = temp; \
-}
-
-FTINT_W_D(vftint_w_d, do_float64_to_int32)
-FTINT_W_D(vftintrm_w_d, do_ftintrm_w_d)
-FTINT_W_D(vftintrp_w_d, do_ftintrp_w_d)
-FTINT_W_D(vftintrz_w_d, do_ftintrz_w_d)
-FTINT_W_D(vftintrne_w_d, do_ftintrne_w_d)
-
-FTINT(rml_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
-FTINT(rpl_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
-FTINT(rzl_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
-FTINT(rnel_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
-FTINT(rmh_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
-FTINT(rph_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
-FTINT(rzh_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
-FTINT(rneh_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
-
-#define FTINTL_L_S(NAME, FN) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- vec_clear_cause(env); \
- for (i = 0; i < 2; i++) { \
- temp.D(i) = FN(env, Vj->UW(i)); \
- } \
- *Vd = temp; \
-}
-
-FTINTL_L_S(vftintl_l_s, do_float32_to_int64)
-FTINTL_L_S(vftintrml_l_s, do_ftintrml_l_s)
-FTINTL_L_S(vftintrpl_l_s, do_ftintrpl_l_s)
-FTINTL_L_S(vftintrzl_l_s, do_ftintrzl_l_s)
-FTINTL_L_S(vftintrnel_l_s, do_ftintrnel_l_s)
-
-#define FTINTH_L_S(NAME, FN) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- vec_clear_cause(env); \
- for (i = 0; i < 2; i++) { \
- temp.D(i) = FN(env, Vj->UW(i + 2)); \
- } \
- *Vd = temp; \
-}
-
-FTINTH_L_S(vftinth_l_s, do_float32_to_int64)
-FTINTH_L_S(vftintrmh_l_s, do_ftintrmh_l_s)
-FTINTH_L_S(vftintrph_l_s, do_ftintrph_l_s)
-FTINTH_L_S(vftintrzh_l_s, do_ftintrzh_l_s)
-FTINTH_L_S(vftintrneh_l_s, do_ftintrneh_l_s)
-
-#define FFINT(NAME, FMT1, FMT2, T1, T2) \
-static T2 do_ffint_ ## NAME(CPULoongArchState *env, T1 fj) \
-{ \
- T2 fd; \
- \
- fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
- vec_update_fcsr0(env, GETPC()); \
- return fd; \
-}
-
-FFINT(s_w, int32, float32, int32_t, uint32_t)
-FFINT(d_l, int64, float64, int64_t, uint64_t)
-FFINT(s_wu, uint32, float32, uint32_t, uint32_t)
-FFINT(d_lu, uint64, float64, uint64_t, uint64_t)
-
-DO_2OP_F(vffint_s_w, 32, W, do_ffint_s_w)
-DO_2OP_F(vffint_d_l, 64, D, do_ffint_d_l)
-DO_2OP_F(vffint_s_wu, 32, UW, do_ffint_s_wu)
-DO_2OP_F(vffint_d_lu, 64, UD, do_ffint_d_lu)
-
-void HELPER(vffintl_d_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- int i;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- vec_clear_cause(env);
- for (i = 0; i < 2; i++) {
- temp.D(i) = int32_to_float64(Vj->W(i), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
- *Vd = temp;
-}
-
-void HELPER(vffinth_d_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
-{
- int i;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- vec_clear_cause(env);
- for (i = 0; i < 2; i++) {
- temp.D(i) = int32_to_float64(Vj->W(i + 2), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
- *Vd = temp;
-}
-
-void HELPER(vffint_s_l)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t vk)
-{
- int i;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- VReg *Vk = &(env->fpr[vk].vreg);
-
- vec_clear_cause(env);
- for (i = 0; i < 2; i++) {
- temp.W(i + 2) = int64_to_float32(Vj->D(i), &env->fp_status);
- temp.W(i) = int64_to_float32(Vk->D(i), &env->fp_status);
- vec_update_fcsr0(env, GETPC());
- }
- *Vd = temp;
-}
-
-#define VSEQ(a, b) (a == b ? -1 : 0)
-#define VSLE(a, b) (a <= b ? -1 : 0)
-#define VSLT(a, b) (a < b ? -1 : 0)
-
-#define VCMPI(NAME, BIT, E, DO_OP) \
-void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
-{ \
- int i; \
- VReg *Vd = (VReg *)vd; \
- VReg *Vj = (VReg *)vj; \
- typedef __typeof(Vd->E(0)) TD; \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \
- } \
-}
-
-VCMPI(vseqi_b, 8, B, VSEQ)
-VCMPI(vseqi_h, 16, H, VSEQ)
-VCMPI(vseqi_w, 32, W, VSEQ)
-VCMPI(vseqi_d, 64, D, VSEQ)
-VCMPI(vslei_b, 8, B, VSLE)
-VCMPI(vslei_h, 16, H, VSLE)
-VCMPI(vslei_w, 32, W, VSLE)
-VCMPI(vslei_d, 64, D, VSLE)
-VCMPI(vslei_bu, 8, UB, VSLE)
-VCMPI(vslei_hu, 16, UH, VSLE)
-VCMPI(vslei_wu, 32, UW, VSLE)
-VCMPI(vslei_du, 64, UD, VSLE)
-VCMPI(vslti_b, 8, B, VSLT)
-VCMPI(vslti_h, 16, H, VSLT)
-VCMPI(vslti_w, 32, W, VSLT)
-VCMPI(vslti_d, 64, D, VSLT)
-VCMPI(vslti_bu, 8, UB, VSLT)
-VCMPI(vslti_hu, 16, UH, VSLT)
-VCMPI(vslti_wu, 32, UW, VSLT)
-VCMPI(vslti_du, 64, UD, VSLT)
-
-static uint64_t vfcmp_common(CPULoongArchState *env,
- FloatRelation cmp, uint32_t flags)
-{
- uint64_t ret = 0;
-
- switch (cmp) {
- case float_relation_less:
- ret = (flags & FCMP_LT);
- break;
- case float_relation_equal:
- ret = (flags & FCMP_EQ);
- break;
- case float_relation_greater:
- ret = (flags & FCMP_GT);
- break;
- case float_relation_unordered:
- ret = (flags & FCMP_UN);
- break;
- default:
- g_assert_not_reached();
- }
-
- if (ret) {
- ret = -1;
- }
-
- return ret;
-}
-
-#define VFCMP(NAME, BIT, E, FN) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk, uint32_t flags) \
-{ \
- int i; \
- VReg t; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- vec_clear_cause(env); \
- for (i = 0; i < LSX_LEN/BIT ; i++) { \
- FloatRelation cmp; \
- cmp = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
- t.E(i) = vfcmp_common(env, cmp, flags); \
- vec_update_fcsr0(env, GETPC()); \
- } \
- *Vd = t; \
-}
-
-VFCMP(vfcmp_c_s, 32, UW, float32_compare_quiet)
-VFCMP(vfcmp_s_s, 32, UW, float32_compare)
-VFCMP(vfcmp_c_d, 64, UD, float64_compare_quiet)
-VFCMP(vfcmp_s_d, 64, UD, float64_compare)
-
-void HELPER(vbitseli_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
-{
- int i;
- VReg *Vd = (VReg *)vd;
- VReg *Vj = (VReg *)vj;
-
- for (i = 0; i < 16; i++) {
- Vd->B(i) = (~Vd->B(i) & Vj->B(i)) | (Vd->B(i) & imm);
- }
-}
-
-/* Copy from target/arm/tcg/sve_helper.c */
-static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz)
-{
- uint64_t bits = 8 << esz;
- uint64_t ones = dup_const(esz, 1);
- uint64_t signs = ones << (bits - 1);
- uint64_t cmp0, cmp1;
-
- cmp1 = dup_const(esz, n);
- cmp0 = cmp1 ^ m0;
- cmp1 = cmp1 ^ m1;
- cmp0 = (cmp0 - ones) & ~cmp0;
- cmp1 = (cmp1 - ones) & ~cmp1;
- return (cmp0 | cmp1) & signs;
-}
-
-#define SETANYEQZ(NAME, MO) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
-{ \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- env->cf[cd & 0x7] = do_match2(0, Vj->D(0), Vj->D(1), MO); \
-}
-SETANYEQZ(vsetanyeqz_b, MO_8)
-SETANYEQZ(vsetanyeqz_h, MO_16)
-SETANYEQZ(vsetanyeqz_w, MO_32)
-SETANYEQZ(vsetanyeqz_d, MO_64)
-
-#define SETALLNEZ(NAME, MO) \
-void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
-{ \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- env->cf[cd & 0x7]= !do_match2(0, Vj->D(0), Vj->D(1), MO); \
-}
-SETALLNEZ(vsetallnez_b, MO_8)
-SETALLNEZ(vsetallnez_h, MO_16)
-SETALLNEZ(vsetallnez_w, MO_32)
-SETALLNEZ(vsetallnez_d, MO_64)
-
-#define VPACKEV(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E(2 * i + 1) = Vj->E(2 * i); \
- temp.E(2 *i) = Vk->E(2 * i); \
- } \
- *Vd = temp; \
-}
-
-VPACKEV(vpackev_b, 16, B)
-VPACKEV(vpackev_h, 32, H)
-VPACKEV(vpackev_w, 64, W)
-VPACKEV(vpackev_d, 128, D)
-
-#define VPACKOD(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E(2 * i + 1) = Vj->E(2 * i + 1); \
- temp.E(2 * i) = Vk->E(2 * i + 1); \
- } \
- *Vd = temp; \
-}
-
-VPACKOD(vpackod_b, 16, B)
-VPACKOD(vpackod_h, 32, H)
-VPACKOD(vpackod_w, 64, W)
-VPACKOD(vpackod_d, 128, D)
-
-#define VPICKEV(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i); \
- temp.E(i) = Vk->E(2 * i); \
- } \
- *Vd = temp; \
-}
-
-VPICKEV(vpickev_b, 16, B)
-VPICKEV(vpickev_h, 32, H)
-VPICKEV(vpickev_w, 64, W)
-VPICKEV(vpickev_d, 128, D)
-
-#define VPICKOD(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i + 1); \
- temp.E(i) = Vk->E(2 * i + 1); \
- } \
- *Vd = temp; \
-}
-
-VPICKOD(vpickod_b, 16, B)
-VPICKOD(vpickod_h, 32, H)
-VPICKOD(vpickod_w, 64, W)
-VPICKOD(vpickod_d, 128, D)
-
-#define VILVL(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E(2 * i + 1) = Vj->E(i); \
- temp.E(2 * i) = Vk->E(i); \
- } \
- *Vd = temp; \
-}
-
-VILVL(vilvl_b, 16, B)
-VILVL(vilvl_h, 32, H)
-VILVL(vilvl_w, 64, W)
-VILVL(vilvl_d, 128, D)
-
-#define VILVH(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E(2 * i + 1) = Vj->E(i + LSX_LEN/BIT); \
- temp.E(2 * i) = Vk->E(i + LSX_LEN/BIT); \
- } \
- *Vd = temp; \
-}
-
-VILVH(vilvh_b, 16, B)
-VILVH(vilvh_h, 32, H)
-VILVH(vilvh_w, 64, W)
-VILVH(vilvh_d, 128, D)
-
-void HELPER(vshuf_b)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t vk, uint32_t va)
-{
- int i, m;
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
- VReg *Vk = &(env->fpr[vk].vreg);
- VReg *Va = &(env->fpr[va].vreg);
-
- m = LSX_LEN/8;
- for (i = 0; i < m ; i++) {
- uint64_t k = (uint8_t)Va->B(i) % (2 * m);
- temp.B(i) = k < m ? Vk->B(k) : Vj->B(k - m);
- }
- *Vd = temp;
-}
-
-#define VSHUF(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t vk) \
-{ \
- int i, m; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- VReg *Vk = &(env->fpr[vk].vreg); \
- \
- m = LSX_LEN/BIT; \
- for (i = 0; i < m; i++) { \
- uint64_t k = ((uint8_t) Vd->E(i)) % (2 * m); \
- temp.E(i) = k < m ? Vk->E(k) : Vj->E(k - m); \
- } \
- *Vd = temp; \
-}
-
-VSHUF(vshuf_h, 16, H)
-VSHUF(vshuf_w, 32, W)
-VSHUF(vshuf_d, 64, D)
-
-#define VSHUF4I(NAME, BIT, E) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int i; \
- VReg temp; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- for (i = 0; i < LSX_LEN/BIT; i++) { \
- temp.E(i) = Vj->E(((i) & 0xfc) + (((imm) >> \
- (2 * ((i) & 0x03))) & 0x03)); \
- } \
- *Vd = temp; \
-}
-
-VSHUF4I(vshuf4i_b, 8, B)
-VSHUF4I(vshuf4i_h, 16, H)
-VSHUF4I(vshuf4i_w, 32, W)
-
-void HELPER(vshuf4i_d)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- VReg temp;
- temp.D(0) = (imm & 2 ? Vj : Vd)->D(imm & 1);
- temp.D(1) = (imm & 8 ? Vj : Vd)->D((imm >> 2) & 1);
- *Vd = temp;
-}
-
-void HELPER(vpermi_w)(CPULoongArchState *env,
- uint32_t vd, uint32_t vj, uint32_t imm)
-{
- VReg temp;
- VReg *Vd = &(env->fpr[vd].vreg);
- VReg *Vj = &(env->fpr[vj].vreg);
-
- temp.W(0) = Vj->W(imm & 0x3);
- temp.W(1) = Vj->W((imm >> 2) & 0x3);
- temp.W(2) = Vd->W((imm >> 4) & 0x3);
- temp.W(3) = Vd->W((imm >> 6) & 0x3);
- *Vd = temp;
-}
-
-#define VEXTRINS(NAME, BIT, E, MASK) \
-void HELPER(NAME)(CPULoongArchState *env, \
- uint32_t vd, uint32_t vj, uint32_t imm) \
-{ \
- int ins, extr; \
- VReg *Vd = &(env->fpr[vd].vreg); \
- VReg *Vj = &(env->fpr[vj].vreg); \
- \
- ins = (imm >> 4) & MASK; \
- extr = imm & MASK; \
- Vd->E(ins) = Vj->E(extr); \
-}
-
-VEXTRINS(vextrins_b, 8, B, 0xf)
-VEXTRINS(vextrins_h, 16, H, 0x7)
-VEXTRINS(vextrins_w, 32, W, 0x3)
-VEXTRINS(vextrins_d, 64, D, 0x1)
'op_helper.c',
'translate.c',
'gdbstub.c',
- 'lsx_helper.c',
+ 'vec_helper.c',
))
loongarch_tcg_ss.add(zlib)
#include "insn_trans/trans_fmemory.c.inc"
#include "insn_trans/trans_branch.c.inc"
#include "insn_trans/trans_privileged.c.inc"
-#include "insn_trans/trans_lsx.c.inc"
+#include "insn_trans/trans_vec.c.inc"
static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QEMU LoongArch vector helper functions.
+ *
+ * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "fpu/softfloat.h"
+#include "internals.h"
+#include "tcg/tcg.h"
+
+#define DO_ADD(a, b) (a + b)
+#define DO_SUB(a, b) (a - b)
+
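+/*
+ * Horizontal widening add/sub: each result element combines an
+ * odd-indexed element of Vj with the even-indexed element of Vk
+ * beneath it, both cast to the double-width destination type TD
+ * before the operation.
+ */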
+#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ typedef __typeof(Vd->E1(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \
+ } \
+}
+
+DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD)
+DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD)
+DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD)
+
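+/*
+ * The 128-bit forms widen through Int128: int128_makes64()
+ * sign-extends a 64-bit value, while int128_make64() in the
+ * unsigned variants below zero-extends it.
+ */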
+void HELPER(vhaddw_q_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
+}
+
+DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB)
+DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB)
+DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB)
+
+void HELPER(vhsubw_q_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
+}
+
+DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD)
+DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD)
+DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD)
+
+void HELPER(vhaddw_qu_du)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(0)));
+}
+
+DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB)
+DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB)
+DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB)
+
+void HELPER(vhsubw_qu_du)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(0)));
+}
+
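+/*
+ * Widening even/odd operations: only the even- or odd-indexed
+ * source elements are used, each cast to the double-width
+ * destination type TD so the arithmetic is done at full width.
+ */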
+#define DO_EVEN(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) TD; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
+ } \
+}
+
+#define DO_ODD(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) TD; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \
+ } \
+}
+
+void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
+}
+
+DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
+DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
+DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)
+
+void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
+}
+
+DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
+DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
+DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)
+
+void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_sub(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
+}
+
+DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
+DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
+DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)
+
+void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
+}
+
+DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
+DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
+DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)
+
+void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
+ int128_make64((uint64_t)Vk->D(0)));
+}
+
+DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
+DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
+DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)
+
+void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(1)));
+}
+
+DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
+DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
+DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)
+
+void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(0)),
+ int128_make64((uint64_t)Vk->D(0)));
+}
+
+DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
+DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
+DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)
+
+void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
+ int128_make64((uint64_t)Vk->D(1)));
+}
+
+DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
+DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
+DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)
+
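+/*
+ * Mixed-signedness forms: the Vj element is widened as unsigned
+ * (TDU) and the Vk element as signed (TDS).
+ */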
+#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->ES1(0)) TDS; \
+ typedef __typeof(Vd->EU1(0)) TDU; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i), (TDS)Vk->ES2(2 * i)); \
+ } \
+}
+
+#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->ES1(0)) TDS; \
+ typedef __typeof(Vd->EU1(0)) TDU; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \
+ } \
+}
+
+void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
+ int128_makes64(Vk->D(0)));
+}
+
+DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
+DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
+DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)
+
+void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
+ int128_makes64(Vk->D(1)));
+}
+
+DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
+DO_ODD_U_S(vaddwod_w_hu_h, 32, W, UW, H, UH, DO_ADD)
+DO_ODD_U_S(vaddwod_d_wu_w, 64, D, UD, W, UW, DO_ADD)
+
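+/*
+ * Overflow-safe averaging: halve each operand before adding, then
+ * add back the carry lost from the low bits: (a & b & 1) for the
+ * truncating average, ((a | b) & 1) for the rounding one.
+ */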
+#define DO_VAVG(a, b) ((a >> 1) + (b >> 1) + (a & b & 1))
+#define DO_VAVGR(a, b) ((a >> 1) + (b >> 1) + ((a | b) & 1))
+
+#define DO_3OP(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)); \
+ } \
+}
+
+DO_3OP(vavg_b, 8, B, DO_VAVG)
+DO_3OP(vavg_h, 16, H, DO_VAVG)
+DO_3OP(vavg_w, 32, W, DO_VAVG)
+DO_3OP(vavg_d, 64, D, DO_VAVG)
+DO_3OP(vavgr_b, 8, B, DO_VAVGR)
+DO_3OP(vavgr_h, 16, H, DO_VAVGR)
+DO_3OP(vavgr_w, 32, W, DO_VAVGR)
+DO_3OP(vavgr_d, 64, D, DO_VAVGR)
+DO_3OP(vavg_bu, 8, UB, DO_VAVG)
+DO_3OP(vavg_hu, 16, UH, DO_VAVG)
+DO_3OP(vavg_wu, 32, UW, DO_VAVG)
+DO_3OP(vavg_du, 64, UD, DO_VAVG)
+DO_3OP(vavgr_bu, 8, UB, DO_VAVGR)
+DO_3OP(vavgr_hu, 16, UH, DO_VAVGR)
+DO_3OP(vavgr_wu, 32, UW, DO_VAVGR)
+DO_3OP(vavgr_du, 64, UD, DO_VAVGR)
+
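+/* Absolute difference, without computing an absolute value. */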
+#define DO_VABSD(a, b)  ((a > b) ? (a - b) : (b - a))
+
+DO_3OP(vabsd_b, 8, B, DO_VABSD)
+DO_3OP(vabsd_h, 16, H, DO_VABSD)
+DO_3OP(vabsd_w, 32, W, DO_VABSD)
+DO_3OP(vabsd_d, 64, D, DO_VABSD)
+DO_3OP(vabsd_bu, 8, UB, DO_VABSD)
+DO_3OP(vabsd_hu, 16, UH, DO_VABSD)
+DO_3OP(vabsd_wu, 32, UW, DO_VABSD)
+DO_3OP(vabsd_du, 64, UD, DO_VABSD)
+
+#define DO_VABS(a) ((a < 0) ? (-a) : (a))
+
+#define DO_VADDA(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i)) + DO_OP(Vk->E(i)); \
+ } \
+}
+
+DO_VADDA(vadda_b, 8, B, DO_VABS)
+DO_VADDA(vadda_h, 16, H, DO_VABS)
+DO_VADDA(vadda_w, 32, W, DO_VABS)
+DO_VADDA(vadda_d, 64, D, DO_VABS)
+
+#define DO_MIN(a, b) (a < b ? a : b)
+#define DO_MAX(a, b) (a > b ? a : b)
+
+#define VMINMAXI(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ typedef __typeof(Vd->E(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \
+ } \
+}
+
+VMINMAXI(vmini_b, 8, B, DO_MIN)
+VMINMAXI(vmini_h, 16, H, DO_MIN)
+VMINMAXI(vmini_w, 32, W, DO_MIN)
+VMINMAXI(vmini_d, 64, D, DO_MIN)
+VMINMAXI(vmaxi_b, 8, B, DO_MAX)
+VMINMAXI(vmaxi_h, 16, H, DO_MAX)
+VMINMAXI(vmaxi_w, 32, W, DO_MAX)
+VMINMAXI(vmaxi_d, 64, D, DO_MAX)
+VMINMAXI(vmini_bu, 8, UB, DO_MIN)
+VMINMAXI(vmini_hu, 16, UH, DO_MIN)
+VMINMAXI(vmini_wu, 32, UW, DO_MIN)
+VMINMAXI(vmini_du, 64, UD, DO_MIN)
+VMINMAXI(vmaxi_bu, 8, UB, DO_MAX)
+VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
+VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
+VMINMAXI(vmaxi_du, 64, UD, DO_MAX)
+
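+/*
+ * High half of a product: widen both operands to the double-width
+ * type T, multiply, and shift the product right by the element
+ * size.  The 64-bit forms below use muls64()/mulu64() instead,
+ * since no wider native type exists.  Note that the DO_OP argument
+ * is never expanded, so the otherwise undefined DO_MUH is harmless.
+ */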
+#define DO_VMUH(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) T; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT; \
+ } \
+}
+
+void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ uint64_t l, h1, h2;
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ muls64(&l, &h1, Vj->D(0), Vk->D(0));
+ muls64(&l, &h2, Vj->D(1), Vk->D(1));
+
+ Vd->D(0) = h1;
+ Vd->D(1) = h2;
+}
+
+DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
+DO_VMUH(vmuh_h, 16, W, H, DO_MUH)
+DO_VMUH(vmuh_w, 32, D, W, DO_MUH)
+
+void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t v)
+{
+ uint64_t l, h1, h2;
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+ VReg *Vk = (VReg *)vk;
+
+ mulu64(&l, &h1, Vj->D(0), Vk->D(0));
+ mulu64(&l, &h2, Vj->D(1), Vk->D(1));
+
+ Vd->D(0) = h1;
+ Vd->D(1) = h2;
+}
+
+DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
+DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH)
+DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH)
+
+#define DO_MUL(a, b) (a * b)
+
+DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL)
+DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL)
+DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL)
+
+DO_ODD(vmulwod_h_b, 16, H, B, DO_MUL)
+DO_ODD(vmulwod_w_h, 32, W, H, DO_MUL)
+DO_ODD(vmulwod_d_w, 64, D, W, DO_MUL)
+
+DO_EVEN(vmulwev_h_bu, 16, UH, UB, DO_MUL)
+DO_EVEN(vmulwev_w_hu, 32, UW, UH, DO_MUL)
+DO_EVEN(vmulwev_d_wu, 64, UD, UW, DO_MUL)
+
+DO_ODD(vmulwod_h_bu, 16, UH, UB, DO_MUL)
+DO_ODD(vmulwod_w_hu, 32, UW, UH, DO_MUL)
+DO_ODD(vmulwod_d_wu, 64, UD, UW, DO_MUL)
+
+DO_EVEN_U_S(vmulwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
+DO_EVEN_U_S(vmulwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
+DO_EVEN_U_S(vmulwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)
+
+DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
+DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
+DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)
+
+#define DO_MADD(a, b, c) (a + b * c)
+#define DO_MSUB(a, b, c) (a - b * c)
+
+#define VMADDSUB(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+        Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i), Vk->E(i)); \
+ } \
+}
+
+VMADDSUB(vmadd_b, 8, B, DO_MADD)
+VMADDSUB(vmadd_h, 16, H, DO_MADD)
+VMADDSUB(vmadd_w, 32, W, DO_MADD)
+VMADDSUB(vmadd_d, 64, D, DO_MADD)
+VMADDSUB(vmsub_b, 8, B, DO_MSUB)
+VMADDSUB(vmsub_h, 16, H, DO_MSUB)
+VMADDSUB(vmsub_w, 32, W, DO_MSUB)
+VMADDSUB(vmsub_d, 64, D, DO_MSUB)
+
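+/*
+ * Widening multiply-accumulate: the even (or odd) source elements
+ * are widened to the destination type, multiplied, and the product
+ * is added into the existing double-width element of Vd.
+ */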
+#define VMADDWEV(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
+ } \
+}
+
+VMADDWEV(vmaddwev_h_b, 16, H, B, DO_MUL)
+VMADDWEV(vmaddwev_w_h, 32, W, H, DO_MUL)
+VMADDWEV(vmaddwev_d_w, 64, D, W, DO_MUL)
+VMADDWEV(vmaddwev_h_bu, 16, UH, UB, DO_MUL)
+VMADDWEV(vmaddwev_w_hu, 32, UW, UH, DO_MUL)
+VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL)
+
+#define VMADDWOD(NAME, BIT, E1, E2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->E1(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1), \
+ (TD)Vk->E2(2 * i + 1)); \
+ } \
+}
+
+VMADDWOD(vmaddwod_h_b, 16, H, B, DO_MUL)
+VMADDWOD(vmaddwod_w_h, 32, W, H, DO_MUL)
+VMADDWOD(vmaddwod_d_w, 64, D, W, DO_MUL)
+VMADDWOD(vmaddwod_h_bu, 16, UH, UB, DO_MUL)
+VMADDWOD(vmaddwod_w_hu, 32, UW, UH, DO_MUL)
+VMADDWOD(vmaddwod_d_wu, 64, UD, UW, DO_MUL)
+
+#define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->ES1(0)) TS1; \
+ typedef __typeof(Vd->EU1(0)) TU1; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i), \
+ (TS1)Vk->ES2(2 * i)); \
+ } \
+}
+
+VMADDWEV_U_S(vmaddwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
+VMADDWEV_U_S(vmaddwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
+VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)
+
+#define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ typedef __typeof(Vd->ES1(0)) TS1; \
+ typedef __typeof(Vd->EU1(0)) TU1; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1), \
+ (TS1)Vk->ES2(2 * i + 1)); \
+ } \
+}
+
+VMADDWOD_U_S(vmaddwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
+VMADDWOD_U_S(vmaddwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
+VMADDWOD_U_S(vmaddwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)
+
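+/*
+ * Division guards: a zero divisor yields 0 instead of trapping, and
+ * the signed overflow case (INT_MIN / -1, caught by N == -N) yields
+ * N for the quotient and 0 for the remainder.
+ */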
+#define DO_DIVU(N, M) (unlikely(M == 0) ? 0 : N / M)
+#define DO_REMU(N, M) (unlikely(M == 0) ? 0 : N % M)
+#define DO_DIV(N, M)  (unlikely(M == 0) ? 0 : \
+        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
+#define DO_REM(N, M)  (unlikely(M == 0) ? 0 : \
+        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)
+
+#define VDIV(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)); \
+ } \
+}
+
+VDIV(vdiv_b, 8, B, DO_DIV)
+VDIV(vdiv_h, 16, H, DO_DIV)
+VDIV(vdiv_w, 32, W, DO_DIV)
+VDIV(vdiv_d, 64, D, DO_DIV)
+VDIV(vdiv_bu, 8, UB, DO_DIVU)
+VDIV(vdiv_hu, 16, UH, DO_DIVU)
+VDIV(vdiv_wu, 32, UW, DO_DIVU)
+VDIV(vdiv_du, 64, UD, DO_DIVU)
+VDIV(vmod_b, 8, B, DO_REM)
+VDIV(vmod_h, 16, H, DO_REM)
+VDIV(vmod_w, 32, W, DO_REM)
+VDIV(vmod_d, 64, D, DO_REM)
+VDIV(vmod_bu, 8, UB, DO_REMU)
+VDIV(vmod_hu, 16, UH, DO_REMU)
+VDIV(vmod_wu, 32, UW, DO_REMU)
+VDIV(vmod_du, 64, UD, DO_REMU)
+
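+/*
+ * Signed saturation: 'max' is the upper bound, so its one's
+ * complement ~max, i.e. -(max + 1), is the matching lower bound.
+ */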
+#define VSAT_S(NAME, BIT, E) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ typedef __typeof(Vd->E(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : \
+                   Vj->E(i) < (TD)~max ? (TD)~max : Vj->E(i); \
+ } \
+}
+
+VSAT_S(vsat_b, 8, B)
+VSAT_S(vsat_h, 16, H)
+VSAT_S(vsat_w, 32, W)
+VSAT_S(vsat_d, 64, D)
+
+#define VSAT_U(NAME, BIT, E) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ typedef __typeof(Vd->E(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : Vj->E(i); \
+ } \
+}
+
+VSAT_U(vsat_bu, 8, UB)
+VSAT_U(vsat_hu, 16, UH)
+VSAT_U(vsat_wu, 32, UW)
+VSAT_U(vsat_du, 64, UD)
+
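+/* Sign- or zero-extend each element of the high half of Vj. */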
+#define VEXTH(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = Vj->E2(i + LSX_LEN/BIT); \
+ } \
+}
+
+void HELPER(vexth_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ Vd->Q(0) = int128_makes64(Vj->D(1));
+}
+
+void HELPER(vexth_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ Vd->Q(0) = int128_make64((uint64_t)Vj->D(1));
+}
+
+VEXTH(vexth_h_b, 16, H, B)
+VEXTH(vexth_w_h, 32, W, H)
+VEXTH(vexth_d_w, 64, D, W)
+VEXTH(vexth_hu_bu, 16, UH, UB)
+VEXTH(vexth_wu_hu, 32, UW, UH)
+VEXTH(vexth_du_wu, 64, UD, UW)
+
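+/* DO_SIGNCOV: 0, -b or b according to a being zero, negative or positive. */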
+#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)
+
+DO_3OP(vsigncov_b, 8, B, DO_SIGNCOV)
+DO_3OP(vsigncov_h, 16, H, DO_SIGNCOV)
+DO_3OP(vsigncov_w, 32, W, DO_SIGNCOV)
+DO_3OP(vsigncov_d, 64, D, DO_SIGNCOV)
+
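+/*
+ * Gather the per-byte sign bits of a 64-bit lane into the low eight
+ * bits of the result: mask the sign bits, fold them into the top
+ * byte with left shifts of 7, 14 and 28, then extract that byte.
+ */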
+static uint64_t do_vmskltz_b(int64_t val)
+{
+ uint64_t m = 0x8080808080808080ULL;
+ uint64_t c = val & m;
+ c |= c << 7;
+ c |= c << 14;
+ c |= c << 28;
+ return c >> 56;
+}
+
+void HELPER(vmskltz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_b(Vj->D(0));
+ temp |= (do_vmskltz_b(Vj->D(1)) << 8);
+ Vd->D(0) = temp;
+ Vd->D(1) = 0;
+}
+
+static uint64_t do_vmskltz_h(int64_t val)
+{
+ uint64_t m = 0x8000800080008000ULL;
+ uint64_t c = val & m;
+ c |= c << 15;
+ c |= c << 30;
+ return c >> 60;
+}
+
+void HELPER(vmskltz_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_h(Vj->D(0));
+ temp |= (do_vmskltz_h(Vj->D(1)) << 4);
+ Vd->D(0) = temp;
+ Vd->D(1) = 0;
+}
+
+static uint64_t do_vmskltz_w(int64_t val)
+{
+ uint64_t m = 0x8000000080000000ULL;
+ uint64_t c = val & m;
+ c |= c << 31;
+ return c >> 62;
+}
+
+void HELPER(vmskltz_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_w(Vj->D(0));
+ temp |= (do_vmskltz_w(Vj->D(1)) << 2);
+ Vd->D(0) = temp;
+ Vd->D(1) = 0;
+}
+
+static uint64_t do_vmskltz_d(int64_t val)
+{
+ return (uint64_t)val >> 63;
+}
+
+void HELPER(vmskltz_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_d(Vj->D(0));
+ temp |= (do_vmskltz_d(Vj->D(1)) << 1);
+ Vd->D(0) = temp;
+ Vd->D(1) = 0;
+}
+
+void HELPER(vmskgez_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskltz_b(Vj->D(0));
+ temp |= (do_vmskltz_b(Vj->D(1)) << 8);
+ Vd->D(0) = (uint16_t)(~temp);
+ Vd->D(1) = 0;
+}
+
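+/*
+ * Per-byte zero test: ((a & m) + m) sets bit 7 of every byte whose
+ * low seven bits are non-zero; OR-ing in a and m and inverting
+ * leaves bit 7 set only in all-zero bytes, which are then gathered
+ * as in do_vmskltz_b().
+ */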
+static uint64_t do_vmskez_b(uint64_t a)
+{
+ uint64_t m = 0x7f7f7f7f7f7f7f7fULL;
+ uint64_t c = ~(((a & m) + m) | a | m);
+ c |= c << 7;
+ c |= c << 14;
+ c |= c << 28;
+ return c >> 56;
+}
+
+void HELPER(vmsknz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ uint16_t temp = 0;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp = do_vmskez_b(Vj->D(0));
+ temp |= (do_vmskez_b(Vj->D(1)) << 8);
+ Vd->D(0) = (uint16_t)(~temp);
+ Vd->D(1) = 0;
+}
+
+void HELPER(vnori_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
+{
+ int i;
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+
+ for (i = 0; i < LSX_LEN/8; i++) {
+ Vd->B(i) = ~(Vj->B(i) | (uint8_t)imm);
+ }
+}
+
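+/*
+ * vsllwil: widen each element of the low half of Vj to double
+ * width, then shift it left by imm modulo the widened element size.
+ */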
+#define VSLLWIL(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ typedef __typeof(temp.E1(0)) TD; \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = (TD)Vj->E2(i) << (imm % BIT); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vextl_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ Vd->Q(0) = int128_makes64(Vj->D(0));
+}
+
+void HELPER(vextl_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ Vd->Q(0) = int128_make64(Vj->D(0));
+}
+
+VSLLWIL(vsllwil_h_b, 16, H, B)
+VSLLWIL(vsllwil_w_h, 32, W, H)
+VSLLWIL(vsllwil_d_w, 64, D, W)
+VSLLWIL(vsllwil_hu_bu, 16, UH, UB)
+VSLLWIL(vsllwil_wu_hu, 32, UW, UH)
+VSLLWIL(vsllwil_du_wu, 64, UD, UW)
+
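+/*
+ * Rounding right shift: add back the last bit shifted out,
+ * (s1 >> (sh - 1)) & 1, so the result rounds instead of truncating.
+ * do_vsrlr is the logical form; do_vsrar below is the arithmetic,
+ * sign-preserving form.
+ */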
+#define do_vsrlr(E, T) \
+static T do_vsrlr_ ##E(T s1, int sh) \
+{ \
+ if (sh == 0) { \
+ return s1; \
+ } else { \
+ return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
+ } \
+}
+
+do_vsrlr(B, uint8_t)
+do_vsrlr(H, uint16_t)
+do_vsrlr(W, uint32_t)
+do_vsrlr(D, uint64_t)
+
+#define VSRLR(NAME, BIT, T, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
+ } \
+}
+
+VSRLR(vsrlr_b, 8, uint8_t, B)
+VSRLR(vsrlr_h, 16, uint16_t, H)
+VSRLR(vsrlr_w, 32, uint32_t, W)
+VSRLR(vsrlr_d, 64, uint64_t, D)
+
+#define VSRLRI(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), imm); \
+ } \
+}
+
+VSRLRI(vsrlri_b, 8, B)
+VSRLRI(vsrlri_h, 16, H)
+VSRLRI(vsrlri_w, 32, W)
+VSRLRI(vsrlri_d, 64, D)
+
+#define do_vsrar(E, T) \
+static T do_vsrar_ ##E(T s1, int sh) \
+{ \
+ if (sh == 0) { \
+ return s1; \
+ } else { \
+ return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
+ } \
+}
+
+do_vsrar(B, int8_t)
+do_vsrar(H, int16_t)
+do_vsrar(W, int32_t)
+do_vsrar(D, int64_t)
+
+#define VSRAR(NAME, BIT, T, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = do_vsrar_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
+ } \
+}
+
+VSRAR(vsrar_b, 8, uint8_t, B)
+VSRAR(vsrar_h, 16, uint16_t, H)
+VSRAR(vsrar_w, 32, uint32_t, W)
+VSRAR(vsrar_d, 64, uint64_t, D)
+
+#define VSRARI(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = do_vsrar_ ## E(Vj->E(i), imm); \
+ } \
+}
+
+VSRARI(vsrari_b, 8, B)
+VSRARI(vsrari_h, 16, H)
+VSRARI(vsrari_w, 32, W)
+VSRARI(vsrari_d, 64, D)
+
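+/*
+ * R_SHIFT picks up its semantics from the type of the first operand:
+ * a logical shift for the unsigned narrowing helpers below and an
+ * arithmetic shift for the signed ones.
+ */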
+#define R_SHIFT(a, b) ((a) >> (b))
+
+#define VSRLN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = R_SHIFT((T)Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSRLN(vsrln_b_h, 16, uint16_t, B, H)
+VSRLN(vsrln_h_w, 32, uint32_t, H, W)
+VSRLN(vsrln_w_d, 64, uint64_t, W, D)
+
+#define VSRAN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = R_SHIFT(Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSRAN(vsran_b_h, 16, uint16_t, B, H)
+VSRAN(vsran_h_w, 32, uint32_t, H, W)
+VSRAN(vsran_w_d, 64, uint64_t, W, D)
+
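+/*
+ * Narrowing shifts by an immediate: the shifted elements of Vj fill
+ * the low half of the destination and those of Vd fill the high half.
+ */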
+#define VSRLNI(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, max; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ max = LSX_LEN/BIT; \
+ for (i = 0; i < max; i++) { \
+ temp.E1(i) = R_SHIFT((T)Vj->E2(i), imm); \
+ temp.E1(i + max) = R_SHIFT((T)Vd->E2(i), imm); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vsrlni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp.D(0) = int128_getlo(int128_urshift(Vj->Q(0), imm % 128));
+ temp.D(1) = int128_getlo(int128_urshift(Vd->Q(0), imm % 128));
+ *Vd = temp;
+}
+
+VSRLNI(vsrlni_b_h, 16, uint16_t, B, H)
+VSRLNI(vsrlni_h_w, 32, uint32_t, H, W)
+VSRLNI(vsrlni_w_d, 64, uint64_t, W, D)
+
+#define VSRANI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, max; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ max = LSX_LEN/BIT; \
+ for (i = 0; i < max; i++) { \
+ temp.E1(i) = R_SHIFT(Vj->E2(i), imm); \
+ temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vsrani_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp.D(0) = int128_getlo(int128_rshift(Vj->Q(0), imm % 128));
+ temp.D(1) = int128_getlo(int128_rshift(Vd->Q(0), imm % 128));
+ *Vd = temp;
+}
+
+VSRANI(vsrani_b_h, 16, B, H)
+VSRANI(vsrani_h_w, 32, H, W)
+VSRANI(vsrani_w_d, 64, W, D)
+
+#define VSRLRN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_vsrlr_ ## E2(Vj->E2(i), ((T)Vk->E2(i))%BIT); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSRLRN(vsrlrn_b_h, 16, uint16_t, B, H)
+VSRLRN(vsrlrn_h_w, 32, uint32_t, H, W)
+VSRLRN(vsrlrn_w_d, 64, uint64_t, W, D)
+
+#define VSRARN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_vsrar_ ## E2(Vj->E2(i), ((T)Vk->E2(i))%BIT); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSRARN(vsrarn_b_h, 16, uint8_t, B, H)
+VSRARN(vsrarn_h_w, 32, uint16_t, H, W)
+VSRARN(vsrarn_w_d, 64, uint32_t, W, D)
+
+#define VSRLRNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, max; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ max = LSX_LEN/BIT; \
+ for (i = 0; i < max; i++) { \
+ temp.E1(i) = do_vsrlr_ ## E2(Vj->E2(i), imm); \
+ temp.E1(i + max) = do_vsrlr_ ## E2(Vd->E2(i), imm); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vsrlrni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ Int128 r1, r2;
+
+ if (imm == 0) {
+ temp.D(0) = int128_getlo(Vj->Q(0));
+ temp.D(1) = int128_getlo(Vd->Q(0));
+ } else {
+ r1 = int128_and(int128_urshift(Vj->Q(0), (imm - 1)), int128_one());
+ r2 = int128_and(int128_urshift(Vd->Q(0), (imm - 1)), int128_one());
+
+ temp.D(0) = int128_getlo(int128_add(int128_urshift(Vj->Q(0), imm), r1));
+ temp.D(1) = int128_getlo(int128_add(int128_urshift(Vd->Q(0), imm), r2));
+ }
+ *Vd = temp;
+}
+
+VSRLRNI(vsrlrni_b_h, 16, B, H)
+VSRLRNI(vsrlrni_h_w, 32, H, W)
+VSRLRNI(vsrlrni_w_d, 64, W, D)
+
+#define VSRARNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, max; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ temp.D(0) = 0; \
+ temp.D(1) = 0; \
+ max = LSX_LEN/BIT; \
+ for (i = 0; i < max; i++) { \
+ temp.E1(i) = do_vsrar_ ## E2(Vj->E2(i), imm); \
+ temp.E1(i + max) = do_vsrar_ ## E2(Vd->E2(i), imm); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vsrarni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ Int128 r1, r2;
+
+ if (imm == 0) {
+ temp.D(0) = int128_getlo(Vj->Q(0));
+ temp.D(1) = int128_getlo(Vd->Q(0));
+ } else {
+ r1 = int128_and(int128_rshift(Vj->Q(0), (imm - 1)), int128_one());
+ r2 = int128_and(int128_rshift(Vd->Q(0), (imm - 1)), int128_one());
+
+ temp.D(0) = int128_getlo(int128_add(int128_rshift(Vj->Q(0), imm), r1));
+ temp.D(1) = int128_getlo(int128_add(int128_rshift(Vd->Q(0), imm), r2));
+ }
+ *Vd = temp;
+}
+
+VSRARNI(vsrarni_b_h, 16, B, H)
+VSRARNI(vsrarni_h_w, 32, H, W)
+VSRARNI(vsrarni_w_d, 64, W, D)
+
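+/*
+ * Saturating narrowing shift helpers: 'sa' is the shift amount and
+ * 'sh' the width of the saturation range, so results are clamped to
+ * (1 << sh) - 1 (and to the matching minimum for the signed variants).
+ */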
+#define SSRLNS(NAME, T1, T2, T3) \
+static T1 do_ssrlns_ ## NAME(T2 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ if (sa == 0) { \
+ shft_res = e2; \
+ } else { \
+ shft_res = (((T1)e2) >> sa); \
+ } \
+ T3 mask; \
+ mask = (1ull << sh) - 1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRLNS(B, uint16_t, int16_t, uint8_t)
+SSRLNS(H, uint32_t, int32_t, uint16_t)
+SSRLNS(W, uint64_t, int64_t, uint32_t)
+
+#define VSSRLN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrlns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRLN(vssrln_b_h, 16, uint16_t, B, H)
+VSSRLN(vssrln_h_w, 32, uint32_t, H, W)
+VSSRLN(vssrln_w_d, 64, uint64_t, W, D)
+
+#define SSRANS(E, T1, T2) \
+static T1 do_ssrans_ ## E(T1 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ if (sa == 0) { \
+ shft_res = e2; \
+ } else { \
+ shft_res = e2 >> sa; \
+ } \
+ T2 mask; \
+ mask = (1ll << sh) - 1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else if (shft_res < -(mask + 1)) { \
+ return ~mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRANS(B, int16_t, int8_t)
+SSRANS(H, int32_t, int16_t)
+SSRANS(W, int64_t, int32_t)
+
+#define VSSRAN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrans_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRAN(vssran_b_h, 16, uint16_t, B, H)
+VSSRAN(vssran_h_w, 32, uint32_t, H, W)
+VSSRAN(vssran_w_d, 64, uint64_t, W, D)
+
+#define SSRLNU(E, T1, T2, T3) \
+static T1 do_ssrlnu_ ## E(T3 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ if (sa == 0) { \
+ shft_res = e2; \
+ } else { \
+ shft_res = (((T1)e2) >> sa); \
+ } \
+ T2 mask; \
+ mask = (1ull << sh) - 1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRLNU(B, uint16_t, uint8_t, int16_t)
+SSRLNU(H, uint32_t, uint16_t, int32_t)
+SSRLNU(W, uint64_t, uint32_t, int64_t)
+
+#define VSSRLNU(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRLNU(vssrln_bu_h, 16, uint16_t, B, H)
+VSSRLNU(vssrln_hu_w, 32, uint32_t, H, W)
+VSSRLNU(vssrln_wu_d, 64, uint64_t, W, D)
+
+#define SSRANU(E, T1, T2, T3) \
+static T1 do_ssranu_ ## E(T3 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ if (sa == 0) { \
+ shft_res = e2; \
+ } else { \
+ shft_res = e2 >> sa; \
+ } \
+ if (e2 < 0) { \
+ shft_res = 0; \
+ } \
+ T2 mask; \
+ mask = (1ull << sh) - 1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRANU(B, uint16_t, uint8_t, int16_t)
+SSRANU(H, uint32_t, uint16_t, int32_t)
+SSRANU(W, uint64_t, uint32_t, int64_t)
+
+#define VSSRANU(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssranu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRANU(vssran_bu_h, 16, uint16_t, B, H)
+VSSRANU(vssran_hu_w, 32, uint32_t, H, W)
+VSSRANU(vssran_wu_d, 64, uint64_t, W, D)
+
+#define VSSRLNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrlns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrlns_ ## E1(Vd->E2(i), imm, BIT/2 -1);\
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrlni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ shft_res1 = int128_urshift(Vj->Q(0), imm);
+ shft_res2 = int128_urshift(Vd->Q(0), imm);
+ }
+ mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
+
+ if (int128_ult(mask, shft_res1)) {
+ Vd->D(0) = int128_getlo(mask);
+ } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_ult(mask, shft_res2)) {
+ Vd->D(1) = int128_getlo(mask);
+ } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRLNI(vssrlni_b_h, 16, B, H)
+VSSRLNI(vssrlni_h_w, 32, H, W)
+VSSRLNI(vssrlni_w_d, 64, W, D)
+
+#define VSSRANI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrans_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrans_ ## E1(Vd->E2(i), imm, BIT/2 -1); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrani_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask, min;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ shft_res1 = int128_rshift(Vj->Q(0), imm);
+ shft_res2 = int128_rshift(Vd->Q(0), imm);
+ }
+ mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
+ min = int128_lshift(int128_one(), 63);
+
+ if (int128_gt(shft_res1, mask)) {
+ Vd->D(0) = int128_getlo(mask);
+ } else if (int128_lt(shft_res1, int128_neg(min))) {
+ Vd->D(0) = int128_getlo(min);
+ } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_gt(shft_res2, mask)) {
+ Vd->D(1) = int128_getlo(mask);
+ } else if (int128_lt(shft_res2, int128_neg(min))) {
+ Vd->D(1) = int128_getlo(min);
+ } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRANI(vssrani_b_h, 16, B, H)
+VSSRANI(vssrani_h_w, 32, H, W)
+VSSRANI(vssrani_w_d, 64, W, D)
+
+#define VSSRLNUI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrlnu_ ## E1(Vj->E2(i), imm, BIT/2); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrlnu_ ## E1(Vd->E2(i), imm, BIT/2); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrlni_du_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ shft_res1 = int128_urshift(Vj->Q(0), imm);
+ shft_res2 = int128_urshift(Vd->Q(0), imm);
+ }
+ mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());
+
+ if (int128_ult(mask, shft_res1)) {
+ Vd->D(0) = int128_getlo(mask);
+ } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_ult(mask, shft_res2)) {
+ Vd->D(1) = int128_getlo(mask);
+ } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRLNUI(vssrlni_bu_h, 16, B, H)
+VSSRLNUI(vssrlni_hu_w, 32, H, W)
+VSSRLNUI(vssrlni_wu_d, 64, W, D)
+
+#define VSSRANUI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssranu_ ## E1(Vj->E2(i), imm, BIT/2); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssranu_ ## E1(Vd->E2(i), imm, BIT/2); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrani_du_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ shft_res1 = int128_rshift(Vj->Q(0), imm);
+ shft_res2 = int128_rshift(Vd->Q(0), imm);
+ }
+
+ if (int128_lt(Vj->Q(0), int128_zero())) {
+ shft_res1 = int128_zero();
+ }
+
+ if (int128_lt(Vd->Q(0), int128_zero())) {
+ shft_res2 = int128_zero();
+ }
+
+ mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());
+
+ if (int128_ult(mask, shft_res1)) {
+ Vd->D(0) = int128_getlo(mask);
+ } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_ult(mask, shft_res2)) {
+ Vd->D(1) = int128_getlo(mask);
+ } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRANUI(vssrani_bu_h, 16, B, H)
+VSSRANUI(vssrani_hu_w, 32, H, W)
+VSSRANUI(vssrani_wu_d, 64, W, D)
+
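+/* As above, but the shift rounds (see do_vsrlr/do_vsrar) before saturating. */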
+#define SSRLRNS(E1, E2, T1, T2, T3) \
+static T1 do_ssrlrns_ ## E1(T2 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ \
+ shft_res = do_vsrlr_ ## E2(e2, sa); \
+ T1 mask; \
+ mask = (1ull << sh) - 1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRLRNS(B, H, uint16_t, int16_t, uint8_t)
+SSRLRNS(H, W, uint32_t, int32_t, uint16_t)
+SSRLRNS(W, D, uint64_t, int64_t, uint32_t)
+
+#define VSSRLRN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRLRN(vssrlrn_b_h, 16, uint16_t, B, H)
+VSSRLRN(vssrlrn_h_w, 32, uint32_t, H, W)
+VSSRLRN(vssrlrn_w_d, 64, uint64_t, W, D)
+
+#define SSRARNS(E1, E2, T1, T2) \
+static T1 do_ssrarns_ ## E1(T1 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ \
+ shft_res = do_vsrar_ ## E2(e2, sa); \
+ T2 mask; \
+ mask = (1ll << sh) - 1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else if (shft_res < -(mask + 1)) { \
+ return ~mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRARNS(B, H, int16_t, int8_t)
+SSRARNS(H, W, int32_t, int16_t)
+SSRARNS(W, D, int64_t, int32_t)
+
+#define VSSRARN(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrarns_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2 -1); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRARN(vssrarn_b_h, 16, uint16_t, B, H)
+VSSRARN(vssrarn_h_w, 32, uint32_t, H, W)
+VSSRARN(vssrarn_w_d, 64, uint64_t, W, D)
+
+#define SSRLRNU(E1, E2, T1, T2, T3) \
+static T1 do_ssrlrnu_ ## E1(T3 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ \
+ shft_res = do_vsrlr_ ## E2(e2, sa); \
+ \
+ T2 mask; \
+ mask = (1ull << sh) - 1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRLRNU(B, H, uint16_t, uint8_t, int16_t)
+SSRLRNU(H, W, uint32_t, uint16_t, int32_t)
+SSRLRNU(W, D, uint64_t, uint32_t, int64_t)
+
+#define VSSRLRNU(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRLRNU(vssrlrn_bu_h, 16, uint16_t, B, H)
+VSSRLRNU(vssrlrn_hu_w, 32, uint32_t, H, W)
+VSSRLRNU(vssrlrn_wu_d, 64, uint64_t, W, D)
+
+#define SSRARNU(E1, E2, T1, T2, T3) \
+static T1 do_ssrarnu_ ## E1(T3 e2, int sa, int sh) \
+{ \
+ T1 shft_res; \
+ \
+ if (e2 < 0) { \
+ shft_res = 0; \
+ } else { \
+ shft_res = do_vsrar_ ## E2(e2, sa); \
+ } \
+ T2 mask; \
+ mask = (1ull << sh) - 1; \
+ if (shft_res > mask) { \
+ return mask; \
+ } else { \
+ return shft_res; \
+ } \
+}
+
+SSRARNU(B, H, uint16_t, uint8_t, int16_t)
+SSRARNU(H, W, uint32_t, uint16_t, int32_t)
+SSRARNU(W, D, uint64_t, uint32_t, int64_t)
+
+#define VSSRARNU(NAME, BIT, T, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), (T)Vk->E2(i)%BIT, BIT/2); \
+ } \
+ Vd->D(1) = 0; \
+}
+
+VSSRARNU(vssrarn_bu_h, 16, uint16_t, B, H)
+VSSRARNU(vssrarn_hu_w, 32, uint32_t, H, W)
+VSSRARNU(vssrarn_wu_d, 64, uint64_t, W, D)
+
+#define VSSRLRNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrlrns_ ## E1(Vd->E2(i), imm, BIT/2 -1);\
+ } \
+ *Vd = temp; \
+}
+
+#define VSSRLRNI_Q(NAME, sh) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ Int128 shft_res1, shft_res2, mask, r1, r2; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ if (imm == 0) { \
+ shft_res1 = Vj->Q(0); \
+ shft_res2 = Vd->Q(0); \
+ } else { \
+ r1 = int128_and(int128_urshift(Vj->Q(0), (imm - 1)), int128_one()); \
+ r2 = int128_and(int128_urshift(Vd->Q(0), (imm - 1)), int128_one()); \
+ \
+ shft_res1 = (int128_add(int128_urshift(Vj->Q(0), imm), r1)); \
+ shft_res2 = (int128_add(int128_urshift(Vd->Q(0), imm), r2)); \
+ } \
+ \
+ mask = int128_sub(int128_lshift(int128_one(), sh), int128_one()); \
+ \
+ if (int128_ult(mask, shft_res1)) { \
+ Vd->D(0) = int128_getlo(mask); \
+ } else { \
+ Vd->D(0) = int128_getlo(shft_res1); \
+ } \
+ \
+ if (int128_ult(mask, shft_res2)) { \
+ Vd->D(1) = int128_getlo(mask); \
+ } else { \
+ Vd->D(1) = int128_getlo(shft_res2); \
+ } \
+}
+
+VSSRLRNI(vssrlrni_b_h, 16, B, H)
+VSSRLRNI(vssrlrni_h_w, 32, H, W)
+VSSRLRNI(vssrlrni_w_d, 64, W, D)
+VSSRLRNI_Q(vssrlrni_d_q, 63)
+
+#define VSSRARNI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrarns_ ## E1(Vj->E2(i), imm, BIT/2 -1); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrarns_ ## E1(Vd->E2(i), imm, BIT/2 -1); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrarni_d_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ r1 = int128_and(int128_rshift(Vj->Q(0), (imm - 1)), int128_one());
+ r2 = int128_and(int128_rshift(Vd->Q(0), (imm - 1)), int128_one());
+
+ shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
+ shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
+ }
+
+ mask1 = int128_sub(int128_lshift(int128_one(), 63), int128_one());
+ mask2 = int128_lshift(int128_one(), 63);
+
+ if (int128_gt(shft_res1, mask1)) {
+ Vd->D(0) = int128_getlo(mask1);
+ } else if (int128_lt(shft_res1, int128_neg(mask2))) {
+ Vd->D(0) = int128_getlo(mask2);
+ } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_gt(shft_res2, mask1)) {
+ Vd->D(1) = int128_getlo(mask1);
+ } else if (int128_lt(shft_res2, int128_neg(mask2))) {
+ Vd->D(1) = int128_getlo(mask2);
+ } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRARNI(vssrarni_b_h, 16, B, H)
+VSSRARNI(vssrarni_h_w, 32, H, W)
+VSSRARNI(vssrarni_w_d, 64, W, D)
+
+#define VSSRLRNUI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), imm, BIT/2); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrlrnu_ ## E1(Vd->E2(i), imm, BIT/2); \
+ } \
+ *Vd = temp; \
+}
+
+VSSRLRNUI(vssrlrni_bu_h, 16, B, H)
+VSSRLRNUI(vssrlrni_hu_w, 32, H, W)
+VSSRLRNUI(vssrlrni_wu_d, 64, W, D)
+VSSRLRNI_Q(vssrlrni_du_q, 64)
+
+#define VSSRARNUI(NAME, BIT, E1, E2) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), imm, BIT/2); \
+ temp.E1(i + LSX_LEN/BIT) = do_ssrarnu_ ## E1(Vd->E2(i), imm, BIT/2); \
+ } \
+ *Vd = temp; \
+}
+
+void HELPER(vssrarni_du_q)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ if (imm == 0) {
+ shft_res1 = Vj->Q(0);
+ shft_res2 = Vd->Q(0);
+ } else {
+ r1 = int128_and(int128_rshift(Vj->Q(0), (imm - 1)), int128_one());
+ r2 = int128_and(int128_rshift(Vd->Q(0), (imm - 1)), int128_one());
+
+ shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
+ shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
+ }
+
+ if (int128_lt(Vj->Q(0), int128_zero())) {
+ shft_res1 = int128_zero();
+ }
+ if (int128_lt(Vd->Q(0), int128_zero())) {
+ shft_res2 = int128_zero();
+ }
+
+ mask1 = int128_sub(int128_lshift(int128_one(), 64), int128_one());
+ mask2 = int128_lshift(int128_one(), 64);
+
+ if (int128_gt(shft_res1, mask1)) {
+ Vd->D(0) = int128_getlo(mask1);
+ } else if (int128_lt(shft_res1, int128_neg(mask2))) {
+ Vd->D(0) = int128_getlo(mask2);
+ } else {
+ Vd->D(0) = int128_getlo(shft_res1);
+ }
+
+ if (int128_gt(shft_res2, mask1)) {
+ Vd->D(1) = int128_getlo(mask1);
+ } else if (int128_lt(shft_res2, int128_neg(mask2))) {
+ Vd->D(1) = int128_getlo(mask2);
+ } else {
+ Vd->D(1) = int128_getlo(shft_res2);
+ }
+}
+
+VSSRARNUI(vssrarni_bu_h, 16, B, H)
+VSSRARNUI(vssrarni_hu_w, 32, H, W)
+VSSRARNUI(vssrarni_wu_d, 64, W, D)
+
+#define DO_2OP(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i)); \
+ } \
+}
+
+#define DO_CLO_B(N) (clz32(~N & 0xff) - 24)
+#define DO_CLO_H(N) (clz32(~N & 0xffff) - 16)
+#define DO_CLO_W(N) (clz32(~N))
+#define DO_CLO_D(N) (clz64(~N))
+#define DO_CLZ_B(N) (clz32(N) - 24)
+#define DO_CLZ_H(N) (clz32(N) - 16)
+#define DO_CLZ_W(N) (clz32(N))
+#define DO_CLZ_D(N) (clz64(N))
+
+DO_2OP(vclo_b, 8, UB, DO_CLO_B)
+DO_2OP(vclo_h, 16, UH, DO_CLO_H)
+DO_2OP(vclo_w, 32, UW, DO_CLO_W)
+DO_2OP(vclo_d, 64, UD, DO_CLO_D)
+DO_2OP(vclz_b, 8, UB, DO_CLZ_B)
+DO_2OP(vclz_h, 16, UH, DO_CLZ_H)
+DO_2OP(vclz_w, 32, UW, DO_CLZ_W)
+DO_2OP(vclz_d, 64, UD, DO_CLZ_D)
+
+#define VPCNT(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = FN(Vj->E(i)); \
+ } \
+}
+
+VPCNT(vpcnt_b, 8, UB, ctpop8)
+VPCNT(vpcnt_h, 16, UH, ctpop16)
+VPCNT(vpcnt_w, 32, UW, ctpop32)
+VPCNT(vpcnt_d, 64, UD, ctpop64)
+
+#define DO_BITCLR(a, bit) (a & ~(1ull << bit))
+#define DO_BITSET(a, bit) (a | 1ull << bit)
+#define DO_BITREV(a, bit) (a ^ (1ull << bit))
+
+#define DO_BIT(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ VReg *Vk = (VReg *)vk; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i)%BIT); \
+ } \
+}
+
+DO_BIT(vbitclr_b, 8, UB, DO_BITCLR)
+DO_BIT(vbitclr_h, 16, UH, DO_BITCLR)
+DO_BIT(vbitclr_w, 32, UW, DO_BITCLR)
+DO_BIT(vbitclr_d, 64, UD, DO_BITCLR)
+DO_BIT(vbitset_b, 8, UB, DO_BITSET)
+DO_BIT(vbitset_h, 16, UH, DO_BITSET)
+DO_BIT(vbitset_w, 32, UW, DO_BITSET)
+DO_BIT(vbitset_d, 64, UD, DO_BITSET)
+DO_BIT(vbitrev_b, 8, UB, DO_BITREV)
+DO_BIT(vbitrev_h, 16, UH, DO_BITREV)
+DO_BIT(vbitrev_w, 32, UW, DO_BITREV)
+DO_BIT(vbitrev_d, 64, UD, DO_BITREV)
+
+#define DO_BITI(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), imm); \
+ } \
+}
+
+DO_BITI(vbitclri_b, 8, UB, DO_BITCLR)
+DO_BITI(vbitclri_h, 16, UH, DO_BITCLR)
+DO_BITI(vbitclri_w, 32, UW, DO_BITCLR)
+DO_BITI(vbitclri_d, 64, UD, DO_BITCLR)
+DO_BITI(vbitseti_b, 8, UB, DO_BITSET)
+DO_BITI(vbitseti_h, 16, UH, DO_BITSET)
+DO_BITI(vbitseti_w, 32, UW, DO_BITSET)
+DO_BITI(vbitseti_d, 64, UD, DO_BITSET)
+DO_BITI(vbitrevi_b, 8, UB, DO_BITREV)
+DO_BITI(vbitrevi_h, 16, UH, DO_BITREV)
+DO_BITI(vbitrevi_w, 32, UW, DO_BITREV)
+DO_BITI(vbitrevi_d, 64, UD, DO_BITREV)
+
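+/*
+ * vfrstp: scan Vj for the first negative element and write its index
+ * (or the element count if none is negative) into the element of Vd
+ * selected by Vk, or by the immediate for the vfrstpi variants.
+ */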
+#define VFRSTP(NAME, BIT, MASK, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i, m; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ if (Vj->E(i) < 0) { \
+ break; \
+ } \
+ } \
+ m = Vk->E(0) & MASK; \
+ Vd->E(m) = i; \
+}
+
+VFRSTP(vfrstp_b, 8, 0xf, B)
+VFRSTP(vfrstp_h, 16, 0x7, H)
+
+#define VFRSTPI(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i, m; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ if (Vj->E(i) < 0) { \
+ break; \
+ } \
+ } \
+ m = imm % (LSX_LEN/BIT); \
+ Vd->E(m) = i; \
+}
+
+VFRSTPI(vfrstpi_b, 8, B)
+VFRSTPI(vfrstpi_h, 16, H)
+
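+/*
+ * Fold the pending softfloat exception flags into FCSR0: flags that
+ * are enabled raise EXCCODE_FPE, the rest are accumulated as sticky
+ * flags.  'mask' lets a caller discard flags it wants to ignore.
+ */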
+static void vec_update_fcsr0_mask(CPULoongArchState *env,
+ uintptr_t pc, int mask)
+{
+ int flags = get_float_exception_flags(&env->fp_status);
+
+ set_float_exception_flags(0, &env->fp_status);
+
+ flags &= ~mask;
+
+ if (flags) {
+ flags = ieee_ex_to_loongarch(flags);
+ UPDATE_FP_CAUSE(env->fcsr0, flags);
+ }
+
+ if (GET_FP_ENABLES(env->fcsr0) & flags) {
+ do_raise_exception(env, EXCCODE_FPE, pc);
+ } else {
+ UPDATE_FP_FLAGS(env->fcsr0, flags);
+ }
+}
+
+static void vec_update_fcsr0(CPULoongArchState *env, uintptr_t pc)
+{
+ vec_update_fcsr0_mask(env, pc, 0);
+}
+
+static inline void vec_clear_cause(CPULoongArchState *env)
+{
+ SET_FP_CAUSE(env->fcsr0, 0);
+}
+
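+/*
+ * The per-element float helpers update FCSR0 after every element, so
+ * that a trapped exception unwinds to the faulting instruction via
+ * the GETPC() value recorded here.
+ */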
+#define DO_3OP_F(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ } \
+}
+
+DO_3OP_F(vfadd_s, 32, UW, float32_add)
+DO_3OP_F(vfadd_d, 64, UD, float64_add)
+DO_3OP_F(vfsub_s, 32, UW, float32_sub)
+DO_3OP_F(vfsub_d, 64, UD, float64_sub)
+DO_3OP_F(vfmul_s, 32, UW, float32_mul)
+DO_3OP_F(vfmul_d, 64, UD, float64_mul)
+DO_3OP_F(vfdiv_s, 32, UW, float32_div)
+DO_3OP_F(vfdiv_d, 64, UD, float64_div)
+DO_3OP_F(vfmax_s, 32, UW, float32_maxnum)
+DO_3OP_F(vfmax_d, 64, UD, float64_maxnum)
+DO_3OP_F(vfmin_s, 32, UW, float32_minnum)
+DO_3OP_F(vfmin_d, 64, UD, float64_minnum)
+DO_3OP_F(vfmaxa_s, 32, UW, float32_maxnummag)
+DO_3OP_F(vfmaxa_d, 64, UD, float64_maxnummag)
+DO_3OP_F(vfmina_s, 32, UW, float32_minnummag)
+DO_3OP_F(vfmina_d, 64, UD, float64_minnummag)
+
+#define DO_4OP_F(NAME, BIT, E, FN, flags) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk, uint32_t va) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ VReg *Va = &(env->fpr[va].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = FN(Vj->E(i), Vk->E(i), Va->E(i), flags, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ } \
+}
+
+DO_4OP_F(vfmadd_s, 32, UW, float32_muladd, 0)
+DO_4OP_F(vfmadd_d, 64, UD, float64_muladd, 0)
+DO_4OP_F(vfmsub_s, 32, UW, float32_muladd, float_muladd_negate_c)
+DO_4OP_F(vfmsub_d, 64, UD, float64_muladd, float_muladd_negate_c)
+DO_4OP_F(vfnmadd_s, 32, UW, float32_muladd, float_muladd_negate_result)
+DO_4OP_F(vfnmadd_d, 64, UD, float64_muladd, float_muladd_negate_result)
+DO_4OP_F(vfnmsub_s, 32, UW, float32_muladd,
+ float_muladd_negate_c | float_muladd_negate_result)
+DO_4OP_F(vfnmsub_d, 64, UD, float64_muladd,
+ float_muladd_negate_c | float_muladd_negate_result)
+
+#define DO_2OP_F(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = FN(env, Vj->E(i)); \
+ } \
+}
+
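+/*
+ * flogb: take log2 with the rounding mode forced to round-down and
+ * round the result to an integer; the inexact flag this raises is
+ * deliberately masked out when updating FCSR0.
+ */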
+#define FLOGB(BIT, T) \
+static T do_flogb_## BIT(CPULoongArchState *env, T fj) \
+{ \
+ T fp, fd; \
+ float_status *status = &env->fp_status; \
+ FloatRoundMode old_mode = get_float_rounding_mode(status); \
+ \
+ set_float_rounding_mode(float_round_down, status); \
+ fp = float ## BIT ##_log2(fj, status); \
+ fd = float ## BIT ##_round_to_int(fp, status); \
+ set_float_rounding_mode(old_mode, status); \
+ vec_update_fcsr0_mask(env, GETPC(), float_flag_inexact); \
+ return fd; \
+}
+
+FLOGB(32, uint32_t)
+FLOGB(64, uint64_t)
+
+#define FCLASS(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = FN(env, Vj->E(i)); \
+ } \
+}
+
+FCLASS(vfclass_s, 32, UW, helper_fclass_s)
+FCLASS(vfclass_d, 64, UD, helper_fclass_d)
+
+#define FSQRT(BIT, T) \
+static T do_fsqrt_## BIT(CPULoongArchState *env, T fj) \
+{ \
+ T fd; \
+ fd = float ## BIT ##_sqrt(fj, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+FSQRT(32, uint32_t)
+FSQRT(64, uint64_t)
+
+#define FRECIP(BIT, T) \
+static T do_frecip_## BIT(CPULoongArchState *env, T fj) \
+{ \
+ T fd; \
+ fd = float ## BIT ##_div(float ## BIT ##_one, fj, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+FRECIP(32, uint32_t)
+FRECIP(64, uint64_t)
+
+#define FRSQRT(BIT, T) \
+static T do_frsqrt_## BIT(CPULoongArchState *env, T fj) \
+{ \
+ T fd, fp; \
+ fp = float ## BIT ##_sqrt(fj, &env->fp_status); \
+ fd = float ## BIT ##_div(float ## BIT ##_one, fp, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+FRSQRT(32, uint32_t)
+FRSQRT(64, uint64_t)
+
+DO_2OP_F(vflogb_s, 32, UW, do_flogb_32)
+DO_2OP_F(vflogb_d, 64, UD, do_flogb_64)
+DO_2OP_F(vfsqrt_s, 32, UW, do_fsqrt_32)
+DO_2OP_F(vfsqrt_d, 64, UD, do_fsqrt_64)
+DO_2OP_F(vfrecip_s, 32, UW, do_frecip_32)
+DO_2OP_F(vfrecip_d, 64, UD, do_frecip_64)
+DO_2OP_F(vfrsqrt_s, 32, UW, do_frsqrt_32)
+DO_2OP_F(vfrsqrt_d, 64, UD, do_frsqrt_64)
+
+static uint32_t float16_cvt_float32(uint16_t h, float_status *status)
+{
+ return float16_to_float32(h, true, status);
+}
+
+static uint64_t float32_cvt_float64(uint32_t s, float_status *status)
+{
+ return float32_to_float64(s, status);
+}
+
+static uint16_t float32_cvt_float16(uint32_t s, float_status *status)
+{
+ return float32_to_float16(s, true, status);
+}
+
+static uint32_t float64_cvt_float32(uint64_t d, float_status *status)
+{
+ return float64_to_float32(d, status);
+}
+
+void HELPER(vfcvtl_s_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/32; i++) {
+ temp.UW(i) = float16_cvt_float32(Vj->UH(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfcvtl_d_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/64; i++) {
+ temp.UD(i) = float32_cvt_float64(Vj->UW(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfcvth_s_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/32; i++) {
+ temp.UW(i) = float16_cvt_float32(Vj->UH(i + 4), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfcvth_d_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/64; i++) {
+ temp.UD(i) = float32_cvt_float64(Vj->UW(i + 2), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfcvt_h_s)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/32; i++) {
+ temp.UH(i + 4) = float32_cvt_float16(Vj->UW(i), &env->fp_status);
+ temp.UH(i) = float32_cvt_float16(Vk->UW(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfcvt_s_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < LSX_LEN/64; i++) {
+ temp.UW(i + 2) = float64_cvt_float32(Vj->UD(i), &env->fp_status);
+ temp.UW(i) = float64_cvt_float32(Vk->UD(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vfrint_s)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 4; i++) {
+ Vd->W(i) = float32_round_to_int(Vj->UW(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+}
+
+void HELPER(vfrint_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 2; i++) {
+ Vd->D(i) = float64_round_to_int(Vj->UD(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+}
+
+#define FCVT_2OP(NAME, BIT, E, MODE) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
+ set_float_rounding_mode(MODE, &env->fp_status); \
+ Vd->E(i) = float## BIT ## _round_to_int(Vj->E(i), &env->fp_status); \
+ set_float_rounding_mode(old_mode, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ } \
+}
+
+FCVT_2OP(vfrintrne_s, 32, UW, float_round_nearest_even)
+FCVT_2OP(vfrintrne_d, 64, UD, float_round_nearest_even)
+FCVT_2OP(vfrintrz_s, 32, UW, float_round_to_zero)
+FCVT_2OP(vfrintrz_d, 64, UD, float_round_to_zero)
+FCVT_2OP(vfrintrp_s, 32, UW, float_round_up)
+FCVT_2OP(vfrintrp_d, 64, UD, float_round_up)
+FCVT_2OP(vfrintrm_s, 32, UW, float_round_down)
+FCVT_2OP(vfrintrm_d, 64, UD, float_round_down)
+
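+/*
+ * Float-to-integer conversions: FTINT wraps a conversion in a
+ * temporary rounding-mode override, and DO_FTINT forces the result
+ * to zero when the invalid flag was raised by a NaN source.
+ */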
+#define FTINT(NAME, FMT1, FMT2, T1, T2, MODE) \
+static T2 do_ftint ## NAME(CPULoongArchState *env, T1 fj) \
+{ \
+ T2 fd; \
+ FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
+ \
+ set_float_rounding_mode(MODE, &env->fp_status); \
+ fd = do_## FMT1 ##_to_## FMT2(env, fj); \
+ set_float_rounding_mode(old_mode, &env->fp_status); \
+ return fd; \
+}
+
+#define DO_FTINT(FMT1, FMT2, T1, T2) \
+static T2 do_## FMT1 ##_to_## FMT2(CPULoongArchState *env, T1 fj) \
+{ \
+ T2 fd; \
+ \
+ fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
+ if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { \
+ if (FMT1 ##_is_any_nan(fj)) { \
+ fd = 0; \
+ } \
+ } \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+DO_FTINT(float32, int32, uint32_t, uint32_t)
+DO_FTINT(float64, int64, uint64_t, uint64_t)
+DO_FTINT(float32, uint32, uint32_t, uint32_t)
+DO_FTINT(float64, uint64, uint64_t, uint64_t)
+DO_FTINT(float64, int32, uint64_t, uint32_t)
+DO_FTINT(float32, int64, uint32_t, uint64_t)
+
+FTINT(rne_w_s, float32, int32, uint32_t, uint32_t, float_round_nearest_even)
+FTINT(rne_l_d, float64, int64, uint64_t, uint64_t, float_round_nearest_even)
+FTINT(rp_w_s, float32, int32, uint32_t, uint32_t, float_round_up)
+FTINT(rp_l_d, float64, int64, uint64_t, uint64_t, float_round_up)
+FTINT(rz_w_s, float32, int32, uint32_t, uint32_t, float_round_to_zero)
+FTINT(rz_l_d, float64, int64, uint64_t, uint64_t, float_round_to_zero)
+FTINT(rm_w_s, float32, int32, uint32_t, uint32_t, float_round_down)
+FTINT(rm_l_d, float64, int64, uint64_t, uint64_t, float_round_down)
+
+DO_2OP_F(vftintrne_w_s, 32, UW, do_ftintrne_w_s)
+DO_2OP_F(vftintrne_l_d, 64, UD, do_ftintrne_l_d)
+DO_2OP_F(vftintrp_w_s, 32, UW, do_ftintrp_w_s)
+DO_2OP_F(vftintrp_l_d, 64, UD, do_ftintrp_l_d)
+DO_2OP_F(vftintrz_w_s, 32, UW, do_ftintrz_w_s)
+DO_2OP_F(vftintrz_l_d, 64, UD, do_ftintrz_l_d)
+DO_2OP_F(vftintrm_w_s, 32, UW, do_ftintrm_w_s)
+DO_2OP_F(vftintrm_l_d, 64, UD, do_ftintrm_l_d)
+DO_2OP_F(vftint_w_s, 32, UW, do_float32_to_int32)
+DO_2OP_F(vftint_l_d, 64, UD, do_float64_to_int64)
+
+FTINT(rz_wu_s, float32, uint32, uint32_t, uint32_t, float_round_to_zero)
+FTINT(rz_lu_d, float64, uint64, uint64_t, uint64_t, float_round_to_zero)
+
+DO_2OP_F(vftintrz_wu_s, 32, UW, do_ftintrz_wu_s)
+DO_2OP_F(vftintrz_lu_d, 64, UD, do_ftintrz_lu_d)
+DO_2OP_F(vftint_wu_s, 32, UW, do_float32_to_uint32)
+DO_2OP_F(vftint_lu_d, 64, UD, do_float64_to_uint64)
+
+FTINT(rm_w_d, float64, int32, uint64_t, uint32_t, float_round_down)
+FTINT(rp_w_d, float64, int32, uint64_t, uint32_t, float_round_up)
+FTINT(rz_w_d, float64, int32, uint64_t, uint32_t, float_round_to_zero)
+FTINT(rne_w_d, float64, int32, uint64_t, uint32_t, float_round_nearest_even)
+
+#define FTINT_W_D(NAME, FN) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < 2; i++) { \
+ temp.W(i + 2) = FN(env, Vj->UD(i)); \
+ temp.W(i) = FN(env, Vk->UD(i)); \
+ } \
+ *Vd = temp; \
+}
+
+FTINT_W_D(vftint_w_d, do_float64_to_int32)
+FTINT_W_D(vftintrm_w_d, do_ftintrm_w_d)
+FTINT_W_D(vftintrp_w_d, do_ftintrp_w_d)
+FTINT_W_D(vftintrz_w_d, do_ftintrz_w_d)
+FTINT_W_D(vftintrne_w_d, do_ftintrne_w_d)
+
+FTINT(rml_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
+FTINT(rpl_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
+FTINT(rzl_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
+FTINT(rnel_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
+FTINT(rmh_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
+FTINT(rph_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
+FTINT(rzh_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
+FTINT(rneh_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
+
+#define FTINTL_L_S(NAME, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < 2; i++) { \
+ temp.D(i) = FN(env, Vj->UW(i)); \
+ } \
+ *Vd = temp; \
+}
+
+FTINTL_L_S(vftintl_l_s, do_float32_to_int64)
+FTINTL_L_S(vftintrml_l_s, do_ftintrml_l_s)
+FTINTL_L_S(vftintrpl_l_s, do_ftintrpl_l_s)
+FTINTL_L_S(vftintrzl_l_s, do_ftintrzl_l_s)
+FTINTL_L_S(vftintrnel_l_s, do_ftintrnel_l_s)
+
+#define FTINTH_L_S(NAME, FN) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < 2; i++) { \
+ temp.D(i) = FN(env, Vj->UW(i + 2)); \
+ } \
+ *Vd = temp; \
+}
+
+FTINTH_L_S(vftinth_l_s, do_float32_to_int64)
+FTINTH_L_S(vftintrmh_l_s, do_ftintrmh_l_s)
+FTINTH_L_S(vftintrph_l_s, do_ftintrph_l_s)
+FTINTH_L_S(vftintrzh_l_s, do_ftintrzh_l_s)
+FTINTH_L_S(vftintrneh_l_s, do_ftintrneh_l_s)
+
+#define FFINT(NAME, FMT1, FMT2, T1, T2) \
+static T2 do_ffint_ ## NAME(CPULoongArchState *env, T1 fj) \
+{ \
+ T2 fd; \
+ \
+ fd = FMT1 ##_to_## FMT2(fj, &env->fp_status); \
+ vec_update_fcsr0(env, GETPC()); \
+ return fd; \
+}
+
+FFINT(s_w, int32, float32, int32_t, uint32_t)
+FFINT(d_l, int64, float64, int64_t, uint64_t)
+FFINT(s_wu, uint32, float32, uint32_t, uint32_t)
+FFINT(d_lu, uint64, float64, uint64_t, uint64_t)
+
+DO_2OP_F(vffint_s_w, 32, W, do_ffint_s_w)
+DO_2OP_F(vffint_d_l, 64, D, do_ffint_d_l)
+DO_2OP_F(vffint_s_wu, 32, UW, do_ffint_s_wu)
+DO_2OP_F(vffint_d_lu, 64, UD, do_ffint_d_lu)
+
+void HELPER(vffintl_d_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 2; i++) {
+ temp.D(i) = int32_to_float64(Vj->W(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vffinth_d_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 2; i++) {
+ temp.D(i) = int32_to_float64(Vj->W(i + 2), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+void HELPER(vffint_s_l)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk)
+{
+ int i;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+
+ vec_clear_cause(env);
+ for (i = 0; i < 2; i++) {
+ temp.W(i + 2) = int64_to_float32(Vj->D(i), &env->fp_status);
+ temp.W(i) = int64_to_float32(Vk->D(i), &env->fp_status);
+ vec_update_fcsr0(env, GETPC());
+ }
+ *Vd = temp;
+}
+
+#define VSEQ(a, b) (a == b ? -1 : 0)
+#define VSLE(a, b) (a <= b ? -1 : 0)
+#define VSLT(a, b) (a < b ? -1 : 0)
+
+#define VCMPI(NAME, BIT, E, DO_OP) \
+void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
+{ \
+ int i; \
+ VReg *Vd = (VReg *)vd; \
+ VReg *Vj = (VReg *)vj; \
+ typedef __typeof(Vd->E(0)) TD; \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ Vd->E(i) = DO_OP(Vj->E(i), (TD)imm); \
+ } \
+}
+
+VCMPI(vseqi_b, 8, B, VSEQ)
+VCMPI(vseqi_h, 16, H, VSEQ)
+VCMPI(vseqi_w, 32, W, VSEQ)
+VCMPI(vseqi_d, 64, D, VSEQ)
+VCMPI(vslei_b, 8, B, VSLE)
+VCMPI(vslei_h, 16, H, VSLE)
+VCMPI(vslei_w, 32, W, VSLE)
+VCMPI(vslei_d, 64, D, VSLE)
+VCMPI(vslei_bu, 8, UB, VSLE)
+VCMPI(vslei_hu, 16, UH, VSLE)
+VCMPI(vslei_wu, 32, UW, VSLE)
+VCMPI(vslei_du, 64, UD, VSLE)
+VCMPI(vslti_b, 8, B, VSLT)
+VCMPI(vslti_h, 16, H, VSLT)
+VCMPI(vslti_w, 32, W, VSLT)
+VCMPI(vslti_d, 64, D, VSLT)
+VCMPI(vslti_bu, 8, UB, VSLT)
+VCMPI(vslti_hu, 16, UH, VSLT)
+VCMPI(vslti_wu, 32, UW, VSLT)
+VCMPI(vslti_du, 64, UD, VSLT)
+
+static uint64_t vfcmp_common(CPULoongArchState *env,
+ FloatRelation cmp, uint32_t flags)
+{
+ uint64_t ret = 0;
+
+ switch (cmp) {
+ case float_relation_less:
+ ret = (flags & FCMP_LT);
+ break;
+ case float_relation_equal:
+ ret = (flags & FCMP_EQ);
+ break;
+ case float_relation_greater:
+ ret = (flags & FCMP_GT);
+ break;
+ case float_relation_unordered:
+ ret = (flags & FCMP_UN);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ if (ret) {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+#define VFCMP(NAME, BIT, E, FN) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk, uint32_t flags) \
+{ \
+ int i; \
+ VReg t; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ vec_clear_cause(env); \
+ for (i = 0; i < LSX_LEN/BIT ; i++) { \
+ FloatRelation cmp; \
+ cmp = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
+ t.E(i) = vfcmp_common(env, cmp, flags); \
+ vec_update_fcsr0(env, GETPC()); \
+ } \
+ *Vd = t; \
+}
+
+VFCMP(vfcmp_c_s, 32, UW, float32_compare_quiet)
+VFCMP(vfcmp_s_s, 32, UW, float32_compare)
+VFCMP(vfcmp_c_d, 64, UD, float64_compare_quiet)
+VFCMP(vfcmp_s_d, 64, UD, float64_compare)
+
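+/* Bit select: where a bit of Vd is set take it from imm, otherwise from Vj. */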
+void HELPER(vbitseli_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
+{
+ int i;
+ VReg *Vd = (VReg *)vd;
+ VReg *Vj = (VReg *)vj;
+
+ for (i = 0; i < 16; i++) {
+ Vd->B(i) = (~Vd->B(i) & Vj->B(i)) | (Vd->B(i) & imm);
+ }
+}
+
+/* Copy from target/arm/tcg/sve_helper.c */
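+/* Return nonzero if any element of the m0:m1 pair equals n. */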
+static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz)
+{
+ uint64_t bits = 8 << esz;
+ uint64_t ones = dup_const(esz, 1);
+ uint64_t signs = ones << (bits - 1);
+ uint64_t cmp0, cmp1;
+
+ cmp1 = dup_const(esz, n);
+ cmp0 = cmp1 ^ m0;
+ cmp1 = cmp1 ^ m1;
+ cmp0 = (cmp0 - ones) & ~cmp0;
+ cmp1 = (cmp1 - ones) & ~cmp1;
+ return (cmp0 | cmp1) & signs;
+}
+
+#define SETANYEQZ(NAME, MO) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
+{ \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ env->cf[cd & 0x7] = do_match2(0, Vj->D(0), Vj->D(1), MO); \
+}
+
+SETANYEQZ(vsetanyeqz_b, MO_8)
+SETANYEQZ(vsetanyeqz_h, MO_16)
+SETANYEQZ(vsetanyeqz_w, MO_32)
+SETANYEQZ(vsetanyeqz_d, MO_64)
+
+#define SETALLNEZ(NAME, MO) \
+void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
+{ \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ env->cf[cd & 0x7] = !do_match2(0, Vj->D(0), Vj->D(1), MO); \
+}
+
+SETALLNEZ(vsetallnez_b, MO_8)
+SETALLNEZ(vsetallnez_h, MO_16)
+SETALLNEZ(vsetallnez_w, MO_32)
+SETALLNEZ(vsetallnez_d, MO_64)
+
+#define VPACKEV(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(2 * i + 1) = Vj->E(2 * i); \
+ temp.E(2 * i) = Vk->E(2 * i); \
+ } \
+ *Vd = temp; \
+}
+
+VPACKEV(vpackev_b, 16, B)
+VPACKEV(vpackev_h, 32, H)
+VPACKEV(vpackev_w, 64, W)
+VPACKEV(vpackev_d, 128, D)
+
+#define VPACKOD(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(2 * i + 1) = Vj->E(2 * i + 1); \
+ temp.E(2 * i) = Vk->E(2 * i + 1); \
+ } \
+ *Vd = temp; \
+}
+
+VPACKOD(vpackod_b, 16, B)
+VPACKOD(vpackod_h, 32, H)
+VPACKOD(vpackod_w, 64, W)
+VPACKOD(vpackod_d, 128, D)
+
+#define VPICKEV(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i); \
+ temp.E(i) = Vk->E(2 * i); \
+ } \
+ *Vd = temp; \
+}
+
+VPICKEV(vpickev_b, 16, B)
+VPICKEV(vpickev_h, 32, H)
+VPICKEV(vpickev_w, 64, W)
+VPICKEV(vpickev_d, 128, D)
+
+#define VPICKOD(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i + 1); \
+ temp.E(i) = Vk->E(2 * i + 1); \
+ } \
+ *Vd = temp; \
+}
+
+VPICKOD(vpickod_b, 16, B)
+VPICKOD(vpickod_h, 32, H)
+VPICKOD(vpickod_w, 64, W)
+VPICKOD(vpickod_d, 128, D)
+
+#define VILVL(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(2 * i + 1) = Vj->E(i); \
+ temp.E(2 * i) = Vk->E(i); \
+ } \
+ *Vd = temp; \
+}
+
+VILVL(vilvl_b, 16, B)
+VILVL(vilvl_h, 32, H)
+VILVL(vilvl_w, 64, W)
+VILVL(vilvl_d, 128, D)
+
+#define VILVH(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(2 * i + 1) = Vj->E(i + LSX_LEN/BIT); \
+ temp.E(2 * i) = Vk->E(i + LSX_LEN/BIT); \
+ } \
+ *Vd = temp; \
+}
+
+VILVH(vilvh_b, 16, B)
+VILVH(vilvh_h, 32, H)
+VILVH(vilvh_w, 64, W)
+VILVH(vilvh_d, 128, D)
+
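+/*
+ * vshuf.b: each byte of Va picks a byte from the 32-byte concatenation
+ * {Vj, Vk}; indices 0..15 select from Vk and 16..31 from Vj.
+ */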
+void HELPER(vshuf_b)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t vk, uint32_t va)
+{
+ int i, m;
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+ VReg *Vk = &(env->fpr[vk].vreg);
+ VReg *Va = &(env->fpr[va].vreg);
+
+ m = LSX_LEN/8;
+ for (i = 0; i < m ; i++) {
+ uint64_t k = (uint8_t)Va->B(i) % (2 * m);
+ temp.B(i) = k < m ? Vk->B(k) : Vj->B(k - m);
+ }
+ *Vd = temp;
+}
+
+#define VSHUF(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t vk) \
+{ \
+ int i, m; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ VReg *Vk = &(env->fpr[vk].vreg); \
+ \
+ m = LSX_LEN/BIT; \
+ for (i = 0; i < m; i++) { \
+ uint64_t k = ((uint8_t)Vd->E(i)) % (2 * m); \
+ temp.E(i) = k < m ? Vk->E(k) : Vj->E(k - m); \
+ } \
+ *Vd = temp; \
+}
+
+VSHUF(vshuf_h, 16, H)
+VSHUF(vshuf_w, 32, W)
+VSHUF(vshuf_d, 64, D)
+
+#define VSHUF4I(NAME, BIT, E) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int i; \
+ VReg temp; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ for (i = 0; i < LSX_LEN/BIT; i++) { \
+ temp.E(i) = Vj->E(((i) & 0xfc) + (((imm) >> \
+ (2 * ((i) & 0x03))) & 0x03)); \
+ } \
+ *Vd = temp; \
+}
+
+VSHUF4I(vshuf4i_b, 8, B)
+VSHUF4I(vshuf4i_h, 16, H)
+VSHUF4I(vshuf4i_w, 32, W)
+
+void HELPER(vshuf4i_d)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp.D(0) = (imm & 2 ? Vj : Vd)->D(imm & 1);
+ temp.D(1) = (imm & 8 ? Vj : Vd)->D((imm >> 2) & 1);
+ *Vd = temp;
+}
+
+void HELPER(vpermi_w)(CPULoongArchState *env,
+ uint32_t vd, uint32_t vj, uint32_t imm)
+{
+ VReg temp;
+ VReg *Vd = &(env->fpr[vd].vreg);
+ VReg *Vj = &(env->fpr[vj].vreg);
+
+ temp.W(0) = Vj->W(imm & 0x3);
+ temp.W(1) = Vj->W((imm >> 2) & 0x3);
+ temp.W(2) = Vd->W((imm >> 4) & 0x3);
+ temp.W(3) = Vd->W((imm >> 6) & 0x3);
+ *Vd = temp;
+}
+
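+/*
+ * vextrins: copy the element of Vj selected by the low immediate bits
+ * into the element of Vd selected by the high bits, leaving the rest
+ * of Vd unchanged.
+ */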
+#define VEXTRINS(NAME, BIT, E, MASK) \
+void HELPER(NAME)(CPULoongArchState *env, \
+ uint32_t vd, uint32_t vj, uint32_t imm) \
+{ \
+ int ins, extr; \
+ VReg *Vd = &(env->fpr[vd].vreg); \
+ VReg *Vj = &(env->fpr[vj].vreg); \
+ \
+ ins = (imm >> 4) & MASK; \
+ extr = imm & MASK; \
+ Vd->E(ins) = Vj->E(extr); \
+}
+
+VEXTRINS(vextrins_b, 8, B, 0xf)
+VEXTRINS(vextrins_h, 16, H, 0x7)
+VEXTRINS(vextrins_w, 32, W, 0x3)
+VEXTRINS(vextrins_d, 64, D, 0x1)