#define TCG_CT_CONST_S32 0x100
#define TCG_CT_CONST_U32 0x200
#define TCG_CT_CONST_I32 0x400
+#define TCG_CT_CONST_WSZ 0x800 /* the operand width, 32 or 64 */
/* Registers used with L constraint, which are the first argument
   registers on x86_64, and two random call clobbered registers on
   i386. */
#else
# define have_bmi2 0
#endif
+#if defined(CONFIG_CPUID_H) && defined(bit_LZCNT)
+static bool have_lzcnt;
+#else
+# define have_lzcnt 0
+#endif
static tcg_insn_unit *tb_ret_addr;
tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
break;
case 'c':
- case_c:
ct->ct |= TCG_CT_REG;
tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
break;
tcg_regset_set32(ct->u.regs, 0, 0xf);
break;
case 'r':
- case_r:
ct->ct |= TCG_CT_REG;
if (TCG_TARGET_REG_BITS == 64) {
            tcg_regset_set32(ct->u.regs, 0, 0xffff);
        } else {
            tcg_regset_set32(ct->u.regs, 0, 0xff);
}
break;
- case 'C':
- /* With SHRX et al, we need not use ECX as shift count register. */
- if (have_bmi2) {
- goto case_r;
- } else {
- goto case_c;
- }
+ case 'W':
+ /* With TZCNT/LZCNT, we can have operand-size as an input. */
+ ct->ct |= TCG_CT_CONST_WSZ;
+ break;
/* qemu_ld/st address constraint */
case 'L':
if ((ct & TCG_CT_CONST_I32) && ~val == (int32_t)~val) {
return 1;
}
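+    /* 'W' matches only the operand width itself (32 or 64), which is
+       exactly the count TZCNT/LZCNT produce for a zero input.  */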
+ if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
+ return 1;
+ }
return 0;
}
#define OPC_ARITH_GvEv (0x03) /* ... plus (ARITH_FOO << 3) */
#define OPC_ANDN (0xf2 | P_EXT38)
#define OPC_ADD_GvEv (OPC_ARITH_GvEv | (ARITH_ADD << 3))
+#define OPC_BSF (0xbc | P_EXT)
+#define OPC_BSR (0xbd | P_EXT)
#define OPC_BSWAP (0xc8 | P_EXT)
#define OPC_CALL_Jz (0xe8)
#define OPC_CMOVCC (0x40 | P_EXT) /* ... plus condition code */
#define OPC_JMP_long (0xe9)
#define OPC_JMP_short (0xeb)
#define OPC_LEA (0x8d)
+#define OPC_LZCNT (0xbd | P_EXT | P_SIMDF3) /* F3 0F BD; BSR if no LZCNT */
#define OPC_MOVB_EvGv (0x88) /* stores, more or less */
#define OPC_MOVL_EvGv (0x89) /* stores, more or less */
#define OPC_MOVL_GvEv (0x8b) /* loads, more or less */
#define OPC_SHLX (0xf7 | P_EXT38 | P_DATA16)
#define OPC_SHRX (0xf7 | P_EXT38 | P_SIMDF2)
#define OPC_TESTL (0x85)
+#define OPC_TZCNT (0xbc | P_EXT | P_SIMDF3) /* F3 0F BC; BSF if no TZCNT */
#define OPC_XCHG_ax_r32 (0x90)
#define OPC_GRP3_Ev (0xf7)
if (opc & P_ADDR32) {
tcg_out8(s, 0x67);
}
+ if (opc & P_SIMDF3) {
+ tcg_out8(s, 0xf3);
+ } else if (opc & P_SIMDF2) {
+ tcg_out8(s, 0xf2);
+ }
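+    /* The mandatory F3/F2 prefix must precede the REX byte, e.g.
+       OPC_LZCNT + P_REXW emits F3 48 0F BD /r; emitting it here,
+       before rex is assembled below, preserves that ordering.  */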
rex = 0;
rex |= (opc & P_REXW) ? 0x8 : 0x0; /* REX.W */
if (opc & P_DATA16) {
tcg_out8(s, 0x66);
}
+ if (opc & P_SIMDF3) {
+ tcg_out8(s, 0xf3);
+ } else if (opc & P_SIMDF2) {
+ tcg_out8(s, 0xf2);
+ }
if (opc & (P_EXT | P_EXT38)) {
tcg_out8(s, 0x0f);
if (opc & P_EXT38) {
}
#endif
-static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGArg dest,
- TCGArg c1, TCGArg c2, int const_c2,
- TCGArg v1)
+static void tcg_out_cmov(TCGContext *s, TCGCond cond, int rexw,
+ TCGReg dest, TCGReg v1)
{
- tcg_out_cmp(s, c1, c2, const_c2, 0);
if (have_cmov) {
- tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond], dest, v1);
+ tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | rexw, dest, v1);
} else {
        TCGLabel *over = gen_new_label();
        tcg_out_jxx(s, tcg_cond_to_jcc[tcg_invert_cond(cond)], over, 1);
        /* have_cmov is constant 1 for 64-bit hosts, so this fallback
           only runs on 32-bit hosts; TCG_TYPE_I32 suffices.  */
        tcg_out_mov(s, TCG_TYPE_I32, dest, v1);
        tcg_out_label(s, over, s->code_ptr);
    }
}
+static void tcg_out_movcond32(TCGContext *s, TCGCond cond, TCGReg dest,
+ TCGReg c1, TCGArg c2, int const_c2,
+ TCGReg v1)
+{
+ tcg_out_cmp(s, c1, c2, const_c2, 0);
+ tcg_out_cmov(s, cond, 0, dest, v1);
+}
+
#if TCG_TARGET_REG_BITS == 64
-static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGArg dest,
- TCGArg c1, TCGArg c2, int const_c2,
- TCGArg v1)
+static void tcg_out_movcond64(TCGContext *s, TCGCond cond, TCGReg dest,
+ TCGReg c1, TCGArg c2, int const_c2,
+ TCGReg v1)
{
tcg_out_cmp(s, c1, c2, const_c2, P_REXW);
- tcg_out_modrm(s, OPC_CMOVCC | tcg_cond_to_jcc[cond] | P_REXW, dest, v1);
+ tcg_out_cmov(s, cond, P_REXW, dest, v1);
}
#endif
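+
+/* Illustrative expansions for tcg_out_ctz below (AT&T syntax; the
+   register names are examples only):
+     ctz d, s, $width  (BMI1)     ->  tzcnt %s, %d
+     ctz d, s, f       (BMI1)     ->  tzcnt %s, %d; cmovb %f, %d
+     ctz d, s, d       (no BMI1)  ->  bsf %s, %d   (d unchanged if s == 0)  */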
+static void tcg_out_ctz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
+                        TCGArg arg2, bool const_a2)
+{
+    if (have_bmi1) {
+        tcg_out_modrm(s, OPC_TZCNT + rexw, dest, arg1);
+        if (const_a2) {
+            tcg_debug_assert(arg2 == (rexw ? 64 : 32));
+        } else {
+            tcg_debug_assert(dest != arg2);
+            /* TZCNT sets C if the input was zero.  */
+            tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
+        }
+    } else {
+        /* ??? The manual says that the output is undefined when the
+           input is zero, but real hardware leaves it unchanged.  As
+           noted in target-i386/translate.c, real programs depend on
+           this -- now we are one more of those.  */
+        tcg_debug_assert(dest == arg2);
+        tcg_out_modrm(s, OPC_BSF + rexw, dest, arg1);
+    }
+}
+
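+/* Worked example for the BSR fallback in tcg_out_clz below:
+   clz32(0x00010000): BSR writes index 16 and 16 ^ 31 = 15, the
+   leading-zero count (x ^ 31 == 31 - x for 0 <= x <= 31).  For a
+   zero input, dest already holds arg2 ^ 31; BSR leaves it unchanged
+   and the final XOR restores arg2.  */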
+static void tcg_out_clz(TCGContext *s, int rexw, TCGReg dest, TCGReg arg1,
+ TCGArg arg2, bool const_a2)
+{
+ if (have_lzcnt) {
+ tcg_out_modrm(s, OPC_LZCNT + rexw, dest, arg1);
+ if (const_a2) {
+ tcg_debug_assert(arg2 == (rexw ? 64 : 32));
+ } else {
+ tcg_debug_assert(dest != arg2);
+ /* LZCNT sets C if the input was zero. */
+ tcg_out_cmov(s, TCG_COND_LTU, rexw, dest, arg2);
+ }
+ } else {
+        TCGType type = rexw ? TCG_TYPE_I64 : TCG_TYPE_I32;
+ TCGArg rev = rexw ? 63 : 31;
+
+ /* Recall that the output of BSR is the index not the count.
+ Therefore we must adjust the result by ^ (SIZE-1). In some
+ cases below, we prefer an extra XOR to a JMP. */
+ /* ??? See the comment in tcg_out_ctz re BSF. */
+ if (const_a2) {
+ tcg_debug_assert(dest != arg1);
+ tcg_out_movi(s, type, dest, arg2 ^ rev);
+ } else {
+ tcg_debug_assert(dest == arg2);
+ tgen_arithi(s, ARITH_XOR + rexw, dest, rev, 0);
+ }
+ tcg_out_modrm(s, OPC_BSR + rexw, dest, arg1);
+ tgen_arithi(s, ARITH_XOR + rexw, dest, rev, 0);
+ }
+}
+
static void tcg_out_branch(TCGContext *s, int call, tcg_insn_unit *dest)
{
intptr_t disp = tcg_pcrel_diff(s, dest) - 5;
break;
OP_32_64(shl):
+        /* For small constant 3-operand shift, use LEA.  The unsigned
+           compare (a2 - 1) < 3 accepts only a2 in {1,2,3}, i.e. the
+           LEA scales 2, 4 and 8.  */
+        if (const_a2 && a0 != a1 && (a2 - 1) < 3) {
+ if (a2 - 1 == 0) {
+ /* shl $1,a1,a0 -> lea (a1,a1),a0 */
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, a1, a1, 0, 0);
+ } else {
+                /* shl $n,a1,a0 -> lea 0(,a1,2^n),a0 */
+ tcg_out_modrm_sib_offset(s, OPC_LEA + rexw, a0, -1, a1, a2, 0);
+ }
+ break;
+ }
c = SHIFT_SHL;
vexop = OPC_SHLX;
goto gen_shift_maybe_vex;
c = SHIFT_ROR;
goto gen_shift;
gen_shift_maybe_vex:
- if (have_bmi2 && !const_a2) {
- tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
- break;
+ if (have_bmi2) {
+ if (!const_a2) {
+ tcg_out_vex_modrm(s, vexop + rexw, a0, a2, a1);
+ break;
+ }
+ tcg_out_mov(s, rexw ? TCG_TYPE_I64 : TCG_TYPE_I32, a0, a1);
}
/* FALLTHRU */
gen_shift:
}
break;
+ OP_32_64(ctz):
+ tcg_out_ctz(s, rexw, args[0], args[1], args[2], const_args[2]);
+ break;
+ OP_32_64(clz):
+ tcg_out_clz(s, rexw, args[0], args[1], args[2], const_args[2]);
+ break;
+
case INDEX_op_brcond_i32:
tcg_out_brcond32(s, a2, a0, a1, const_args[1], arg_label(args[3]), 0);
break;
static const TCGTargetOpDef r_q = { .args_ct_str = { "r", "q" } };
static const TCGTargetOpDef r_re = { .args_ct_str = { "r", "re" } };
static const TCGTargetOpDef r_0 = { .args_ct_str = { "r", "0" } };
+ static const TCGTargetOpDef r_r_ri = { .args_ct_str = { "r", "r", "ri" } };
static const TCGTargetOpDef r_r_re = { .args_ct_str = { "r", "r", "re" } };
static const TCGTargetOpDef r_0_re = { .args_ct_str = { "r", "0", "re" } };
- static const TCGTargetOpDef r_0_Ci = { .args_ct_str = { "r", "0", "Ci" } };
static const TCGTargetOpDef r_0_ci = { .args_ct_str = { "r", "0", "ci" } };
static const TCGTargetOpDef r_L = { .args_ct_str = { "r", "L" } };
static const TCGTargetOpDef L_L = { .args_ct_str = { "L", "L" } };
case INDEX_op_shr_i64:
case INDEX_op_sar_i32:
case INDEX_op_sar_i64:
- return &r_0_Ci;
+ return have_bmi2 ? &r_r_ri : &r_0_ci;
case INDEX_op_rotl_i32:
case INDEX_op_rotl_i64:
case INDEX_op_rotr_i32:
= { .args_ct_str = { "r", "r", "0", "1", "re", "re" } };
return &arith2;
}
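+    /* Constraint rationale for ctz/clz below: without TZCNT/LZCNT the
+       zero-input fallback relies on BSF/BSR leaving the output intact,
+       so arg2 is tied to the output ("0"); clz also allows a constant
+       ("0i") that is preloaded.  With TZCNT/LZCNT, arg2 may be any
+       register or the operand-width constant ("rW"), and the output is
+       early-clobber ("&r") so it cannot alias the inputs.  */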
+ case INDEX_op_ctz_i32:
+ case INDEX_op_ctz_i64:
+ {
+ static const TCGTargetOpDef ctz[2] = {
+ { .args_ct_str = { "r", "r", "0" } },
+ { .args_ct_str = { "&r", "r", "rW" } },
+ };
+ return &ctz[have_bmi1];
+ }
+ case INDEX_op_clz_i32:
+ case INDEX_op_clz_i64:
+ {
+ static const TCGTargetOpDef clz[2] = {
+ { .args_ct_str = { "&r", "r", "0i" } },
+ { .args_ct_str = { "&r", "r", "rW" } },
+ };
+ return &clz[have_lzcnt];
+ }
case INDEX_op_qemu_ld_i32:
return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
}
#endif
+#ifndef have_lzcnt
+    max = __get_cpuid_max(0x80000000, 0);
+    if (max >= 0x80000001) {
+        __cpuid(0x80000001, a, b, c, d);
+        /* LZCNT was introduced with AMD Barcelona and Intel Haswell CPUs;
+           it is reported in ECX bit 5 (bit_LZCNT, AMD's ABM bit).  */
+        have_lzcnt = (c & bit_LZCNT) != 0;
+    }
+ }
+#endif
+
if (TCG_TARGET_REG_BITS == 64) {
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffff);