From 0e0a32bacb29c4313ef195d2ea18809fd25cf5e2 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Tue, 24 Aug 2021 13:18:01 -0700
Subject: [PATCH] tcg/optimize: Split out fold_to_not

Split out the conditional conversion from a more complex logical
operation to a simple NOT.  Create a couple more helpers to make
this easy for the outer-most logical operations.

Reviewed-by: Luis Pires
Signed-off-by: Richard Henderson
---
 tcg/optimize.c | 158 +++++++++++++++++++++++++++----------------------
 1 file changed, 86 insertions(+), 72 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index e869fa7e78..21f4251b4f 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -694,6 +694,52 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+/*
+ * Convert @op to NOT, if NOT is supported by the host.
+ * Return true if the conversion is successful, which will still
+ * indicate that the processing is complete.
+ */
+static bool fold_not(OptContext *ctx, TCGOp *op);
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
+{
+    TCGOpcode not_op;
+    bool have_not;
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        not_op = INDEX_op_not_i32;
+        have_not = TCG_TARGET_HAS_not_i32;
+        break;
+    case TCG_TYPE_I64:
+        not_op = INDEX_op_not_i64;
+        have_not = TCG_TARGET_HAS_not_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        not_op = INDEX_op_not_vec;
+        have_not = TCG_TARGET_HAS_not_vec;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_not) {
+        op->opc = not_op;
+        op->args[1] = op->args[idx];
+        return fold_not(ctx, op);
+    }
+    return false;
+}
+
+/* If the binary operation has first argument @i, fold to NOT. */
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+        return fold_to_not(ctx, op, 2);
+    }
+    return false;
+}
+
 /* If the binary operation has second argument @i, fold to @i. */
 static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -703,6 +749,15 @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
     return false;
 }
 
+/* If the binary operation has second argument @i, fold to NOT. */
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+        return fold_to_not(ctx, op, 1);
+    }
+    return false;
+}
+
 /* If the binary operation has both arguments equal, fold to @i. */
 static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -781,7 +836,8 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -987,7 +1043,11 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_extract(OptContext *ctx, TCGOp *op)
@@ -1134,7 +1194,11 @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, -1)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
@@ -1144,12 +1208,22 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_not(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    /* Because of fold_to_not, we want to always return true, via finish. */
+    finish_folding(ctx, op);
+    return true;
 }
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
@@ -1163,7 +1237,11 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_ix_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
@@ -1299,7 +1377,8 @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -1458,71 +1537,6 @@ void tcg_optimize(TCGContext *s)
                 }
             }
             break;
-        CASE_OP_32_64_VEC(xor):
-        CASE_OP_32_64(nand):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == -1) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64(nor):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == 0) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(andc):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == -1) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(orc):
-        CASE_OP_32_64(eqv):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == 0) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-        try_not:
-            {
-                TCGOpcode not_op;
-                bool have_not;
-
-                switch (ctx.type) {
-                case TCG_TYPE_I32:
-                    not_op = INDEX_op_not_i32;
-                    have_not = TCG_TARGET_HAS_not_i32;
-                    break;
-                case TCG_TYPE_I64:
-                    not_op = INDEX_op_not_i64;
-                    have_not = TCG_TARGET_HAS_not_i64;
-                    break;
-                case TCG_TYPE_V64:
-                case TCG_TYPE_V128:
-                case TCG_TYPE_V256:
-                    not_op = INDEX_op_not_vec;
-                    have_not = TCG_TARGET_HAS_not_vec;
-                    break;
-                default:
-                    g_assert_not_reached();
-                }
-                if (!have_not) {
-                    break;
-                }
-                op->opc = not_op;
-                reset_temp(op->args[0]);
-                op->args[1] = op->args[i];
-                continue;
-            }
         default:
             break;
         }
-- 
2.39.2
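
Each of the new folds rests on a one-line Boolean identity that turns a two-operand
logical op with one constant operand into a plain NOT: andc(-1, x), orc(0, x),
eqv(x, 0), nand(x, -1), nor(x, 0) and xor(x, -1) all equal ~x.  The standalone C
program below is a minimal sketch, not part of QEMU or of this patch; it assumes
TCG's usual operand order and definitions (andc(a, b) = a & ~b, orc(a, b) = a | ~b,
eqv(a, b) = ~(a ^ b), nand(a, b) = ~(a & b), nor(a, b) = ~(a | b)) and simply
asserts the identities over a few arbitrary sample values.

/*
 * Standalone sanity check for the NOT-folding identities (not part of
 * QEMU; file and program names are arbitrary).  Build with any C
 * compiler, e.g. "cc -o notfold notfold.c".
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t samples[] = {
        0, 1, 0x80000000u, UINT64_MAX, 0x123456789abcdef0ull
    };

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        uint64_t x = samples[i];

        assert((UINT64_MAX & ~x) == ~x);  /* andc(-1, x): fold_ix_to_not, i = -1 */
        assert((0 | ~x) == ~x);           /* orc(0, x):   fold_ix_to_not, i = 0  */
        assert(~(x ^ 0) == ~x);           /* eqv(x, 0):   fold_xi_to_not, i = 0  */
        assert(~(x & UINT64_MAX) == ~x);  /* nand(x, -1): fold_xi_to_not, i = -1 */
        assert(~(x | 0) == ~x);           /* nor(x, 0):   fold_xi_to_not, i = 0  */
        assert((x ^ UINT64_MAX) == ~x);   /* xor(x, -1):  fold_xi_to_not, i = -1 */
    }
    printf("all NOT-folding identities hold\n");
    return 0;
}

The comment on each assertion names the helper that performs the corresponding fold
in the patch: fold_ix_to_not tests the first source operand against the constant,
fold_xi_to_not the second.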