author | Richard Henderson <richard.henderson@linaro.org> | 2021-08-24 13:30:32 -0700
---|---|---
committer | Richard Henderson <richard.henderson@linaro.org> | 2021-10-27 17:11:22 -0700
commit | 9caca88a76a6b1e5203dd2470800941c2670a9cd (patch) |
tree | 25ddddb9cb1488829b9c8b751ae8170c675b077b /tcg |
parent | 0e0a32bacb29c4313ef195d2ea18809fd25cf5e2 (diff) |
tcg/optimize: Split out fold_sub_to_neg
Even though there is only one user, place this more complex
conversion into its own helper.
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
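
For context, the new helper rewrites "sub dst, 0, x" into "neg dst, x", which is sound because 0 - x == -x in two's-complement arithmetic, including at the wrap-around values. A minimal standalone check of that identity (a sketch, not part of the commit):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* The identity fold_sub_to_neg relies on: subtracting x from 0
         * yields the two's-complement negation ~x + 1 for every 32-bit
         * value, including 0x80000000 at the wrap-around point. */
        uint32_t samples[] = { 0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu };
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            assert(0u - samples[i] == ~samples[i] + 1u);
        }
        return 0;
    }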
Diffstat (limited to 'tcg')
-rw-r--r-- | tcg/optimize.c | 89 |
1 file changed, 47 insertions(+), 42 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 21f4251..e0d850f 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1203,7 +1203,15 @@ static bool fold_nand(OptContext *ctx, TCGOp *op)
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+    /*
+     * Because of fold_sub_to_neg, we want to always return true,
+     * via finish_folding.
+     */
+    finish_folding(ctx, op);
+    return true;
 }
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
@@ -1360,10 +1368,47 @@ static bool fold_shift(OptContext *ctx, TCGOp *op)
     return fold_const2(ctx, op);
 }
 
+static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
+{
+    TCGOpcode neg_op;
+    bool have_neg;
+
+    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
+        return false;
+    }
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        neg_op = INDEX_op_neg_i32;
+        have_neg = TCG_TARGET_HAS_neg_i32;
+        break;
+    case TCG_TYPE_I64:
+        neg_op = INDEX_op_neg_i64;
+        have_neg = TCG_TARGET_HAS_neg_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        neg_op = INDEX_op_neg_vec;
+        have_neg = (TCG_TARGET_HAS_neg_vec &&
+                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_neg) {
+        op->opc = neg_op;
+        op->args[1] = op->args[2];
+        return fold_neg(ctx, op);
+    }
+    return false;
+}
+
 static bool fold_sub(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_sub_to_neg(ctx, op)) {
         return true;
     }
     return false;
@@ -1497,46 +1542,6 @@ void tcg_optimize(TCGContext *s)
                 continue;
             }
             break;
-        CASE_OP_32_64_VEC(sub):
-            {
-                TCGOpcode neg_op;
-                bool have_neg;
-
-                if (arg_is_const(op->args[2])) {
-                    /* Proceed with possible constant folding. */
-                    break;
-                }
-                switch (ctx.type) {
-                case TCG_TYPE_I32:
-                    neg_op = INDEX_op_neg_i32;
-                    have_neg = TCG_TARGET_HAS_neg_i32;
-                    break;
-                case TCG_TYPE_I64:
-                    neg_op = INDEX_op_neg_i64;
-                    have_neg = TCG_TARGET_HAS_neg_i64;
-                    break;
-                case TCG_TYPE_V64:
-                case TCG_TYPE_V128:
-                case TCG_TYPE_V256:
-                    neg_op = INDEX_op_neg_vec;
-                    have_neg = tcg_can_emit_vec_op(neg_op, ctx.type,
-                                                   TCGOP_VECE(op)) > 0;
-                    break;
-                default:
-                    g_assert_not_reached();
-                }
-                if (!have_neg) {
-                    break;
-                }
-                if (arg_is_const(op->args[1])
-                    && arg_info(op->args[1])->val == 0) {
-                    op->opc = neg_op;
-                    reset_temp(op->args[0]);
-                    op->args[1] = op->args[2];
-                    continue;
-                }
-            }
-            break;
         default:
             break;
         }
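
Why fold_neg now always returns true: fold_sub_to_neg mutates the op in place (op->opc = neg_op) before delegating to fold_neg, so fold_neg must report the op as fully handled; if it returned false, fold_sub would propagate false and the main loop would continue generic processing of an op whose opcode no longer matches the case it dispatched on. A runnable toy model of that contract (hypothetical names, not QEMU's actual OptContext machinery):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the fold_*() contract: a fold function returns true
     * iff the op has been fully disposed of, so the driver must skip all
     * further generic handling of it. */

    enum opcode { OP_SUB, OP_NEG };

    struct op {
        enum opcode opc;
        int args[3];        /* args[0] = dst, args[1..2] = sources */
        bool is_const[3];
    };

    static bool fold_neg_model(struct op *op)
    {
        /* Stands in for fold_neg: finish the (possibly rewritten) op
         * and always report it as handled. */
        (void)op;
        return true;
    }

    static bool fold_sub_to_neg_model(struct op *op)
    {
        if (!op->is_const[1] || op->args[1] != 0) {
            return false;               /* not "sub dst, 0, x": leave alone */
        }
        op->opc = OP_NEG;               /* mutate the op in place... */
        op->args[1] = op->args[2];
        return fold_neg_model(op);      /* ...so it must come back "done" */
    }

    int main(void)
    {
        struct op op = { OP_SUB, { 0, 0, 7 }, { false, true, true } };
        bool done = fold_sub_to_neg_model(&op);
        printf("done=%d opc=%s src=%d\n",
               done, op.opc == OP_NEG ? "neg" : "sub", op.args[1]);
        return 0;
    }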