author     Richard Henderson <richard.henderson@linaro.org>  2021-08-26 13:24:59 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2021-10-28 20:55:07 -0700
commit     93a967fbb571ae34857c769dbf0bcc08f2286328 (patch)
tree       6d48adccdac938a187965e46ff3a029432a8ec84 /tcg
parent     2b9d0c59edec097c72ce9b917d3c08dc5d59cdda (diff)
tcg/optimize: Propagate sign info for shifting
For constant shifts, we can simply shift the s_mask.

For variable shifts, we know that sar does not reduce
the s_mask, which helps for sequences like

    ext32s_i64  t, in
    sar_i64     t, t, v
    ext32s_i64  out, t

allowing the final extend to be eliminated.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
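As an illustration of the sar claim above (a minimal sketch, not part of the patch; it assumes GCC or Clang, where __builtin_clrsbll is available and >> on a negative int64_t is an arithmetic shift):

    #include <assert.h>
    #include <stdint.h>

    /* Leading redundant sign bits: how many bits below bit 63 merely
     * repeat it.  This is the quantity s_mask encodes as a left-aligned
     * mask.  GCC/Clang builtin; assumed available. */
    static int clrsb64(int64_t v)
    {
        return __builtin_clrsbll(v);
    }

    int main(void)
    {
        const int64_t samples[] = { 0, -1, 42, -123456789,
                                    INT64_MAX, INT64_MIN };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            for (int sh = 0; sh < 64; sh++) {
                /* sar can add sign copies but never removes one, even
                 * when the shift count is not known at compile time. */
                assert(clrsb64(samples[i] >> sh) >= clrsb64(samples[i]));
            }
        }
        return 0;
    }

The assertion, exercised over every shift count, is exactly the invariant the optimizer relies on for variable sar.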
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/optimize.c  50
1 file changed, 47 insertions(+), 3 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index c0eccc6..dbb2d46 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -85,6 +85,18 @@ static uint64_t smask_from_zmask(uint64_t zmask)
     return ~(~0ull >> rep);
 }
 
+/*
+ * Recreate a properly left-aligned smask after manipulation.
+ * Some bit-shuffling, particularly shifts and rotates, may
+ * retain sign bits on the left, but may scatter disconnected
+ * sign bits on the right.  Retain only what remains to the left.
+ */
+static uint64_t smask_from_smask(int64_t smask)
+{
+    /* Only the 1 bits are significant for smask */
+    return smask_from_zmask(~smask);
+}
+
 static inline TempOptInfo *ts_info(TCGTemp *ts)
 {
     return ts->state_ptr;
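To make the canonicalization above concrete, here is a standalone sketch of the constant-shift path (illustrative only: clz64 is a local stand-in for QEMU's helper, and the body of smask_from_zmask is reconstructed from the context shown in the hunk header, so treat it as an assumption):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for QEMU's clz64(). */
    static int clz64(uint64_t v)
    {
        return v ? __builtin_clzll(v) : 64;
    }

    /* Assumed reconstruction of the function named in the hunk header:
     * derive a sign mask from a mask of known-zero high bits. */
    static uint64_t smask_from_zmask(uint64_t zmask)
    {
        int rep = clz64(zmask);
        if (rep == 0) {
            return 0;
        }
        rep -= 1;
        return ~(~0ull >> rep);
    }

    /* The helper added by this patch. */
    static uint64_t smask_from_smask(int64_t smask)
    {
        /* Only the 1 bits are significant for smask */
        return smask_from_zmask(~smask);
    }

    int main(void)
    {
        /* s_mask of a value known sign-extended from 32 bits:
         * the top 32 bits all repeat the 32-bit sign bit. */
        uint64_t s_mask = 0xffffffff00000000ull;

        /* Constant sar keeps the run contiguous and makes it longer ... */
        uint64_t sar4 = (uint64_t)((int64_t)s_mask >> 4);
        /* ... while constant shr pushes zeros in from the left,
         * destroying the left-aligned run entirely. */
        uint64_t shr4 = s_mask >> 4;

        printf("sar 4: %016" PRIx64 " -> %016" PRIx64 "\n",
               sar4, smask_from_smask(sar4));
        printf("shr 4: %016" PRIx64 " -> %016" PRIx64 "\n",
               shr4, smask_from_smask(shr4));
        return 0;
    }

With s_mask covering the top 32 bits, sar by 4 leaves a longer contiguous run for the helper to keep, while shr by 4 leaves no left-aligned run at all, so the helper correctly reports that no sign information survives (mask 0).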
@@ -1843,18 +1855,50 @@ static bool fold_sextract(OptContext *ctx, TCGOp *op)
 
 static bool fold_shift(OptContext *ctx, TCGOp *op)
 {
+    uint64_t s_mask, z_mask, sign;
+
     if (fold_const2(ctx, op) ||
         fold_ix_to_i(ctx, op, 0) ||
         fold_xi_to_x(ctx, op, 0)) {
         return true;
     }
 
+    s_mask = arg_info(op->args[1])->s_mask;
+    z_mask = arg_info(op->args[1])->z_mask;
+
     if (arg_is_const(op->args[2])) {
-        ctx->z_mask = do_constant_folding(op->opc, ctx->type,
-                                          arg_info(op->args[1])->z_mask,
-                                          arg_info(op->args[2])->val);
+        int sh = arg_info(op->args[2])->val;
+
+        ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
+
+        s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
+        ctx->s_mask = smask_from_smask(s_mask);
+
         return fold_masks(ctx, op);
     }
+
+    switch (op->opc) {
+    CASE_OP_32_64(sar):
+        /*
+         * Arithmetic right shift will not reduce the number of
+         * input sign repetitions.
+         */
+        ctx->s_mask = s_mask;
+        break;
+    CASE_OP_32_64(shr):
+        /*
+         * If the sign bit is known zero, then logical right shift
+         * will not reduce the number of input sign repetitions.
+         */
+        sign = (s_mask & -s_mask) >> 1;
+        if (!(z_mask & sign)) {
+            ctx->s_mask = s_mask;
+        }
+        break;
+    default:
+        break;
+    }
+
     return false;
 }
 
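The sign extraction in the shr case above works on the shape of s_mask: since s_mask is a left-aligned run, s_mask & -s_mask isolates the lowest bit of that run, and shifting it right by one lands on the effective sign-bit position of the narrowed value. A small sketch of that arithmetic (the z_mask value is a made-up example):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* A value known to be sign-extended from 32 bits:
         * s_mask covers bits 63..32. */
        uint64_t s_mask = 0xffffffff00000000ull;

        /* Lowest bit of the left-aligned run is bit 32; one position
         * below it, bit 31, is the effective sign bit of the value. */
        uint64_t sign = (s_mask & -s_mask) >> 1;
        assert(sign == 1ull << 31);

        /* Hypothetical z_mask: the value is also known zero-extended
         * from 16 bits, so bit 31 is provably zero.  The value is
         * therefore non-negative, and logical shr behaves like sar,
         * preserving the sign repetitions tracked by s_mask. */
        uint64_t z_mask = 0xffffull;
        assert(!(z_mask & sign));
        return 0;
    }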