aboutsummaryrefslogtreecommitdiff
path: root/gcc/combine.cc
diff options
context:
space:
mode:
authorliuhongt <hongtao.liu@intel.com>2023-03-21 13:35:06 +0800
committerliuhongt <hongtao.liu@intel.com>2023-05-06 09:01:18 +0800
commit23b60aeb006182f2a1af8a3cdbfc7ac90a29fb78 (patch)
tree5975c26afaa6e37c899bb789a1dd3a41e5a13a9d /gcc/combine.cc
parent82aef047eda376f41eccf51ba1277e46160f6420 (diff)
downloadgcc-23b60aeb006182f2a1af8a3cdbfc7ac90a29fb78.zip
gcc-23b60aeb006182f2a1af8a3cdbfc7ac90a29fb78.tar.gz
gcc-23b60aeb006182f2a1af8a3cdbfc7ac90a29fb78.tar.bz2
Canonicalize vec_merge when mask is constant.
Use swap_commutative_operands_p for canonicalization. When both values have the same operand precedence, the first bit in the mask should select the first operand. The canonicalization should help backends with pattern matching, i.e. the x86 backend has lots of vec_merge patterns; combine will create any form of vec_merge (mask or inverted mask), so the backend would need to add 2 patterns to match exactly 1 instruction. The canonicalization can simplify the 2 patterns to 1. gcc/ChangeLog: * combine.cc (maybe_swap_commutative_operands): Canonicalize vec_merge when mask is constant. * doc/md.texi: Document vec_merge canonicalization.
Diffstat (limited to 'gcc/combine.cc')
-rw-r--r--gcc/combine.cc22
1 files changed, 22 insertions, 0 deletions
diff --git a/gcc/combine.cc b/gcc/combine.cc
index 0106092..5aa0ec5 100644
--- a/gcc/combine.cc
+++ b/gcc/combine.cc
@@ -5631,6 +5631,28 @@ maybe_swap_commutative_operands (rtx x)
SUBST (XEXP (x, 0), XEXP (x, 1));
SUBST (XEXP (x, 1), temp);
}
+
+ unsigned n_elts = 0;
+ if (GET_CODE (x) == VEC_MERGE
+ && CONST_INT_P (XEXP (x, 2))
+ && GET_MODE_NUNITS (GET_MODE (x)).is_constant (&n_elts)
+ && (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1))
+ /* Two operands have same precedence, then
+ first bit of mask select first operand. */
+ || (!swap_commutative_operands_p (XEXP (x, 1), XEXP (x, 0))
+ && !(UINTVAL (XEXP (x, 2)) & 1))))
+ {
+ rtx temp = XEXP (x, 0);
+ unsigned HOST_WIDE_INT sel = UINTVAL (XEXP (x, 2));
+ unsigned HOST_WIDE_INT mask = HOST_WIDE_INT_1U;
+ if (n_elts == HOST_BITS_PER_WIDE_INT)
+ mask = -1;
+ else
+ mask = (HOST_WIDE_INT_1U << n_elts) - 1;
+ SUBST (XEXP (x, 0), XEXP (x, 1));
+ SUBST (XEXP (x, 1), temp);
+ SUBST (XEXP (x, 2), GEN_INT (~sel & mask));
+ }
}
/* Simplify X, a piece of RTL. We just operate on the expression at the