diff options
author | Jakub Jelinek <jakub@redhat.com> | 2024-05-13 11:15:27 +0200 |
---|---|---|
committer | Jakub Jelinek <jakub@redhat.com> | 2024-05-13 11:17:41 +0200 |
commit | b621482296f6dec0abb22ed39cc4ce6811535d47 (patch) | |
tree | 40ed7f8c89d3fcae71c25757f86faa44009c9eed /gcc | |
parent | f3f02a750c7b34b751fa809ab03d29b2ccf0785d (diff) | |
download | gcc-b621482296f6dec0abb22ed39cc4ce6811535d47.zip gcc-b621482296f6dec0abb22ed39cc4ce6811535d47.tar.gz gcc-b621482296f6dec0abb22ed39cc4ce6811535d47.tar.bz2 |
tree-ssa-math-opts: Pattern recognize yet another .ADD_OVERFLOW pattern [PR113982]
We pattern recognize already many different patterns, and closest to the
requested one also
yc = (type) y;
zc = (type) z;
x = yc + zc;
w = (typeof_y) x;
if (x > max)
where y/z has the same unsigned type and type is a wider unsigned type
and max is maximum value of the narrower unsigned type.
But apparently people are creative in writing this in different ways,
this requests
yc = (type) y;
zc = (type) z;
x = yc + zc;
w = (typeof_y) x;
if (x >> narrower_type_bits)
The following patch implements that.
2024-05-13 Jakub Jelinek <jakub@redhat.com>
PR middle-end/113982
* tree-ssa-math-opts.cc (arith_overflow_check_p): Also return 1
for RSHIFT_EXPR by precision of maxval if shift result is only
used in a cast or comparison against zero.
(match_arith_overflow): Handle the RSHIFT_EXPR use case.
* gcc.dg/pr113982.c: New test.
Diffstat (limited to 'gcc')
-rw-r--r-- | gcc/testsuite/gcc.dg/pr113982.c | 60 | ||||
-rw-r--r-- | gcc/tree-ssa-math-opts.cc | 121 |
2 files changed, 177 insertions, 4 deletions
diff --git a/gcc/testsuite/gcc.dg/pr113982.c b/gcc/testsuite/gcc.dg/pr113982.c new file mode 100644 index 0000000..4c5be6c --- /dev/null +++ b/gcc/testsuite/gcc.dg/pr113982.c @@ -0,0 +1,60 @@ +/* PR middle-end/113982 */ +/* { dg-do compile } */ +/* { dg-options "-O2 -fdump-tree-widening_mul" } */ + +#if __SIZEOF_INT128__ +typedef __uint128_t W; +typedef unsigned long long T; +#else +typedef unsigned long long W; +typedef unsigned int T; +#endif +#define B __CHAR_BIT__ * sizeof (T) + +struct S { int p; T r; }; + +struct S +foo (T x, T y) +{ + W z = (W) x + y; + return (struct S) { z >> B, (T) z }; +} + +struct S +bar (T x) +{ + W z = (W) x + 132; + return (struct S) { z >> B, (T) z }; +} + +struct S +baz (T x, unsigned short y) +{ + W z = (W) x + y; + return (struct S) { z >> B, (T) z }; +} + +struct S +qux (unsigned short x, T y) +{ + W z = (W) x + y; + return (struct S) { z >> B, (T) z }; +} + +struct S +corge (T x, T y) +{ + T w = x + y; + W z = (W) x + y; + return (struct S) { z >> B, w }; +} + +struct S +garple (T x, T y) +{ + W z = (W) x + y; + T w = x + y; + return (struct S) { z >> B, w }; +} + +/* { dg-final { scan-tree-dump-times "ADD_OVERFLOW" 6 "widening_mul" { target { i?86-*-* x86_64-*-* } } } } */ diff --git a/gcc/tree-ssa-math-opts.cc b/gcc/tree-ssa-math-opts.cc index 705f4a4..e8c804f 100644 --- a/gcc/tree-ssa-math-opts.cc +++ b/gcc/tree-ssa-math-opts.cc @@ -3947,6 +3947,66 @@ arith_overflow_check_p (gimple *stmt, gimple *cast_stmt, gimple *&use_stmt, else return 0; + if (maxval + && ccode == RSHIFT_EXPR + && crhs1 == lhs + && TREE_CODE (crhs2) == INTEGER_CST + && wi::to_widest (crhs2) == TYPE_PRECISION (TREE_TYPE (maxval))) + { + tree shiftlhs = gimple_assign_lhs (use_stmt); + if (!shiftlhs) + return 0; + use_operand_p use; + if (!single_imm_use (shiftlhs, &use, &cur_use_stmt)) + return 0; + if (gimple_code (cur_use_stmt) == GIMPLE_COND) + { + ccode = gimple_cond_code (cur_use_stmt); + crhs1 = gimple_cond_lhs (cur_use_stmt); + crhs2 = 
gimple_cond_rhs (cur_use_stmt); + } + else if (is_gimple_assign (cur_use_stmt)) + { + if (gimple_assign_rhs_class (cur_use_stmt) == GIMPLE_BINARY_RHS) + { + ccode = gimple_assign_rhs_code (cur_use_stmt); + crhs1 = gimple_assign_rhs1 (cur_use_stmt); + crhs2 = gimple_assign_rhs2 (cur_use_stmt); + } + else if (gimple_assign_rhs_code (cur_use_stmt) == COND_EXPR) + { + tree cond = gimple_assign_rhs1 (cur_use_stmt); + if (COMPARISON_CLASS_P (cond)) + { + ccode = TREE_CODE (cond); + crhs1 = TREE_OPERAND (cond, 0); + crhs2 = TREE_OPERAND (cond, 1); + } + else + return 0; + } + else + { + enum tree_code sc = gimple_assign_rhs_code (cur_use_stmt); + tree castlhs = gimple_assign_lhs (cur_use_stmt); + if (!CONVERT_EXPR_CODE_P (sc) + || !castlhs + || !INTEGRAL_TYPE_P (TREE_TYPE (castlhs)) + || (TYPE_PRECISION (TREE_TYPE (castlhs)) + > TYPE_PRECISION (TREE_TYPE (maxval)))) + return 0; + return 1; + } + } + else + return 0; + if ((ccode != EQ_EXPR && ccode != NE_EXPR) + || crhs1 != shiftlhs + || !integer_zerop (crhs2)) + return 0; + return 1; + } + if (TREE_CODE_CLASS (ccode) != tcc_comparison) return 0; @@ -4049,6 +4109,7 @@ arith_overflow_check_p (gimple *stmt, gimple *cast_stmt, gimple *&use_stmt, _8 = IMAGPART_EXPR <_7>; if (_8) and replace (utype) x with _9. + Or with x >> popcount (max) instead of x > max. Also recognize: x = ~z; @@ -4481,10 +4542,62 @@ match_arith_overflow (gimple_stmt_iterator *gsi, gimple *stmt, gcc_checking_assert (is_gimple_assign (use_stmt)); if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS) { - gimple_assign_set_rhs1 (use_stmt, ovf); - gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0)); - gimple_assign_set_rhs_code (use_stmt, - ovf_use == 1 ? NE_EXPR : EQ_EXPR); + if (gimple_assign_rhs_code (use_stmt) == RSHIFT_EXPR) + { + g2 = gimple_build_assign (make_ssa_name (boolean_type_node), + ovf_use == 1 ? 
NE_EXPR : EQ_EXPR, + ovf, build_int_cst (type, 0)); + gimple_stmt_iterator gsiu = gsi_for_stmt (use_stmt); + gsi_insert_before (&gsiu, g2, GSI_SAME_STMT); + gimple_assign_set_rhs_with_ops (&gsiu, NOP_EXPR, + gimple_assign_lhs (g2)); + update_stmt (use_stmt); + use_operand_p use; + single_imm_use (gimple_assign_lhs (use_stmt), &use, + &use_stmt); + if (gimple_code (use_stmt) == GIMPLE_COND) + { + gcond *cond_stmt = as_a <gcond *> (use_stmt); + gimple_cond_set_lhs (cond_stmt, ovf); + gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0)); + } + else + { + gcc_checking_assert (is_gimple_assign (use_stmt)); + if (gimple_assign_rhs_class (use_stmt) + == GIMPLE_BINARY_RHS) + { + gimple_assign_set_rhs1 (use_stmt, ovf); + gimple_assign_set_rhs2 (use_stmt, + build_int_cst (type, 0)); + } + else if (gimple_assign_cast_p (use_stmt)) + gimple_assign_set_rhs1 (use_stmt, ovf); + else + { + tree_code sc = gimple_assign_rhs_code (use_stmt); + gcc_checking_assert (sc == COND_EXPR); + tree cond = gimple_assign_rhs1 (use_stmt); + cond = build2 (TREE_CODE (cond), + boolean_type_node, ovf, + build_int_cst (type, 0)); + gimple_assign_set_rhs1 (use_stmt, cond); + } + } + update_stmt (use_stmt); + gsi_remove (&gsiu, true); + gsiu = gsi_for_stmt (g2); + gsi_remove (&gsiu, true); + continue; + } + else + { + gimple_assign_set_rhs1 (use_stmt, ovf); + gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0)); + gimple_assign_set_rhs_code (use_stmt, + ovf_use == 1 + ? NE_EXPR : EQ_EXPR); + } } else { |